File spacewalk-utils-git-0.1e68a87.obscpio of Package spacewalk-utils

07070100000000000041FD00000000000000000000000362C3F37D00000000000000000000000000000000000000000000001000000000spacewalk-utils07070100000001000081B400000000000000000000000162C3F37D000015C2000000000000000000000000000000000000002F00000000spacewalk-utils/spacewalk-export-channels.sgml<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN" [
<!ENTITY PRODUCT "Red Hat Satellite or Spacewalk server">
]>
<refentry>

<RefMeta>
<RefEntryTitle>spacewalk-export-channels</RefEntryTitle><manvolnum>8</manvolnum>
<RefMiscInfo>Version 0.9</RefMiscInfo>
</RefMeta>

<RefNameDiv>
<RefName><command>spacewalk-export-channels</command></RefName>
<RefPurpose>
Utility for exporting the minimal set of RPMs and metadata required to recreate the &PRODUCT; channel content
</RefPurpose>
</RefNameDiv>


<RefSynopsisDiv>
<Synopsis>
    <cmdsynopsis>
        <command>spacewalk-export-channels</command>
        <arg>options <replaceable>...</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-d <replaceable>DIRECTORY</replaceable></arg>
        <arg>--dir=<replaceable>DIRECTORY</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-e</arg>
        <arg>--exported-only</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-f</arg>
        <arg>--force</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-o <replaceable>ORG_ID</replaceable></arg>
        <arg>--org_id=<replaceable>ORG_ID</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-q</arg>
        <arg>--quiet</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-s</arg>
        <arg>--skip-repogeneration</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-S</arg>
        <arg>--no-size</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-v</arg>
        <arg>--verbose</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-h</arg>
        <arg>--help</arg>
    </cmdsynopsis>
</Synopsis>
</RefSynopsisDiv>


<RefSect1><Title>Description</Title>
  <para>
      <emphasis>spacewalk-export-channels</emphasis> exports the minimal set of RPMs and channel-metadata required to be able to recreate the channel-contents.
  </para>

  <para>
      <emphasis>spacewalk-export-channels</emphasis> exports the data and content necessary to recreate the channels of the &PRODUCT; instance, <emphasis>assuming that online repositories and Red Hat content are otherwise available</emphasis>. Data and content of online repositories and Red Hat-provided channels are not included.
  </para>

  <para>
      Requests can be limited by specifying specific organization-ids of interest; only channels owned by the specified organizations will be considered.
  </para>

  <para>
      The structure of the export directory is:
  </para>
  <screen>
      export-dir:
        export.csv
        {org-id}:
            {channel-id}.csv
            {channel-id}:
                {rpm1}.rpm
                {rpm2}.rpm
                {rpm3}.rpm
                repodata:
                    {checksum}-other.xml.gz
                    {checksum}-filelists.xml.gz
                    {checksum}-primary.xml.gz
                    repomd.xml
  </screen>

</RefSect1>


<RefSect1><Title>Options</Title>
<variablelist>
    <varlistentry>
        <term>-d, --dir=<replaceable>DIRECTORY</replaceable></term>
        <listitem>
            <para>Specify export-directory (will be created if necessary) - <emphasis>REQUIRED</emphasis></para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-e, --exported-only</term>
        <listitem>
            <para>CSV output will contain only exported packages (by default, CSV output contains all packages, even those available in external repositories and in clone original channels)</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-f, --force</term>
        <listitem>
          <para>Overwrite exported package rpms, even if already present in the dump</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-o <replaceable>ORG-ID</replaceable>, --org_id=<replaceable>ORG-ID</replaceable></term>
        <listitem>
        <para>Limit channels to those owned by the specified organization-id. Can be specified multiple times to export channels for multiple organizations.  If not specified, all organizations' channels will be exported.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-s, --skip-repogeneration</term>
        <listitem>
        <para>Omit repodata generation for exported channels</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-S, --no-size</term>
        <listitem>
        <para>Do not check package size. Speeds up the export, but misses a chance to catch errors.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-q, --quiet</term>
        <listitem>
        <para>Log only errors</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-v, --verbose</term>
        <listitem>
        <para>Increased logging output, may be used several times</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-h, --help</term>
        <listitem>
            <para>Display the help screen with a list of options.</para>
        </listitem>
    </varlistentry>
</variablelist>
</RefSect1>


<RefSect1><Title>Examples</Title>
<example>
  <title>Export all channels to the specified directory</title>
  <para>spacewalk-export-channels -d /tmp/exports</para>
</example>
<example>
  <title>Export only channels for org-id 4 and 7 to specified location</title>
  <para>spacewalk-export-channels -o 4 -o 7 -d /tmp/exports</para>
</example>
</RefSect1>


<RefSect1><Title>Authors</Title>
<simplelist>
    <member>Tomáš Leštách<email>tlestach@redhat.com</email></member>
</simplelist>
</RefSect1>
</refentry>
07070100000002000081B400000000000000000000000162C3F37D00000D54000000000000000000000000000000000000002300000000spacewalk-utils/spacewalk-api.sgml<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN">
<refentry>

<RefMeta>
<RefEntryTitle>spacewalk-api</RefEntryTitle><manvolnum>8</manvolnum>
<RefMiscInfo>Version 1.0</RefMiscInfo>
</RefMeta>

<RefNameDiv>
<RefName><command>spacewalk-api</command></RefName>
<RefPurpose>
Call Spacewalk API from command line.
</RefPurpose>
</RefNameDiv>

<RefSynopsisDiv>
<Synopsis>
    <cmdsynopsis>
        <command>spacewalk-api [OPTIONS] --server=spacewalk.domain.com FUNCTION [PARAM1 PARAM2 ...]</command>
    </cmdsynopsis>
</Synopsis>
</RefSynopsisDiv>

<RefSect1><Title>Description</Title>
    <para><emphasis>spacewalk-api</emphasis> interacts with the Spacewalk server and exposes its API layer.</para>
    <para>FUNCTION is the API method you wish to call, followed by its parameters.  There are a few special substitutions available:</para>
    <para><emphasis>%session%</emphasis> - is replaced with sessionid. If you use --nologin option, then it is replaced by empty string.</para>
    <para><emphasis>%file:/some/file%</emphasis> - is replaced by content of given file.</para>
    <para><emphasis>%boolean:value%</emphasis> - is interpreted as a boolean value. Use <emphasis>0</emphasis> as <emphasis>false</emphasis> and <emphasis>any other integer</emphasis> as <emphasis>true</emphasis>.</para>
    <para><emphasis>%integer:value%</emphasis> - is interpreted as an integer value.</para>
    <para><emphasis>%string:value%</emphasis> - is interpreted as a string value.</para>
</RefSect1>

<RefSect1><Title>Options</Title>
<variablelist>
    <varlistentry>
        <term>-h, --help</term>
        <listitem>
            <para>Display the help screen with a list of options.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--server</term>
        <listitem>
            <para>URL of your Spacewalk server.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--login --nologin</term>
        <listitem>
            <para>If we should log in or not. Default is to log in.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--user</term>
        <listitem>
            <para>Name of user to log in.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--password</term>
        <listitem>
            <para>If you do not specify this and unless --nologin is specified, you will be prompted for your password.</para>
        </listitem>
    </varlistentry>
</variablelist>
</RefSect1>

<RefSect1><Title>EXAMPLES</Title>
    <para>spacewalk-api --server=spacewalk.com --nologin api.systemVersion</para>
    <para>spacewalk-api --server=spacewalk.com --user=foo --password=bar channel.listAllChannels "%session%"</para>
    <para>spacewalk-api --server=spacewalk.com --user=foo --password=bar user.setReadOnly "%session%" user "%boolean:1%"</para>
    <para>spacewalk-api --server=spacewalk.com --nologin proxy.isProxy '%file:/etc/sysconfig/rhn/systemid%'</para>
</RefSect1>

<RefSect1><Title>Authors</Title>
<simplelist>
    <member>Miroslav Suchý<email>msuchy@redhat.com</email></member>
    <member>Tomas Kasparek<email>tkasparek@redhat.com</email></member>
</simplelist>
</RefSect1>

<RefSect1><Title>COPYRIGHT AND LICENSE</Title>
    <para>Copyright (c) 2009--2017 Red Hat, Inc.</para>
    <para>Released under GNU General Public License, version 2 (GPLv2).</para>
</RefSect1>

</RefEntry>
07070100000003000081B400000000000000000000000162C3F37D00000727000000000000000000000000000000000000001E00000000spacewalk-utils/Makefile.defs# Common pathnames and programs for the Spacewalk project
#

# if not defined, define it as a noop (current directory)
TOP		?= .

# global defines which control this build and where we deploy files
ROOT		?= /usr/share/rhn
export ROOT

# optional staging prefix (e.g. for package builds); empty for live installs
PREFIX		?=
export PREFIX

# Compilation stuff
CC		= gcc
PYTHON_INCLUDE	= -I/usr/include/python$(PythonVersion)
CFLAGS		= -Wall -O2 -fomit-frame-pointer $(PYTHON_INCLUDE) -fPIC
SOFLAGS		= -shared -fPIC

# Installation stuff
INSTALL		= /usr/bin/install -c --verbose
INSTALL_BIN	= $(INSTALL) -m 755
INSTALL_DATA	= $(INSTALL) -m 644
INSTALL_DIR	= $(INSTALL) -m 755 -d

# This is for the subdir part; FILES is supplied by the including Makefile
PYFILES		= $(addsuffix .py,$(FILES))

# what do we need to install and where
INSTALL_FILES	+= $(PYFILES)
INSTALL_DEST	?= $(ROOT)/$(SUBDIR)

DIRS		+= $(addprefix $(PREFIX), \
			$(sort $(EXTRA_DIRS)) $(INSTALL_DEST))

all :: $(INSTALL_FILES)

# copy every install file into its destination (dirs created via $(DIRS))
install :: all $(DIRS) $(INSTALL_FILES)
	@$(foreach f,$(INSTALL_FILES), \
		$(INSTALL_DATA) $(f) $(PREFIX)$(INSTALL_DEST)/$(f) ; )

$(DIRS):
	$(INSTALL_DIR) $@

clean ::
	@rm -fv *~ *.pyc *.pyo .??*~
	@rm -fv .\#*
	@rm -fv core

# useful macro: re-run the current target in every SUBDIRS entry
descend-subdirs = @$(foreach d,$(SUBDIRS), $(MAKE) -C $(d) $@ || exit 1; )

# subdirs are treated at the end
all install clean:: $(SUBDIRS)
	$(descend-subdirs)


# extra toy targets
# Python checker support
PYTHONPATH      = $(TOP)
PYCHECKER       = pychecker
PYCHECKEROPTS   = --maxreturns 10 --maxbranches 15
DBCHECKER       = db-checker.py
DBCHECKEROPTS   =
DB              = user/pass@instance

pychecker :: $(PYFILES)
	@PYTHONPATH=$(PYTHONPATH) $(PYCHECKER) $(PYCHECKEROPTS) $(PYFILES) || :
	$(descend-subdirs)

db-checker :: $(PYFILES)
	@PYTHONPATH=$(PYTHONPATH) $(TOP)/$(DBCHECKER) $(DBCHECKEROPTS) $(PYFILES) || :
	$(descend-subdirs)

graphviz ::
	@PYTHONPATH=$(PYTHONPATH) $(PYCHECKER) -Z $(PYCHECKEROPTS) $(PYFILES) || exit 0

07070100000004000081FD00000000000000000000000162C3F37D000001F1000000000000000000000000000000000000003000000000spacewalk-utils/spacewalk-watch-channel-sync.sh#!/bin/bash

###################################################
#
# The script will display the progress of latest
# channel sync and update every 3 seconds
#
# Anthony Tortola 2019
#
###################################################

# Refresh forever; the user breaks out with Ctrl-C.
while true
do
        # Newest file in the reposync log directory (ls -tr sorts oldest-first,
        # so tail -1 picks the most recently modified log).
        filename=$(ls -tr /var/log/rhn/reposync | tail -1)
        clear
        echo -e "\nWatching $filename\n\n\tPress Ctrl-C to Break\n"
        echo -e "\t$(date)\n"
        # BUGFIX: quote the path so a log name containing whitespace or glob
        # characters cannot be word-split or expanded.
        tail -n20 "/var/log/rhn/reposync/$filename"
        sleep 3
done
exit
07070100000005000081B400000000000000000000000162C3F37D00000FDF000000000000000000000000000000000000002D00000000spacewalk-utils/spacewalk-final-archive.sgml<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN">
<refentry>

<RefMeta>
<RefEntryTitle>spacewalk-final-archive</RefEntryTitle><manvolnum>8</manvolnum>
<RefMiscInfo>Version 0.1</RefMiscInfo>
</RefMeta>

<RefNameDiv>
<RefName><command>spacewalk-final-archive</command></RefName>
<RefPurpose>
Utility for archiving as much exportable data as available from a running instance
</RefPurpose>
</RefNameDiv>


<RefSynopsisDiv>
<Synopsis>
    <cmdsynopsis>
        <command>spacewalk-final-archive</command>
        <arg>options <replaceable>...</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>--dir=<replaceable>DIR</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>--no-db</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>--no-reports</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>--no-debug</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>--no-transition</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>--clean</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-h</arg>
        <arg>--help</arg>
    </cmdsynopsis>
</Synopsis>
</RefSynopsisDiv>


<RefSect1><Title>Description</Title>
  <para>
      <emphasis>spacewalk-final-archive</emphasis> archives as much exportable data as is available from a running spacewalk system
  </para>

  <para>
      This tool leverages a number of existing spacewalk utilities, including <emphasis>db-control</emphasis>, <emphasis>spacewalk-report</emphasis>, <emphasis>spacewalk-debug</emphasis>, and <emphasis>spacewalk-export</emphasis>, to collect their output into a specified location and gather it into one compressed file that can be easily stored elsewhere as an archive of the 'final' state of the spacewalk system.
  </para>

  <para>
      Individual tools' output can be skipped if desired.
  </para>
</RefSect1>


<RefSect1><Title>Options</Title>
<variablelist>
    <varlistentry>
        <term>--dir=<replaceable>DIR</replaceable></term>
        <listitem>
            <para>Specify directory to store results (will be created if not found) - defaults to /tmp/spacewalk-final</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term> --no-db</term>
        <listitem>
            <para>Do not gather the output of <emphasis>db-control backup</emphasis></para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term> --no-reports</term>
        <listitem>
            <para>Do not gather the output of all <emphasis>spacewalk-report</emphasis></para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term> --no-debug</term>
        <listitem>
            <para>Do not gather the output of <emphasis>spacewalk-debug</emphasis></para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term> --no-transition</term>
        <listitem>
            <para>Do not gather the output of <emphasis>spacewalk-export</emphasis></para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--clean</term>
        <listitem>
           <para>Describes how to clean up from previous runs. This does <emphasis>not</emphasis> execute the command, leaving it to the user to decide whether issuing an 'rm -rf' command as root is a good idea.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-h, --help</term>
        <listitem>
            <para>Display the help screen with a list of options.</para>
        </listitem>
    </varlistentry>
</variablelist>
</RefSect1>


<RefSect1><Title>Examples</Title>
<example>
    <title>Archive all entities to the default location (/tmp/spacewalk-final)</title>
  <para>spacewalk-final-archive</para>
</example>
<example>
  <title>Archive only the database and spacewalk-debug output, using directory /tmp/arch as a destination</title>
  <para>spacewalk-final-archive --dir=/tmp/arch --no-reports --no-transition</para>
</example>
</RefSect1>


<RefSect1><Title>Authors</Title>
<simplelist>
    <member>Grant Gainey<email>ggainey@redhat.com</email></member>
</simplelist>
</RefSect1>
</refentry>
07070100000006000081FD00000000000000000000000162C3F37D000008E6000000000000000000000000000000000000002700000000spacewalk-utils/migrate-system-profile#!/usr/bin/python3
#
# wrapper for Script to migrate systems between Orgs.
#
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# Authors: Pradeep Kilambi
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#


# Refuse to be imported as a library: this file is a command-line wrapper
# whose module level acquires a lockfile and may exit the process.
if __name__ != '__main__':
    raise ImportError("module cannot be imported")

import sys


def systemExit(code, msgs=None):
    """Write each optional message to stderr, then exit with *code*.

    ``msgs`` may be a single message or a list/tuple of messages.
    """
    if msgs:
        # A bare (non-list/tuple) message is wrapped so it can be iterated.
        if type(msgs) not in [type([]), type(())]:
            msgs = (msgs, )
        sys.stderr.write(''.join(str(m) + '\n' for m in msgs))
    sys.exit(code)

# Location of the Spacewalk python libraries; appended (not prepended) so
# anything already on sys.path keeps precedence.
_LIBPATH = "/usr/share/rhn"
# add to the path if need be
if _LIBPATH not in sys.path:
    sys.path.append(_LIBPATH)

from rhn import rhnLockfile


### acquire lock/check for other instances of migrate servers
#   i.e., lock against multiple instances of migrate servers
LOCK = None
try:
    LOCK = rhnLockfile.Lockfile('/tmp/migrate-server.pid')
except rhnLockfile.LockfileLockedException:
    # Another instance holds the pidfile lock; bail out immediately.
    systemExit(1, "ERROR: attempting to run more than one instance of migrate-server. Exiting.")

try:
    # NOTE: importing the real implementation only after the lock is held.
    from utils import migrateSystemProfile
except KeyboardInterrupt:
    systemExit(0, "\nUser interrupted process.")
except ImportError as e:
    systemExit(2, "Unable to find migrate server tool.\n"
                  "Error: %s" % e)


def releaseLOCK():
    """Release the module-level run-lock, if one was acquired."""
    global LOCK
    if LOCK:
        LOCK.release()


def main():
    """Run the system-profile migration, releasing the run-lock on exit.

    Returns the migration tool's exit code; a keyboard interrupt exits
    the process with status 0 via systemExit().
    """
    try:
        return migrateSystemProfile.main()
    except KeyboardInterrupt:
        systemExit(0, "\nUser interrupted process.")
    finally:
        # BUGFIX: the original called releaseLOCK() *after* an unconditional
        # return, so the lockfile was never released here.  A finally block
        # guarantees release on every exit path (normal return, interrupt,
        # or unexpected exception).
        releaseLOCK()


if __name__ == '__main__':
    try:
        # abs() guards against a negative return code; `or 0` maps a None
        # return from main() to a clean exit status.
        sys.exit(abs(main() or 0))
    except KeyboardInterrupt:
        systemExit(0, "\nUser interrupted process.")
07070100000007000081B400000000000000000000000162C3F37D00000554000000000000000000000000000000000000001900000000spacewalk-utils/Makefile# Makefile for spacewalk-utils
#

NAME := spacewalk-utils

# install destination under $(ROOT) (see Makefile.defs)
SUBDIR  = utils

# python modules (".py" is appended by Makefile.defs) and executable scripts
FILES	= __init__ systemSnapshot migrateSystemProfile depsolver cloneByDate
SCRIPTS = sw-system-snapshot migrate-system-profile spacewalk-api apply_errata \
	  spacewalk-common-channels delete-old-systems-interactive spacewalk-hostname-rename \
	  spacewalk-clone-by-date spacewalk-sync-setup spacewalk-manage-channel-lifecycle \
	  spacewalk-export spacewalk-export-channels spacewalk-final-archive sw-ldap-user-sync \
	  taskotop spacewalk-manage-snapshots spacewalk-watch-channel-sync.sh
CONFIGS = spacewalk-common-channels.ini sw-ldap-user-sync.conf

# man pages are only built when docbook2man is available on this host
DOCBOOK = $(wildcard /usr/bin/docbook2man)

SGMLS	= $(wildcard *.sgml)
MANS	= $(patsubst %.sgml,%.8,$(SGMLS) )

BINDIR	= /usr/bin
MANDIR	?= /usr/man
CONFDIR = /etc/rhn

EXTRA_DIRS = $(MANDIR)/man8 $(BINDIR) $(CONFDIR)

include Makefile.defs

# install scripts
all :: $(SCRIPTS)
install :: $(SCRIPTS) $(PREFIX)/$(BINDIR)
	$(INSTALL_BIN) $(SCRIPTS) $(PREFIX)/$(BINDIR)

ifneq ($(DOCBOOK),)
# install man pages
all	:: $(MANS)

install :: $(MANS) $(PREFIX)/$(MANDIR)
	$(INSTALL_DATA) $(MANS) $(PREFIX)/$(MANDIR)/man8
install :: $(CONFIGS) $(PREFIX)/$(CONFDIR)
	$(INSTALL_DATA) $(CONFIGS) $(PREFIX)/$(CONFDIR)
endif

# substitute the deploy root into files that reference @@ROOT@@
%.new : %
	sed -e 's|@@ROOT@@|$(ROOT)|g' <$* >$@

# build a section-8 man page from its DocBook SGML source
%.8 : %.sgml
	$(DOCBOOK) $<

clean ::
	@rm -fv $(MANS) manpage.* *.new

07070100000008000081FD00000000000000000000000162C3F37D00001831000000000000000000000000000000000000002800000000spacewalk-utils/spacewalk-final-archive#!/usr/bin/python3
#
# Utility for archiving information from a running Spacewalk/Satellite5 system
# prior to a final shutdown.
#
# Copyright (c) 2015 Red Hat, Inc.
#
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#

"""
spacewalk-final-archive - a tool for archiving data from an existing Spacewalk/Satellite-5
instance prior to final shutdown.
"""

import logging
import os
import sys

from optparse import OptionParser
from subprocess import Popen, call, PIPE

# Default location for all output; overridable with --dir.
DEFAULT_ARCHIVE_DIR = '/tmp/spacewalk-final'
# Sub-directory of the archive dir that gets tarred up at the end.
ARCHIVE_ROOT = 'archive'
# Relative locations (under ARCHIVE_ROOT) used by each archiving step.
DB_DIR = 'db_backup'
DB_NAME = 'DBBACKUP'
REPORTS_DIR = 'reports'
TRANSITION_DIR = 'transition'
DEBUG_DIR = 'debug'
# Name of the final bzip2-compressed tarball.
ARCHIVE_NAME = 'final-archive.tar.bz2'


def setupOptions():
    """Build and return the OptionParser for spacewalk-final-archive."""
    parser = OptionParser(usage='usage: %prog [options]')
    parser.add_option(
        '--dir', action='store', dest='archive_dir', metavar='DIR',
        default=DEFAULT_ARCHIVE_DIR,
        help='Specify directory to store the final archive into (will be created if not found) - defaults to ' + DEFAULT_ARCHIVE_DIR)
    parser.add_option(
        '--no-db', action='store_true', dest='skip_db', default=False,
        help='Do not archive a database backup')
    parser.add_option(
        '--no-reports', action='store_true', dest='skip_reports',
        default=False, help='Do not archive space-report output')
    parser.add_option(
        '--no-debug', action='store_true', dest='skip_debug',
        default=False, help='Do not archive spacewalk-debug output')
    parser.add_option(
        '--no-transition', action='store_true', dest='skip_transition',
        default=False, help='Do not archive spacewalk-export output')
    parser.add_option(
        '--clean', action='store_true', dest='clean',
        default=False, help='How do I clean up from previous runs?')
    return parser


def setupLogging(opt):
    """Configure root logging at INFO with a terse level/message format.

    The *opt* argument is accepted for call-site compatibility and unused.
    """
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)s: %(message)s')


def setupOutputDir(options):
    """Create the directory tree that the archiving steps write into.

    Layout: <archive_dir>/archive/{db_backup,reports,transition,debug}.
    The archive dirs and the db backup dir are made world-writeable
    (0777) because db-control's backup runs as a different user
    (the db-dir comment below says postgres needs write access).
    """
    def _ensure_dir(dirpath, mode=None):
        # Create dirpath if missing; optionally force its permission bits.
        if not os.path.isdir(dirpath):
            os.mkdir(dirpath)
            if mode is not None:
                os.chmod(dirpath, mode)

    _ensure_dir(options.archive_dir, 0o777)

    arch_root = '%s/%s' % (options.archive_dir, ARCHIVE_ROOT)
    _ensure_dir(arch_root, 0o777)

    # db-dir needs to be writeable by postgres
    _ensure_dir('%s/%s' % (arch_root, DB_DIR), 0o777)

    _ensure_dir('%s/%s' % (arch_root, REPORTS_DIR))
    _ensure_dir('%s/%s' % (arch_root, TRANSITION_DIR))
    _ensure_dir('%s/%s' % (arch_root, DEBUG_DIR))


def _issueReport(options, reportname):
    """Run ``spacewalk-report <reportname>`` and capture its CSV output.

    Returns the path of the CSV file written under the reports directory.
    """
    report_file = '%s/%s/%s/%s.csv' % (
        options.archive_dir, ARCHIVE_ROOT, REPORTS_DIR, reportname)
    # BUGFIX: the original passed open(report_file, 'w') inline and never
    # closed it; a context manager guarantees the handle is closed and the
    # buffer flushed even if the subprocess call raises.
    with open(report_file, 'w') as out:
        call(['/usr/bin/spacewalk-report', reportname], stdout=out)
    return report_file


def archiveDb(options):
    """Back up the database via ``db-control online-backup``."""
    backup_path = '%s/%s/%s/%s' % (
        options.archive_dir, ARCHIVE_ROOT, DB_DIR, DB_NAME)
    call(['/usr/bin/db-control', 'online-backup', backup_path])

def archiveReports(options):
    """Capture every available spacewalk-report into the reports dir.

    Running ``spacewalk-report`` with no arguments lists the report names,
    one per line; each is then captured via _issueReport().
    """
    # BUGFIX: under Python 3 a Popen pipe yields bytes, so report names were
    # rendered as "b'...'" when interpolated into the CSV filename.  Text
    # mode decodes the stream; the with-block also reaps the child process.
    with Popen(['/usr/bin/spacewalk-report'], stdout=PIPE,
               universal_newlines=True) as reports:
        for a_report in reports.stdout:
            _issueReport(options, a_report.strip())

def archiveDebug(options):
    """Capture spacewalk-debug output into the debug sub-directory."""
    debug_dest = '%s/%s/%s' % (options.archive_dir, ARCHIVE_ROOT, DEBUG_DIR)
    call(['/usr/bin/spacewalk-debug', '--dir', debug_dest])

def archiveTransition(options):
    """Capture spacewalk-export output into the transition sub-directory."""
    export_dest = '%s/%s/%s' % (
        options.archive_dir, ARCHIVE_ROOT, TRANSITION_DIR)
    call(['/usr/bin/spacewalk-export', '--export-dir', export_dest])

def packageArchive(options):
    """Bundle the archive root into one bzip2-compressed tarball."""
    logging.info('...preparing to archive...')
    tarball = '%s/%s' % (options.archive_dir, ARCHIVE_NAME)
    # -C changes into the archive dir so the tarball contains only the
    # ARCHIVE_ROOT subtree, not the absolute path.
    call(['/bin/tar', '-c', '-j', '-C', options.archive_dir,
          '-f', tarball, ARCHIVE_ROOT])
    logging.info(
        'Archive created at %s/%s' % (options.archive_dir, ARCHIVE_NAME))

def cleanup(options):
    """Tell the user how to remove output from previous runs.

    Deliberately only prints the command; it never executes it.
    """
    for line in ('To clean up, issue the following command:',
                 'sudo rm -rf %s' % (options.archive_dir),
                 'NOTE:  No, I will not do it for you!'):
        logging.info(line)

def checkSuperUser():
    """Abort with exit status 1 unless running with root privileges."""
    if os.geteuid():
        print("You must be root to run this!")
        sys.exit(1)

if __name__ == '__main__':
    # Parse the command line and configure INFO-level logging.
    parser = setupOptions()
    (options, args) = parser.parse_args()
    setupLogging(options)

    # Every archiving step shells out to tools that require root.
    checkSuperUser()

    # --clean only explains how to clean up, then exits.
    if (options.clean):
        cleanup(options)
        sys.exit(0)

    setupOutputDir(options)

    # Tracks whether at least one archiving step actually ran.
    archived_something = False

    if options.skip_db:
        logging.info('Skipping database...')
    else:
        logging.info('Archiving database...')
        archiveDb(options)
        archived_something = True;

    if options.skip_reports:
        logging.info('Skipping reports...')
    else:
        logging.info('Archiving reports...')
        archiveReports(options)
        archived_something = True;

    if options.skip_debug:
        logging.info('Skipping debug...')
    else:
        logging.info('Archiving debug...')
        archiveDebug(options)
        archived_something = True;

    if options.skip_transition:
        logging.info('Skipping transition-export...')
    else:
        logging.info('Archiving transition-export...')
        archiveTransition(options)
        archived_something = True;

    # Nothing to bundle if every step was skipped.
    if not archived_something:
        logging.info('...all output skipped - exiting.')
        sys.exit(0)

    packageArchive(options)

# vim:ts=4:expandtab:
07070100000009000081B400000000000000000000000162C3F37D000019D3000000000000000000000000000000000000002600000000spacewalk-utils/spacewalk-export.sgml<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN"
>
<refentry>

<RefMeta>
<RefEntryTitle>spacewalk-export</RefEntryTitle><manvolnum>8</manvolnum>
<RefMiscInfo>Version 0.9</RefMiscInfo>
</RefMeta>

<RefNameDiv>
<RefName><command>spacewalk-export</command></RefName>
<RefPurpose>
Utility for extracting data from an existing spacewalk instance
</RefPurpose>
</RefNameDiv>


<RefSynopsisDiv>
<Synopsis>
    <cmdsynopsis>
        <command>spacewalk-export</command>
        <arg>options <replaceable>...</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>--dump-repos</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>--entities=<replaceable>entity[,entity...]</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>--export-dir=<replaceable>DIR</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>--export-package=<replaceable>FILE</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>--ext-pkgs</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>--list-entities</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>--no-size</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>--org=<replaceable>ORG-ID</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>--skip-repogen</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>--clean</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>--debug</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>--quiet</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-h</arg>
        <arg>--help</arg>
    </cmdsynopsis>
</Synopsis>
</RefSynopsisDiv>


<RefSect1><Title>Description</Title>
  <para>
      <emphasis>spacewalk-export</emphasis> exports entities from a running spacewalk system
  </para>

  <para>
      Script to export a variety of entities from spacewalk in a format that can be consumed elsewhere.
  </para>

  <para>
      <emphasis>spacewalk-export</emphasis> leverages the <emphasis>spacewalk-report</emphasis> to extract entities in standard CSV format. The tool extracts the entities requested, or all entities of which it is aware, into the location specified, and then collects all results into one compressed file that can be easily transferred and consumed elsewhere.
  </para>

  <para>
      Requests can be limited by entity and/or to a specific subset of organizations. Channel-exports can be limited to external-packages-only, and repo-metadata can be included if desired.
  </para>
</RefSect1>


<RefSect1><Title>Options</Title>
<variablelist>
    <varlistentry>
        <term>--dump-repos</term>
        <listitem>
            <para>If specified, and if 'repositories' are being exported, the contents of any repositories specified using the <emphasis>file:</emphasis> protocol will be bundled into the final export-file.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--entities=<replaceable>entity[,entity...] </replaceable></term>
        <listitem>
            <para>Specify comma-separated list of entities to export (default is all)</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--export-dir=<replaceable>DIR</replaceable></term>
        <listitem>
            <para>Specify directory to store exports in (will be created if not found) - defaults to ~/spacewalk-export-dir</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--export-package=<replaceable>FILE</replaceable></term>
        <listitem>
            <para>Specify filename to use for final packaged-exports tarfile - defaults to spacewalk_export.tar.gz</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--ext-pkgs</term>
        <listitem>
            <para>Channel-output will contain only exported packages (by default, CSV output contains all packages, even those available in external repositories and in clone original channels)</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--list-entities</term>
        <listitem>
            <para>List supported entities (e.g., 'users', 'channels', etc.)</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term> --no-size</term>
        <listitem>
            <para>Do not check package size. Speeds up the export, but misses a chance to catch errors.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--org=<replaceable>ORG-ID</replaceable></term>
        <listitem>
            <para>Specify an org-id whose data we will export. Can be specified multiple times to export entities for multiple organizations.  If not specified, all organizations' entities will be exported.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--skip-repogen</term>
        <listitem>
            <para>Omit repodata generation for exported channels</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--clean</term>
        <listitem>
           <para>Describes how to clean up from previous exports. This does <emphasis>not</emphasis> execute the command, leaving it to the user to decide whether issuing an 'rm -rf' command as root is a good idea.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--debug</term>
        <listitem>
            <para>Log debugging output</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--quiet</term>
        <listitem>
            <para>Log only errors</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-h, --help</term>
        <listitem>
            <para>Display the help screen with a list of options.</para>
        </listitem>
    </varlistentry>
</variablelist>
</RefSect1>


<RefSect1><Title>Examples</Title>
<example>
  <title>Export all entities, including dumping repositories, to the default file locations</title>
  <para>spacewalk-export --dump-repos</para>
</example>
<example>
  <title>Export only users and channels, using directory /tmp/exports as an export-directory, and naming the final bundled-file 'my_users_and_chans.tar.gz'</title>
  <para>spacewalk-export --entities=users,channels --export-dir=/tmp/exports --export-package=my_users_and_chans.tar.gz</para>
</example>
<example>
  <title>Export only users for org-id 4 and 7 to default locations</title>
  <para>spacewalk-export --entities=users --org=4 --org=7</para>
</example>
</RefSect1>


<RefSect1><Title>Authors</Title>
<simplelist>
    <member>Grant Gainey<email>ggainey@redhat.com</email></member>
</simplelist>
</RefSect1>
</refentry>

0707010000000A000081FD00000000000000000000000162C3F37D00006639000000000000000000000000000000000000002500000000spacewalk-utils/spacewalk-sync-setup#!/usr/bin/python3
#
# Utility for setting up ISS master/slave org-mappings
#
# Copyright (c) 2013--2017 Red Hat, Inc.
#
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#

""" iss_setup - a tool for easing the pain of setting up organization-mappings for ISS """

import logging
import os
import sys
from optparse import OptionParser, OptionGroup
from os import path, access, R_OK
from os.path import expanduser
import getpass
from salt.ext import six

try:
    import xmlrpclib
except ImportError:
    import xmlrpc.client as xmlrpclib  # pylint: disable=F0401

try:
    from ConfigParser import SafeConfigParser
except ImportError:
    from configparser import SafeConfigParser

CONF_DIR = os.path.expanduser('~/.spacewalk-sync-setup')
USER_CONF_FILE = os.path.join(CONF_DIR, 'config')
DEFAULT_MASTER_SETUP_FILENAME = os.path.join(CONF_DIR, 'master.txt')
DEFAULT_SLAVE_SETUP_FILENAME = os.path.join(CONF_DIR, 'slave.txt')
UNKNOWN_FQDN = 'UNKNOWN-FQDN'

DEFAULT_CONFIG = """
# Default defines the slave and/or master we should connect to by default
[Default]
# Default slave FQDN
slave.default=SLAVE
# Default master FQDN
master.default=MASTER
"""

DEFAULT_CONFIG_FQDN_STANZA = """
# Any spacewalk Fully-Qualified-Domain-Name (fqdn) can have a stanza,
# defining logins and setup files to use
[FQDN]
# Login of a sat-admin login for this instance
login = LOGIN
# NOTE: Putting passwords in cfg-files is suboptimal.  The tool will ask
# But if you really want to, go ahead
password = PASSWORD
# Where's the slave-side setup file for this spacewalk instance?
slave.setup = SLAVE_FILE
# Where's the master-side setup file for this spacewalk instance?
master.setup = MASTER_FILE
"""


def setupOptions():
    """Build and return the OptionParser for spacewalk-sync-setup.

    Options are organized into five groups: Connections (which hosts and
    credentials to use), Templates (generate setup files), Setup (which
    setup files to read), Action (apply/describe) and Utility (logging).
    """
    usage = 'usage: %prog [options]'
    parser = OptionParser(usage=usage)

    # Connections: the slave/master FQDNs and sat-admin credentials.
    # NOTE: slave/master default to the UNKNOWN_FQDN sentinel, not None.
    cnxGrp = OptionGroup(parser, "Connections", "Identify the spacewalk instances we're going to connect to")
    cnxGrp.add_option('--ss', '--slave-server', action='store', dest='slave', default=UNKNOWN_FQDN,
                      metavar='SLAVE-FQDN',
                      help="name of a slave to connect to.")
    cnxGrp.add_option('--sl', '--slave-login', action='store', dest='slave_login', default="",
                      metavar='LOGIN',
                      help="A sat-admin login for slave-server")
    cnxGrp.add_option('--sp', '--slave-password', action='store', dest='slave_password', default="",
                      metavar='PASSWORD',
                      help="Password for login slave-login on slave-server")
    cnxGrp.add_option('--ms', '--master-server', action='store', dest='master', default=UNKNOWN_FQDN,
                      metavar='MASTER-FQDN',
                      help="name of a master to connect to.")
    cnxGrp.add_option('--ml', '--master-login', action='store', dest='master_login', default="",
                      metavar='LOGIN',
                      help="A sat-admin login for master-server")
    cnxGrp.add_option('--mp', '--master-password', action='store', dest='master_password', default="",
                      metavar='PASSWORD',
                      help="Password for login master-login on master-server")
    cnxGrp.add_option('--md', '--master-default', action='store_true', dest='master_default', default=False,
                      help="Should the specified master be made the default-master in a specified template-file")
    parser.add_option_group(cnxGrp)

    # Templates: generate slave- and/or master-side setup files.
    tmplGrp = OptionGroup(parser, "Templates",
                          "Options for creating initial versions of setup files\n"
                          "NOTE: This will replace existing machine-specific stanzas with new content")
    tmplGrp.add_option('--cst', '--create-slave-template', action='store_true', dest='slave_template', default=False,
                       help="Create/update a setup file containing a stanza for the slave we're pointed at, "
                       "based on information from the master we're pointed at")
    tmplGrp.add_option('--cmt', '--create-master-template', action='store_true', dest='master_template', default=False,
                       help="Create/update a setup file stanza for the master we're pointed at, "
                       "based on information from the slave we're pointed at")
    tmplGrp.add_option('--ct', '--create-templates', action='store_true', dest='both_template', default=False,
                       help="Create both a master and a slave setup file, for the master/slave pair we're pointed at")
    parser.add_option_group(tmplGrp)

    # Setup: which setup files to apply (defaults live under ~/.spacewalk-sync-setup).
    setupGrp = OptionGroup(parser, "Setup",
                           "Specify the setup files we're actually going to apply to a slave/master")
    setupGrp.add_option('--msf', '--master-setup-file', action='store', dest='master_file', metavar='FILE',
                        default=DEFAULT_MASTER_SETUP_FILENAME,
                        help='Specify the master-setup-file we should use')
    setupGrp.add_option('--ssf', '--slave-setup-file', action='store', dest='slave_file', metavar='FILE',
                        default=DEFAULT_SLAVE_SETUP_FILENAME,
                        help='Specify the slave-setup-file we should use')
    parser.add_option_group(setupGrp)

    # Action: whether to actually touch the spacewalk instances, or just describe.
    actionGrp = OptionGroup(parser, "Action", "Should we actually affect the specified spacewalk instances?")
    actionGrp.add_option('--dt', '--describe-templates', action='store_true', dest='describe_templates', default=False,
        help="Describe existing templates for master and slave hosts.")
    actionGrp.add_option('--apply', action='store_true', dest='apply', default=False,
        help="make the changes specified by the setup files to the specified spacewalk instances")
    actionGrp.add_option('--ch', '--configured-hosts', action='store_true', dest='configured_hosts', default=False,
        help="Use all configured hosts from the default configuration if not explicitly specified.")
    parser.add_option_group(actionGrp)

    # Utility: logging verbosity (debug wins over quiet, see setupLogging).
    utilGrp = OptionGroup(parser, "Utility")
    utilGrp.add_option('-d', '--debug', action='store_true', default=False, dest='debug',
                       help='Log debugging output')
    utilGrp.add_option('-q', '--quiet', action='store_true', default=False, dest='quiet',
                       help='Log only errors')
    parser.add_option_group(utilGrp)

    return parser


def setupLogging(opt):
    """Configure the root logger from the -d/-q command-line flags.

    --debug takes precedence over --quiet; the default level is INFO.
    """
    if opt.debug:
        chosen_level = logging.DEBUG
    else:
        chosen_level = logging.ERROR if opt.quiet else logging.INFO

    logging.basicConfig(level=chosen_level, format='%(levelname)s: %(message)s')
    return


def initializeConfig(opt, handle):
    """Write an initial configuration to *handle*.

    CLI-supplied values are used where present; anything missing is
    prompted for interactively via ask().
    """
    def value_or_ask(value, prompt, hidden=False):
        # ask() only fires when the CLI did not supply the value
        return value if value else ask(prompt, password=hidden)

    hdr = DEFAULT_CONFIG
    master_stanza = DEFAULT_CONFIG_FQDN_STANZA
    slave_stanza = DEFAULT_CONFIG_FQDN_STANZA

    # Master side: fqdn, login, password, optional setup-file path
    master = value_or_ask(opt.master, "Fully qualified domain name for master")
    hdr = hdr.replace('MASTER', master)
    master_stanza = master_stanza.replace('FQDN', master)
    master_stanza = master_stanza.replace(
        'LOGIN', value_or_ask(opt.master_login, "Admin login for %s" % master))
    master_stanza = master_stanza.replace(
        'PASSWORD', value_or_ask(opt.master_password, "Password for %s" % master, hidden=True))
    if opt.master_file:
        master_stanza = master_stanza.replace('MASTER_FILE', opt.master_file)

    # Slave side: same shape as the master stanza
    slave = value_or_ask(opt.slave, "Fully qualified domain name for slave")
    hdr = hdr.replace('SLAVE', slave)
    slave_stanza = slave_stanza.replace('FQDN', slave)
    slave_stanza = slave_stanza.replace(
        'LOGIN', value_or_ask(opt.slave_login, "Admin login for %s" % slave))
    slave_stanza = slave_stanza.replace(
        'PASSWORD', value_or_ask(opt.slave_password, "Password for %s" % slave, hidden=True))
    if opt.slave_file:
        slave_stanza = slave_stanza.replace('SLAVE_FILE', opt.slave_file)

    logging.debug("Header is now " + hdr)
    logging.debug("Slave-stanza is now " + slave_stanza)
    logging.debug("Master-stanza is now " + master_stanza)

    handle.write(hdr)
    handle.write(slave_stanza)
    handle.write(master_stanza)

    return


def setupConfig(opt):
    """Load the user configuration, creating it interactively if absent.

    Recognized cfg-values:
      * default master
      * default slave
      * For specific FQDNs:
        * login
        * password
        * master-setup-file
        * slave-setup-file

    Returns the parsed SafeConfigParser.  (The original listed the values
    above in several consecutive bare string literals; only the first was
    a docstring, the rest were discarded no-op statements.)
    """
    # server-specifics will be loaded from the configuration file later
    config = SafeConfigParser()

    # create an initial configuration file if one's not present
    if not os.path.isfile(USER_CONF_FILE):
        try:
            # create ~/.spacewalk-sync-setup, owner-only (it holds passwords)
            if not os.path.isdir(CONF_DIR):
                logging.debug('Creating %s' % CONF_DIR)
                os.mkdir(CONF_DIR, 0o700)

            # create a template configuration file; 'with' guarantees the
            # handle is closed even if initializeConfig() raises (the
            # original leaked the handle in that case)
            logging.debug('Creating configuration file: %s' % USER_CONF_FILE)
            with open(USER_CONF_FILE, 'w') as handle:
                initializeConfig(opt, handle)
        except IOError:
            logging.error('Could not create %s' % USER_CONF_FILE)

    # load options from configuration file
    config.read([USER_CONF_FILE])

    return config


def getMasterConnectionInfo(opt, cfg):
    """Assemble fqdn/login/password for the MASTER from options and config.

    Command-line options win; the config file is the fallback.  Returns a
    partial dict when pieces are missing - validateConnectInfo() prompts
    for whatever is absent later.
    """
    info = {}

    info['debug'] = opt.debug

    # BUG FIX: opt.master always exists (optparse default is the
    # UNKNOWN_FQDN sentinel), so the original "'master' in opt.__dict__"
    # test was always true and the config-file default was dead code.
    if opt.master and opt.master != UNKNOWN_FQDN:
        info['fqdn'] = opt.master
    elif cfg.has_option('Default', 'master.default'):
        info['fqdn'] = cfg.get('Default', 'master.default')
    else:
        # Nothing better than the sentinel; keep it (matching the old
        # behavior) so validateConnectInfo() can detect and complain.
        info['fqdn'] = opt.master
        return info

    # Now that we have a master fqdn, we can get login info
    if opt.master_login:
        info['login'] = opt.master_login
    elif cfg.has_option(info['fqdn'], 'login'):
        info['login'] = cfg.get(info['fqdn'], 'login')
    else:
        return info

    # And finally the password
    if opt.master_password:
        info['password'] = opt.master_password
    elif cfg.has_option(info['fqdn'], 'password'):
        info['password'] = cfg.get(info['fqdn'], 'password')
    else:
        return info

    return info


def getSlaveConnectionInfo(opt, cfg):
    """Assemble fqdn/login/password for the SLAVE from options and config.

    Command-line options win; the config file is the fallback.  Returns a
    partial dict when pieces are missing - validateConnectInfo() prompts
    for whatever is absent later.
    """
    info = {}

    info['debug'] = opt.debug

    # BUG FIX: opt.slave always exists (optparse default is the
    # UNKNOWN_FQDN sentinel), so the original "'slave' in opt.__dict__"
    # test was always true and the config-file default was dead code.
    if opt.slave and opt.slave != UNKNOWN_FQDN:
        info['fqdn'] = opt.slave
    elif cfg.has_option('Default', 'slave.default'):
        info['fqdn'] = cfg.get('Default', 'slave.default')
    else:
        # Nothing better than the sentinel; keep it (matching the old
        # behavior) so validateConnectInfo() can detect and complain.
        info['fqdn'] = opt.slave
        return info

    # Now that we have a slave fqdn, we can get login info
    if opt.slave_login:
        info['login'] = opt.slave_login
    elif cfg.has_option(info['fqdn'], 'login'):
        info['login'] = cfg.get(info['fqdn'], 'login')
    else:
        return info

    # And finally the password
    if opt.slave_password:
        info['password'] = opt.slave_password
    elif cfg.has_option(info['fqdn'], 'password'):
        info['password'] = cfg.get(info['fqdn'], 'password')
    else:
        return info

    return info


def validateConnectInfo(info):
    """Ensure *info* holds fqdn, login and password, prompting for any
    missing credential; exits via fail() when no usable fqdn exists.

    Returns the (possibly augmented) info dict.
    """
    if 'fqdn' not in info or not info['fqdn'] or info['fqdn'] == UNKNOWN_FQDN:
        fail("Can't connect, I don't know what machine you want to go to!")
    elif "." not in info['fqdn']:
        fail("Machine domain name is not fully qualified!")
    elif not info.get('login'):
        info['login'] = ask("Admin login for " + info['fqdn'])
        # BUG FIX: the original tested "'login' not in info", which was
        # always false because the key was just assigned; test the value.
        if not info['login']:
            fail("Can't connect, I don't have a login to use!")

    if not info.get('password'):
        info['password'] = ask("Password for " + info['login'] + " on machine " + info['fqdn'], password=True)

    return info


def connectTo(info):
    """Open an authenticated XML-RPC session to the host described by *info*.

    Returns {'client': <server proxy>, 'key': <session key>}.
    """
    logging.debug("Connect-to info = %s" % info)
    # BUG FIX: validate (and prompt for) missing credentials *before*
    # logging them - the original read info['login'] first and raised
    # KeyError whenever the login had not been determined yet.
    info = validateConnectInfo(info)
    logging.info("Connecting to " + info['login'] + "@" + str(info['fqdn']))
    url = "https://%(fqdn)s/rpc/api" % {"fqdn": info['fqdn']}
    cnx_dbg = 1 if info['debug'] else 0
    client = xmlrpclib.Server(url, verbose=cnx_dbg)
    key = client.auth.login(info['login'], info['password'])
    return {"client": client, "key": key}


def orgByName(orgs):
    """Map org name -> org id for a list of org dicts (as returned by
    the org.listOrgs XML-RPC call)."""
    return {entry['name']: entry['id'] for entry in orgs}


def determineTemplateFilename(kind, fqdn, opt, cfg):
    """Pick the setup-file name for *kind* ('master' or 'slave').

    Precedence: explicit command-line option, then a non-empty value from
    the per-FQDN config stanza, then the built-in default.  Returns None
    for an unknown *kind*.
    """
    logging.debug("detTmplFilename kind = %s, fqdn = %s, opt = %s, cfg = %s" % (kind, fqdn, opt, cfg))

    if kind == 'master':
        if opt.master_file:
            return opt.master_file
        # CONSISTENCY FIX: ignore an empty cfg value, matching the 'slave'
        # branch (the original only applied the length check there).
        if cfg.has_option(fqdn, 'master.setup') and len(cfg.get(fqdn, 'master.setup')) != 0:
            return cfg.get(fqdn, 'master.setup')
        return DEFAULT_MASTER_SETUP_FILENAME
    if kind == 'slave':
        if opt.slave_file:
            return opt.slave_file
        if cfg.has_option(fqdn, 'slave.setup') and len(cfg.get(fqdn, 'slave.setup')) != 0:
            return cfg.get(fqdn, 'slave.setup')
        return DEFAULT_SLAVE_SETUP_FILENAME
    return None


def gen_slave_template(slave_session, master_session, master, filename, dflt_master):
    """Generate (or refresh) the slave-side setup stanza for *master*.

    Maps each master org onto the slave org with the same name (falling
    back to local org-id 1), records whether this master should be the
    slave's default, and downloads the master's CA certificate.  The
    result is written to *filename*; exits with status 1 on write failure.
    """
    logging.info("Generating slave-setup file " + filename)

    master_orgs = master_session['client'].org.listOrgs(master_session['key'])
    master_map = orgByName(master_orgs)
    logging.debug("MASTER ORG MAP %s" % master_map)

    slave_orgs = slave_session['client'].org.listOrgs(slave_session['key'])
    slave_map = orgByName(slave_orgs)
    logging.debug("SLAVE ORG MAP %s" % slave_map)

    slave_setup = SafeConfigParser()
    slave_setup.optionxform = str  # keep org-id option keys as written

    if path.isfile(filename) and access(filename, R_OK):
        # read_file replaces the deprecated readfp; 'with' closes the handle
        with open(filename) as existing:
            slave_setup.read_file(existing)

    # Overwrite anything existing for this master - we're starting over
    if slave_setup.has_section(master):
        slave_setup.remove_section(master)

    slave_setup.add_section(master)

    slave_setup.set(master, 'isDefault', '1' if dflt_master else '0')

    master_ca_cert_path = '/usr/share/rhn/' + master + '_RHN-ORG-TRUSTED-SSL-CERT'
    slave_setup.set(master, 'cacert', master_ca_cert_path)

    # NOTE(review): the CA cert is fetched over plain http; presumably
    # acceptable because it is a public certificate, but https would be
    # preferable - confirm before hardening.
    wget_cmd = 'wget -q -O ' + master_ca_cert_path + ' http://' + master + '/pub/RHN-ORG-TRUSTED-SSL-CERT'
    logging.info("About to wget master CA cert: [" + wget_cmd + "]")
    # BUG FIX: os.system() does not raise on failure, so the original
    # try/except never fired - and its logging.exception() call had no
    # message argument, which itself raises TypeError.  Check the exit
    # status instead.
    if os.system(wget_cmd) != 0:
        logging.error("...FAILED - do you have permission to write to /usr/share/rhn?")

    # One option per master org: "<master-oid>|<org-name>|<local-oid>"
    for org in master_orgs:
        local_id = slave_map.get(org['name'], 1)
        master_org = "%s|%s|%s" % (org['id'], org['name'], local_id)
        slave_setup.set(master, str(org['id']), master_org)

    try:
        with open(filename, 'w+') as configfile:
            slave_setup.write(configfile)
    except IOError:
        logging.error("FAILED to write to slave template [" + filename + "]")
        sys.exit(1)

    return


def gen_master_template(master_session, slave, filename):
    """Generate (or refresh) the master-side setup stanza for *slave*.

    The stanza enables the slave, allows all orgs, and lists every org id
    known to the connected master.  The result is written to *filename*;
    exits with status 1 on write failure.
    """
    logging.info("Generating master-setup file " + filename)

    master_setup = SafeConfigParser()
    master_setup.optionxform = str  # preserve option-name case on write

    if path.isfile(filename) and access(filename, R_OK):
        # read_file replaces the deprecated readfp; 'with' closes the handle
        with open(filename, 'r') as existing:
            master_setup.read_file(existing)

    # Overwrite anything we have for this slave - we're starting over
    if master_setup.has_section(slave):
        master_setup.remove_section(slave)

    master_setup.add_section(slave)

    # The section was just recreated, so these options are always absent;
    # the has_option() guards are kept only as cheap safety.
    if not master_setup.has_option(slave, "isEnabled"):
        master_setup.set(slave, 'isEnabled', str(1))

    if not master_setup.has_option(slave, "allowAllOrgs"):
        master_setup.set(slave, 'allowAllOrgs', str(1))

    if not master_setup.has_option(slave, "allowedOrgs"):
        idlist = [org['id'] for org in master_session['client'].org.listOrgs(master_session['key'])]
        logging.debug("idlist %s" % idlist)
        master_setup.set(slave, 'allowedOrgs', ",".join(str(i) for i in idlist))

    try:
        with open(filename, 'w+') as mfile:
            master_setup.write(mfile)
    except IOError:
        # BUG FIX: the original interpolated the file *object* (mfile) into
        # this message - unbound when open() itself failed; use the name.
        logging.error("FAILED to write to master template [" + filename + "]")
        sys.exit(1)

    return


def apply_slave_template(slave_session, slave_setup_filename):
    """Update the connected slave with information for the master(s)
    contained in the specified slave-setup-file.

    For each [fqdn] stanza: look up (or create) the master record, push
    the org mappings, and apply the optional isDefault/caCert settings.
    """
    logging.info("Applying slave-setup " + slave_setup_filename)
    client = slave_session['client']
    key = slave_session['key']

    slave_setup = SafeConfigParser()
    if path.isfile(slave_setup_filename) and access(slave_setup_filename, R_OK):
        # BUG FIX: the original opened the module-global 'filename' here
        # instead of the slave_setup_filename argument it was given.
        with open(slave_setup_filename) as setup_file:
            slave_setup.read_file(setup_file)
    else:
        fail("Can't find slave-setup file [" + slave_setup_filename + "]")

    fqdns = slave_setup.sections()

    for fqdn in fqdns:
        try:
            master = client.sync.master.getMasterByLabel(key, fqdn)
        except Exception:
            # No existing record for this master - create one
            master = client.sync.master.create(key, fqdn)

        master_orgs = []
        moids = slave_setup.options(fqdn)
        # option key is either a master-org-id, or one of (isDefault, cacert) - skip those
        for moid in moids:  # value format: moid|moname|local_oid
            if moid.lower() == "isdefault" or moid.lower() == "cacert":
                continue
            minfo = slave_setup.get(fqdn, moid)
            elts = minfo.split('|')
            orginfo = {}
            orginfo['masterOrgId'] = int(elts[0])
            orginfo['masterOrgName'] = elts[1]
            if len(elts[2]) > 0:  # local org is optional
                orginfo['localOrgId'] = int(elts[2])
            master_orgs.append(orginfo)

        client.sync.master.setMasterOrgs(key, master['id'], master_orgs)

        if slave_setup.has_option(fqdn, 'isDefault') and slave_setup.get(fqdn, 'isDefault') == '1':
            client.sync.master.makeDefault(key, master['id'])

        if slave_setup.has_option(fqdn, 'caCert'):
            client.sync.master.setCaCert(key, master['id'], slave_setup.get(fqdn, 'caCert'))

    return


def describe_slave_template(slave_setup_filename):
    """Log what apply_slave_template() *would* do for the master(s)
    contained in the specified slave-setup-file, without contacting any
    server."""
    logging.info("Applying contents of file [" + slave_setup_filename + "] to SLAVE")

    slave_setup = SafeConfigParser()
    if path.isfile(slave_setup_filename) and access(slave_setup_filename, R_OK):
        # BUG FIX: the original opened the module-global 'filename' here
        # instead of the slave_setup_filename argument it was given.
        with open(slave_setup_filename) as setup_file:
            slave_setup.read_file(setup_file)
    else:
        fail("Can't find slave-setup file [" + slave_setup_filename + "]")

    for fqdn in slave_setup.sections():
        logging.info("Updating info for master [" + fqdn + "]")
        if slave_setup.has_option(fqdn, 'isDefault') and slave_setup.get(fqdn, 'isDefault') == '1':
            logging.info("  Setting this master to the default-master for this slave")

        if slave_setup.has_option(fqdn, 'caCert'):
            logging.info("  Setting the path to this master's CA-CERT to [" + slave_setup.get(fqdn, 'caCert') + "]")

        for moid in slave_setup.options(fqdn):  # value format: moid|moname|local_oid
            if moid.lower() == "isdefault" or moid.lower() == "cacert":
                continue
            elts = slave_setup.get(fqdn, moid).split('|')
            logging.info("  Mapping master OrgId [%s], named [%s], to local OrgId [%s]" %
                         (elts[0], elts[1], elts[2]))
        logging.info("")

    return


def apply_master_template(master_session, master_setup_filename):
    """Update the connected master with information for the slave(s)
    contained in the specified master-setup-file.

    For each [fqdn] stanza: look up (or create) the slave record, update
    its enabled/allow-all flags, and push the allowed-org list.
    """
    logging.info("Applying master-setup " + master_setup_filename)
    client = master_session['client']
    key = master_session['key']

    master_setup = SafeConfigParser()
    if path.isfile(master_setup_filename) and access(master_setup_filename, R_OK):
        # BUG FIX: the original opened the module-global 'filename' here
        # instead of the master_setup_filename argument it was given.
        with open(master_setup_filename) as setup_file:
            master_setup.read_file(setup_file)
    else:
        fail("Can't find master-setup file [" + master_setup_filename + "]")

    fqdns = master_setup.sections()

    for fqdn in fqdns:
        try:
            slave = client.sync.slave.getSlaveByName(key, fqdn)
        except Exception:
            # No existing record for this slave - create one
            slave = client.sync.slave.create(key, fqdn, True, True)

        # Both flags default to enabled when the stanza omits them
        isEnabled = True
        allowAll = True

        if master_setup.has_option(fqdn, 'isEnabled'):
            isEnabled = master_setup.getboolean(fqdn, 'isEnabled')

        if master_setup.has_option(fqdn, 'allowAllOrgs'):
            allowAll = master_setup.getboolean(fqdn, 'allowAllOrgs')

        client.sync.slave.update(key, slave['id'], fqdn, isEnabled, allowAll)

        master_orgs = []
        if master_setup.has_option(fqdn, 'allowedOrgs'):
            master_orgs = [int(x) for x in master_setup.get(fqdn, 'allowedOrgs').split(',')]

        client.sync.slave.setAllowedOrgs(key, slave['id'], master_orgs)
    return


def describe_master_template(master_setup_filename):
    """Log what apply_master_template() *would* do for the slave(s)
    contained in the specified master-setup-file, without contacting any
    server."""
    logging.info("Applying contents of file [" + master_setup_filename + "] to MASTER")

    master_setup = SafeConfigParser()
    if path.isfile(master_setup_filename) and access(master_setup_filename, R_OK):
        # BUG FIX: the original opened the module-global 'filename' here
        # instead of the master_setup_filename argument it was given.
        with open(master_setup_filename) as setup_file:
            master_setup.read_file(setup_file)
    else:
        fail("Can't find master-setup file [" + master_setup_filename + "]")

    for fqdn in master_setup.sections():
        logging.info("Updating info for slave [" + fqdn + "]")

        # Flags default to enabled (1) when the stanza omits them
        if master_setup.has_option(fqdn, 'isEnabled'):
            logging.info("  isEnabled = %s" % master_setup.getboolean(fqdn, 'isEnabled'))
        else:
            logging.info("  isEnabled = 1")

        if master_setup.has_option(fqdn, 'allowAllOrgs'):
            logging.info("  allowAllOrgs = %s" % master_setup.getboolean(fqdn, 'allowAllOrgs'))
        else:
            logging.info("  allowAllOrgs = 1")

        master_orgs = []
        if master_setup.has_option(fqdn, 'allowedOrgs'):
            master_orgs = [int(x) for x in master_setup.get(fqdn, 'allowedOrgs').split(',')]
        logging.info("  allowedOrgs = %s" % master_orgs)

        logging.info("")

    return

def ask(msg, password=False):
    """Prompt the user with *msg* and return the entered string.

    With password=True the value is read without echo via getpass.

    BUG FIX: the original "password and getpass(...) or input(...)" fell
    through to a second, echoing input() prompt whenever the password came
    back empty; it also depended on salt's bundled six for a PY2 check
    that is dead code under this script's python3 shebang.
    """
    msg += ": "
    if password:
        return getpass.getpass(msg)
    return input(msg)


def fail(msg):
    """Report a fatal problem, point the user at --help, and terminate."""
    for emit, text in ((logging.error, msg),
                       (logging.info, "See spacewalk-sync-setup --help")):
        emit(text)
    sys.exit()


if __name__ == '__main__':
    # With no arguments and an existing config there is nothing useful to
    # do, so force the help screen.  (With no config present, a bare
    # invocation falls through to setupConfig's first-run wizard instead.)
    if len(sys.argv) == 1 and os.path.exists(USER_CONF_FILE):
        sys.argv.append("-h")

    parser = setupOptions()
    (options, args) = parser.parse_args()
    setupLogging(options)
    logging.debug("OPTIONS = %s" % options)

    # Loads ~/.spacewalk-sync-setup/config, creating it interactively
    # if it does not exist yet.
    config = setupConfig(options)
    logging.debug("CONFIG = %s" % config)

    # --apply and --dt need at least one host to act on, either explicit
    # (--ms/--ss) or taken from the config (--ch).
    if (options.apply or options.describe_templates) and \
            (not options.configured_hosts and \
                 not options.master and \
                 not options.slave):
        logging.info("You should pass \"--configured-hosts\" option or specify master and/or slave hosts!")
        sys.exit(1)

    # Connections are only opened for template generation or --apply;
    # the describe paths below operate purely on the local setup files.
    master_info = getMasterConnectionInfo(options, config)
    if options.master_template or options.slave_template or options.both_template or options.apply:
        master_cnx = connectTo(master_info)
        logging.debug("Master cnx = %s" % master_cnx)

    slave_info = getSlaveConnectionInfo(options, config)
    if options.master_template or options.slave_template or options.both_template or options.apply:
        slave_cnx = connectTo(slave_info)
        logging.debug("Slave cnx = %s" % slave_cnx)

    # Note the crossover: the master-side template is keyed by the
    # slave's fqdn, and the slave-side template by the master's fqdn.
    if options.master_template or options.both_template:
        filename = determineTemplateFilename('master', slave_info['fqdn'], options, config)
        gen_master_template(master_cnx, slave_info['fqdn'], filename)

    if options.slave_template or options.both_template:
        filename = determineTemplateFilename('slave', master_info['fqdn'], options, config)
        gen_slave_template(slave_cnx, master_cnx, master_info['fqdn'], filename, options.master_default)

    # Apply or describe the master-side setup.  master_cnx is defined
    # here whenever options.apply is set (see the connect block above).
    if (options.master or options.configured_hosts):
        filename = determineTemplateFilename('master', slave_info['fqdn'], options, config)
        if options.apply:
            apply_master_template(master_cnx, filename)
        elif options.describe_templates:
            describe_master_template(filename)

    # Apply or describe the slave-side setup.
    if (options.slave or options.configured_hosts):
        filename = determineTemplateFilename('slave', master_info['fqdn'], options, config)
        if options.apply:
            apply_slave_template(slave_cnx, filename)
        elif options.describe_templates:
            describe_slave_template(filename)
0707010000000B000081B400000000000000000000000162C3F37D0000894B000000000000000000000000000000000000001E00000000spacewalk-utils/COPYING.GPLv3                    GNU GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007

 Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

                            Preamble

  The GNU General Public License is a free, copyleft license for
software and other kinds of works.

  The licenses for most software and other practical works are designed
to take away your freedom to share and change the works.  By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.  We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors.  You can apply it to
your programs, too.

  When we speak of free software, we are referring to freedom, not
price.  Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.

  To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights.  Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.

  For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received.  You must make sure that they, too, receive
or can get the source code.  And you must show them these terms so they
know their rights.

  Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.

  For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software.  For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.

  Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so.  This is fundamentally incompatible with the aim of
protecting users' freedom to change the software.  The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable.  Therefore, we
have designed this version of the GPL to prohibit the practice for those
products.  If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.

  Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary.  To prevent this, the GPL assures that
patents cannot be used to render the program non-free.

  The precise terms and conditions for copying, distribution and
modification follow.

                       TERMS AND CONDITIONS

  0. Definitions.

  "This License" refers to version 3 of the GNU General Public License.

  "Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.

  "The Program" refers to any copyrightable work licensed under this
License.  Each licensee is addressed as "you".  "Licensees" and
"recipients" may be individuals or organizations.

  To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy.  The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.

  A "covered work" means either the unmodified Program or a work based
on the Program.

  To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy.  Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.

  To "convey" a work means any kind of propagation that enables other
parties to make or receive copies.  Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.

  An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License.  If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.

  1. Source Code.

  The "source code" for a work means the preferred form of the work
for making modifications to it.  "Object code" means any non-source
form of a work.

  A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.

  The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form.  A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.

  The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities.  However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work.  For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.

  The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.

  The Corresponding Source for a work in source code form is that
same work.

  2. Basic Permissions.

  All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met.  This License explicitly affirms your unlimited
permission to run the unmodified Program.  The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work.  This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.

  You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force.  You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright.  Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.

  Conveying under any other circumstances is permitted solely under
the conditions stated below.  Sublicensing is not allowed; section 10
makes it unnecessary.

  3. Protecting Users' Legal Rights From Anti-Circumvention Law.

  No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.

  When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.

  4. Conveying Verbatim Copies.

  You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.

  You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.

  5. Conveying Modified Source Versions.

  You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:

    a) The work must carry prominent notices stating that you modified
    it, and giving a relevant date.

    b) The work must carry prominent notices stating that it is
    released under this License and any conditions added under section
    7.  This requirement modifies the requirement in section 4 to
    "keep intact all notices".

    c) You must license the entire work, as a whole, under this
    License to anyone who comes into possession of a copy.  This
    License will therefore apply, along with any applicable section 7
    additional terms, to the whole of the work, and all its parts,
    regardless of how they are packaged.  This License gives no
    permission to license the work in any other way, but it does not
    invalidate such permission if you have separately received it.

    d) If the work has interactive user interfaces, each must display
    Appropriate Legal Notices; however, if the Program has interactive
    interfaces that do not display Appropriate Legal Notices, your
    work need not make them do so.

  A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit.  Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.

  6. Conveying Non-Source Forms.

  You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:

    a) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by the
    Corresponding Source fixed on a durable physical medium
    customarily used for software interchange.

    b) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by a
    written offer, valid for at least three years and valid for as
    long as you offer spare parts or customer support for that product
    model, to give anyone who possesses the object code either (1) a
    copy of the Corresponding Source for all the software in the
    product that is covered by this License, on a durable physical
    medium customarily used for software interchange, for a price no
    more than your reasonable cost of physically performing this
    conveying of source, or (2) access to copy the
    Corresponding Source from a network server at no charge.

    c) Convey individual copies of the object code with a copy of the
    written offer to provide the Corresponding Source.  This
    alternative is allowed only occasionally and noncommercially, and
    only if you received the object code with such an offer, in accord
    with subsection 6b.

    d) Convey the object code by offering access from a designated
    place (gratis or for a charge), and offer equivalent access to the
    Corresponding Source in the same way through the same place at no
    further charge.  You need not require recipients to copy the
    Corresponding Source along with the object code.  If the place to
    copy the object code is a network server, the Corresponding Source
    may be on a different server (operated by you or a third party)
    that supports equivalent copying facilities, provided you maintain
    clear directions next to the object code saying where to find the
    Corresponding Source.  Regardless of what server hosts the
    Corresponding Source, you remain obligated to ensure that it is
    available for as long as needed to satisfy these requirements.

    e) Convey the object code using peer-to-peer transmission, provided
    you inform other peers where the object code and Corresponding
    Source of the work are being offered to the general public at no
    charge under subsection 6d.

  A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.

  A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling.  In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage.  For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product.  A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.

  "Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source.  The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.

  If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information.  But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).

  The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed.  Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.

  Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.

  7. Additional Terms.

  "Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law.  If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.

  When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it.  (Additional permissions may be written to require their own
removal in certain cases when you modify the work.)  You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.

  Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:

    a) Disclaiming warranty or limiting liability differently from the
    terms of sections 15 and 16 of this License; or

    b) Requiring preservation of specified reasonable legal notices or
    author attributions in that material or in the Appropriate Legal
    Notices displayed by works containing it; or

    c) Prohibiting misrepresentation of the origin of that material, or
    requiring that modified versions of such material be marked in
    reasonable ways as different from the original version; or

    d) Limiting the use for publicity purposes of names of licensors or
    authors of the material; or

    e) Declining to grant rights under trademark law for use of some
    trade names, trademarks, or service marks; or

    f) Requiring indemnification of licensors and authors of that
    material by anyone who conveys the material (or modified versions of
    it) with contractual assumptions of liability to the recipient, for
    any liability that these contractual assumptions directly impose on
    those licensors and authors.

  All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10.  If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term.  If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.

  If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.

  Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.

  8. Termination.

  You may not propagate or modify a covered work except as expressly
provided under this License.  Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).

  However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.

  Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.

  Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License.  If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.

  9. Acceptance Not Required for Having Copies.

  You are not required to accept this License in order to receive or
run a copy of the Program.  Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance.  However,
nothing other than this License grants you permission to propagate or
modify any covered work.  These actions infringe copyright if you do
not accept this License.  Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.

  10. Automatic Licensing of Downstream Recipients.

  Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License.  You are not responsible
for enforcing compliance by third parties with this License.

  An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations.  If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.

  You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License.  For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.

  11. Patents.

  A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based.  The
work thus licensed is called the contributor's "contributor version".

  A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version.  For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.

  Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.

  In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement).  To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.

  If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients.  "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.

  If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.

  A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License.  You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.

  Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.

  12. No Surrender of Others' Freedom.

  If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License.  If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all.  For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.

  13. Use with the GNU Affero General Public License.

  Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work.  The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.

  14. Revised Versions of this License.

  The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time.  Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.

  Each version is given a distinguishing version number.  If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation.  If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.

  If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.

  Later license versions may give you additional or different
permissions.  However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.

  15. Disclaimer of Warranty.

  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

  16. Limitation of Liability.

  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.

  17. Interpretation of Sections 15 and 16.

  If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.

                     END OF TERMS AND CONDITIONS

            How to Apply These Terms to Your New Programs

  If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.

  To do so, attach the following notices to the program.  It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year>  <name of author>

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

  If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:

    <program>  Copyright (C) <year>  <name of author>
    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    This is free software, and you are welcome to redistribute it
    under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License.  Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".

  You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<http://www.gnu.org/licenses/>.

  The GNU General Public License does not permit incorporating your program
into proprietary programs.  If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library.  If this is what you want to do, use the GNU Lesser General
Public License instead of this License.  But first, please read
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
0707010000000C000081FD00000000000000000000000162C3F37D00000C85000000000000000000000000000000000000002200000000spacewalk-utils/sw-ldap-user-sync#!/usr/bin/python3

## purpose: creates new spacewalk accounts for users in a specific LDAP
##          group, removes spacewalk accounts after deleting users from a
##          specific LDAP group
## copyright: B1 Systems GmbH <info@b1-systems.de>, 2011.
## license: GPLv3+, http://www.gnu.org/licenses/gpl-3.0.html
## author: Christian Berendt <berendt@b1-systems.de>, 2011.

import logging
import ldap
import yaml
import sys

try:
    import xmlrpclib
except ImportError:
    import xmlrpc.client as xmlrpclib  # pylint: disable=F0401

logging.basicConfig(
    filename = "/var/log/sw-ldap-user-sync.log",
    filemode = "a",
    format = "%(asctime)s [%(levelname)-8s] %(message)s",
    level = logging.DEBUG
)

# The config only needs plain YAML mappings/scalars, so use safe_load:
# yaml.load() without an explicit Loader is unsafe on untrusted input and
# deprecated since PyYAML 5.1.  Close the file handle deterministically.
with open("/etc/rhn/sw-ldap-user-sync.conf") as config_fh:
    settings = yaml.safe_load(config_fh)

try:
    directory = ldap.initialize(settings["directory"]["url"])
    directory.simple_bind_s(settings["directory"]["user"],
                        settings["directory"]["password"])
except Exception as e:
    logging.error("unable to connect to LDAP server: %s" % e)
    sys.exit(1)

try:
    spacewalk = xmlrpclib.Server(settings["spacewalk"]["url"], verbose=0)
    spacewalk_token = spacewalk.auth.login(settings["spacewalk"]["user"],
        settings["spacewalk"]["password"])
except Exception as e:
    logging.error("unable to connect to spacewalk server: %s" % e)
    sys.exit(1)

# Renamed from 'filter'/'attrs' so the builtin filter() is not shadowed.
group_filter = '(objectclass=groupOfNames)'
group_attrs = ['member']

try:
    result = spacewalk.user.list_users(spacewalk_token)
except Exception as e:
    logging.error("unable to fetch user accounts from spacewalk server: %s" % e)
    sys.exit(1)

# Collect the logins of existing PAM-authenticated spacewalk accounts.
# 'use_pam' is only part of the per-user details record returned by
# user.getDetails, not of the user.list_users summary, so it must be read
# from 'detail' (the original read it from 'user', which never has the key,
# leaving 'detail' unused and 'users' always empty).
users = {}
for user in result:
    detail = spacewalk.user.getDetails(spacewalk_token, user.get('login'))
    if detail.get('use_pam'):
        users[user.get('login')] = 1

try:
    (dn, data) = directory.search_s(settings["directory"]["group"], ldap.SCOPE_SUBTREE, group_filter, group_attrs)[0]
except Exception as e:
    logging.error("unable to fetch user entries from LDAP group: %s" % e)
    sys.exit(1)

# For every member DN of the LDAP group: keep accounts that already exist
# (remove them from the deletion candidates), create the missing ones.
for uid in data['member']:
    user_filter = "(objectclass=posixAccount)"
    user_attrs = ['givenName', 'sn', 'mail', 'uid']

    try:
        (userdn, userdata) = directory.search_s(uid, ldap.SCOPE_SUBTREE, user_filter, user_attrs)[0]
        if userdata["uid"][0] in users:
            del users[userdata["uid"][0]]
        else:
            logging.info("creating new user account for ldap user %s" % userdata["uid"][0])

            try:
                # Empty password + pam_user=1: authentication is delegated to PAM.
                spacewalk.user.create(spacewalk_token, userdata["uid"][0], "",
                  userdata["givenName"][0], userdata["sn"][0], userdata["mail"][0], 1)
            except Exception as e:
                # Log the scalar uid, not the raw LDAP attribute list.
                logging.error("unable to create new user account %s on spacewalk server: %s" % (userdata["uid"][0], e))
    except Exception as e:
        logging.error("unable to fetch user details for user %s from LDAP server: %s" % (uid, e))


# Whatever is left in 'users' exists in spacewalk but not in the LDAP group:
# those accounts are removed.
for user in list(users.keys()):
    logging.info("deleting user %s" % user)

    try:
        spacewalk.user.delete(spacewalk_token, user)
    except Exception as e:
        logging.error("unable to remove user account %s from spacewalk: %s"
                % (user, e))

directory.unbind()
spacewalk.auth.logout(spacewalk_token)

0707010000000D000081FD00000000000000000000000162C3F37D0000492E000000000000000000000000000000000000002800000000spacewalk-utils/spacewalk-clone-by-date#!/usr/bin/python3
#
# Clone channels by a particular date
#
# Copyright (c) 2008--2015 Red Hat, Inc.
#
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#

import re
import sys
import datetime
import getpass
import os
from optparse import OptionParser
import simplejson as json
import socket

try:
    # python 2
    from StringIO import StringIO
except ImportError:
    from io import StringIO

_LIBPATH = "/usr/share/rhn"
if _LIBPATH not in sys.path:
    sys.path.append(_LIBPATH)

from utils import cloneByDate
from utils.cloneByDate import UserError
import warnings


# Sample configuration file printed by the -m/--sample-config option.
# The keys shown here are exactly those merged by merge_config().
# NOTE: this is a runtime string emitted to the user -- its content must
# not be reformatted.
SAMPLE_CONFIG = """
{
 "username":"admin",
 "password":"redhat",
 "assumeyes":true,
 "to_date": "2011-10-01",
 "skip_depsolve":false,
 "skip_errata_depsolve":false,
 "security_only":false,
 "use_update_date":false,
 "no_errata_sync":false,
 "dry_run":false,
 "errata": ["RHSA-2014:0043", "RHBA-2014:0085"],
 "blacklist": {
                 "ALL":["sendmail"],
                 "my-rhel5-x86_64-clone":["emacs"],
                 "my-rhel5-i386-clone":["vi", "postfix.*"]
              },
 "removelist": {
                 "ALL":["compiz", "compiz-gnome"],
                 "my-rhel5-x86_64-clone":["boost.*"]
              },
 "channels":[
             {
                "rhel-x86_64-server-5": {
                    "label": "my-rhel5-x86_64-clone",
                    "existing-parent-do-not-modify": true
                },
                "rhn-tools-rhel-x86_64-server-5": {
                    "label": "my-tools-5-x86_64-clone",
                    "name": "My Clone's Name",
                    "summary": "This is my channel's summary",
                    "description": "This is my channel's description"
                }
             },
            {
                "rhel-i386-server-5": "my-rhel5-i386-clone"
             }
           ]
}
"""


def merge_config(options):
    """
    Merge settings from the JSON config file (--config) into *options*.

    Command-line values win: a key from the config file is applied only
    when the corresponding option was not already set.  When channels were
    given via --channels, the config file is not consulted at all
    (parse_args already forbids combining --channels/--parents with
    --config).

    Raises UserError when the config file does not exist or cannot be
    parsed as JSON.
    """
    if options.channels:
        # Channels came from the command line: normalize them and return.
        options.channels = transform_arg_channels(options.channels)
        return options
    elif not options.config:
        # Nothing to merge.
        return options

    if not os.path.isfile(options.config):
        raise UserError("%s does not exist." % options.config)

    try:
        # 'with' guarantees the handle is closed (the original leaked it),
        # and json.load can read the file object directly.
        with open(options.config) as config_file:
            config = json.load(config_file)
    except (OSError, ValueError):
        # ValueError covers JSON syntax errors; OSError covers read failures.
        raise UserError("Configuration file is invalid, please check syntax.")

    # If something is in the config and not passed in as an argument,
    # add it to options.
    overwrite = ["username", "password", "blacklist", "removelist", "channels",
                 "server", "assumeyes", "to_date", "skip_depsolve", "skip_errata_depsolve",
                 "security_only", "use_update_date", "no_errata_sync",
                 "errata", 'dry_run']
    for key in overwrite:
        if key in config and not getattr(options, key):
            setattr(options, key, config[key])

    # A config file may hold a single channel tree. Transform a single
    # channel tree into a list of channel trees, which is what the rest of
    # the code expects.
    if isinstance(options.channels, dict):
        options.channels = [options.channels]

    # Guard against a config file with no "channels" key at all: the
    # original crashed here with TypeError; parse_args reports the proper
    # UserError for empty channels later.
    for channel_dict in options.channels or []:
        for key in list(channel_dict.keys()):
            # Handle the just-the-label case by wrapping it in a list.
            if isinstance(channel_dict[key], str):
                channel_dict[key] = [channel_dict[key]]

    if options.blacklist:
        validate_list_dict("blacklist", options.blacklist)
    if options.removelist:
        validate_list_dict("removelist", options.removelist)

    return options


def validate_list_dict(name, pkg_dict):
    """
    Validate that a removelist or blacklist (*pkg_dict*, reported under
    *name*) is a dict mapping channel labels to lists of package
    names/patterns.

    Raises UserError on any shape mismatch; returns None on success.
    """
    # isinstance is the idiomatic type check (was: type(...) != type({})).
    if not isinstance(pkg_dict, dict):
        raise UserError("%s  is not formatted correctly" % name)
    for key, value in pkg_dict.items():
        if not isinstance(value, list):
            raise UserError("Channel %s in %s packages not formatted correctly" % (key, name))

# Using --channels as an argument only supports a single channel 'tree',
#  so we need to convert a list of lists of channel options into a list of
#  hashes. ex:
# [
#   ["rhel-i386-server-5", "my-rhel-clone"],
#   ['rhel-child', 'clone-child', 'clone name', 'clone summary', 'clone description']
# ]
#    should become
# [{
#  "rhel-i386-server-5" : ["my-rhel-clone"],
#  'rhel-child': ['clone-child', 'clone name', 'clone summary', 'clone description']
#  }]


def transform_arg_channels(chan_list):
    """
    Convert --channels argument groups into the config-file shape: a
    single-element list holding one map of {source_label: [dest_label, ...]}.
    """
    mapping = {group[0]: group[1:] for group in chan_list}
    return [mapping]

# This hack is required because callback option types do not allow you
# to set an explicit value, like '--channels=src_label dest_label'.
# This has always worked before, and in fact the man page says that's what
# you should do, so we can't let it not work. Instead we'll have to transform
# the option to be '--channels src_label dest_label' and then pass that
# back to optparse, which will process it correctly. Life will be much easier
# when we no longer support RHEL 5 and can migrate to argparse.


class HackedOptionParser(OptionParser):
    """OptionParser that rewrites '--opt=value' into '--opt value' before
    normal processing, so callback options that consume a variable number
    of arguments (vararg_callback) also work in the '=' form that the man
    page documents."""

    def _process_long_opt(self, rargs, values):
        head = rargs[0]
        if '=' in head:
            rargs.pop(0)
            opt_name, first_value = head.split("=", 1)
            # Re-queue as two separate tokens; optparse handles the rest.
            rargs[:0] = [opt_name, first_value]
        OptionParser._process_long_opt(self, rargs, values)


def vararg_callback(option, opt_str, value, parser):
    """optparse callback that consumes all following tokens up to the next
    option-looking token and stores them as one group.  Repeated use of
    the option appends further groups, so option.dest ends up holding a
    list of lists."""
    assert value is None

    group = []
    for token in parser.rargs:
        # Anything that looks like an option ends the group: '--xyz' or
        # '-x' (a bare '-' is kept as an ordinary argument).
        if token.startswith("--") and len(token) > 2:
            break
        if token.startswith("-") and len(token) > 1:
            break
        group.append(token)

    # Remove the consumed tokens from the pending argument list.
    del parser.rargs[:len(group)]

    existing = getattr(parser.values, option.dest, None)
    if existing:
        existing.append(group)
    else:
        setattr(parser.values, option.dest, [group])


def get_localhost_fqdn():
    """
    Return the fully-qualified domain name of the current machine.

    Asks the resolver for the canonical name of the local hostname and
    falls back to socket.getfqdn() (the /etc/hosts answer) when that
    yields nothing or resolution fails.
    """
    try:
        addr_info = socket.getaddrinfo(socket.gethostname(), 0, 0, 0, 0,
                                       socket.AI_CANONNAME)
    except socket.gaierror:
        addr_info = []  # resolution failure: silently use the fallback
    except Exception as exc:
        print("Unhandled exception occurred while getting FQDN:", exc)
        addr_info = []

    fqdn = None
    for _family, _socktype, _proto, canonname, _sockaddr in addr_info:
        if canonname:
            fqdn = canonname
            break

    return fqdn or socket.getfqdn()



def parse_args():
    """
    Build the option parser, parse sys.argv, and post-process the result.

    Returns the optparse options object after merging in any --config file
    (merge_config) and normalizing the channels/blacklist/removelist
    shapes.  Raises UserError for invalid or conflicting combinations.
    May prompt interactively for the password when none was supplied, and
    exits directly for --sample-config.
    """
    parser = HackedOptionParser()
    parser.add_option("-a", "--parents", dest="parents", action='callback',
                      callback=vararg_callback, help="Already existing channel that "
                      + "will be used as parent of child channels cloned this session. "
                      + "No changes will be made to this channel unless dependency "
                      + "resolution requires it. Source parent is optional, will "
                      + "be looked up if not provided (eg. --parents [src_parent] "
                      + "dest_parent)")
    parser.add_option("-b", "--blacklist", dest="blacklist",
                      help="Comma separated list of package names (or regular "
                      + "expressions) to exclude from cloned errata (Only added "
                      + "packages will be considered).")
    parser.add_option("-c", "--config", dest="config",
                      help="Config file specifying options")
    parser.add_option("-d", "--to_date", dest="to_date",
                      help="Clone errata to the specified date (YYYY-MM-DD). "
                      + "If omitted will assume no errata.")
    parser.add_option("-e", "--errata", dest='errata', action='store',
                      help="Only clone errata in this comma separated list (and "
                      + "dependencies unless paired with --skip_depsolve) (e.g. "
                      + "--errata=RHSA-2014:0043,RHBA-2014:0085).")
    parser.add_option("-g", "--background", dest='background',
                      action='store_true', help="DEPRECATED: does nothing.")
    parser.add_option("-j", "--dry-run", dest="dry_run", action='store_true',
                      help="Creates a file for each pair of channels in the working "
                      + "directory that comprises the list of erratas that are to be cloned. "
                      + "No actual errata cloning takes place. "
                      + "Warning: If some of the destination channels do not exist, "
                      + "they will be created with the original package set.")
    parser.add_option("-k", "--skip_depsolve", dest='skip_depsolve',
                      action='store_true',
                      help="Skip all dependency solving (Not recommended).")
    parser.add_option("-l", "--channels", dest="channels", action="callback",
                      callback=vararg_callback, help="Original channel and clone "
                      + "channel labels space separated, with optional channel name and "
                      + "summary following (e.g. --channels=rhel-i386-server-5 "
                      + "myCloneLabel [myName [mySummary [myDescription]]]).  Can be specified "
                      + "multiple times.")
    parser.add_option("-m", "--sample-config", dest='sample',
                      action='store_true',
                      help="Print a sample full configuration file and exit.")
    parser.add_option("-n", "--no-errata-sync", dest="no_errata_sync",
                      action='store_true', help="Do not automatically sychronize the "
                      + "package list of cloned errata with their originals. This may "
                      + "make spacewalk-clone-by-date have unexpected results if the "
                      + "original errata have been updated (e.g.: syncing another "
                      + "architecture for a channel) since the cloned errata were "
                      + "created. If omitted we will synchronize the cloned errata "
                      + "with the originals to ensure the expected packages are "
                      + "included (default).")
    parser.add_option("-o", "--security_only", dest='security_only',
                      action='store_true',
                      help="Only clone security errata (and their dependencies).")
    parser.add_option("-p", "--password", dest="password", help="Password")
    parser.add_option("-r", "--removelist", dest="removelist",
                      help="Comma separated list of package names (or regular "
                      + "expressions) to remove from destination channel (All packages "
                      + "are available for removal).")
    parser.add_option("-s", "--server", dest="server",
                      help="Server URL to use for api connections (defaults to %default)",
                      default="https://" + get_localhost_fqdn() + "/rpc/api")
    parser.add_option("-u", "--username", dest="username", help="Username")
    parser.add_option("-v", "--validate", dest='validate', action='store_true',
                      help="Run repoclosure on the set of specified repositories.")
    parser.add_option("-y", "--assumeyes", dest='assumeyes',
                      action='store_true',
                      help="Assume yes for any prompts (unattended).")
    parser.add_option("-x", "--skip-errata-depsolve", dest="skip_errata_depsolve",
                       action='store_true', help="When pulling in an erratum to satisfy "
                       + "dependency-resolution, DO NOT add that erratum's packages to the "
                       + "list of packages to do dependency-resolution against. This will "
                       + "result in fewer RPMs/errata being included for "
                       + "dependency-resolution (sometimes MANY fewer) at the possible "
                       + "expense of a cloned channel that is not dependency-complete. If "
                       + "ommitted, we will add an erratum's RPMs to the list required for "
                       + "dependency-resolution and recurse on the list (default).")
    parser.add_option("-z", "--use-update-date", dest="use_update_date",
                      action='store_true', help="While cloning errata by date, clone "
                      + "all errata that have last been updated on or before the date "
                      + "provided by to_date. If omitted will use issue date of errata "
                      + "(default).")

    (options, args) = parser.parse_args()

    # --parents uses vararg_callback, which wraps its values one list
    # deeper than needed here (it was built for the repeatable --channels):
    # unwrap, then require 1 or 2 labels ([src_parent] dest_parent).
    if options.parents != None:
        # vararg_callback was designed for use with --channels, fix
        options.parents = options.parents[0]
        parent_len = len(options.parents)
        if (parent_len != 1 and parent_len != 2):
            raise UserError("The -a / --parents option requires an argument")

    # have to check this option before we merge with the config file to
    # ensure that optparse is parsing the args correctly. We have to
    # check it again after the config file merge to make sure we have
    # channels.
    if options.channels != None:
        for channel_group in options.channels:
            if (len(channel_group) < 2 or len(channel_group) > 5):
                raise UserError("The -l / --channels option requires two to "
                                + "five arguments")

    # --sample-config prints the example and exits immediately.
    if options.sample:
        print(SAMPLE_CONFIG)
        sys.exit(0)

    # --config is mutually exclusive with the command-line channel forms.
    if options.config and options.channels:
        raise UserError("Cannot specify both --channels and --config.")

    if options.config and options.parents:
        raise UserError("Cannot specify both --parents and --config.")

    # Comma-separated CLI lists become the {"ALL": [...]} map shape that
    # the config file uses (see SAMPLE_CONFIG).
    if options.blacklist:
        options.blacklist = {"ALL": options.blacklist.split(",")}

    if options.removelist:
        options.removelist = {"ALL": options.removelist.split(",")}

    if options.errata:
        options.errata = options.errata.split(',')

    # Fold in values from the --config file (CLI values win).
    options = merge_config(options)

    if options.errata and options.to_date:
        raise UserError("Cannot specify both --to_date and --errata.")

    if options.errata and options.security_only:
        raise UserError("Cannot specify both --security_only and --errata.")

    if options.channels == None or len(options.channels) == 0:
        raise UserError("No channels specified. See --help for details.")

    if not options.username:
        raise UserError("Username not specified")

    if not options.validate:
        if options.to_date:
            # Validate/convert the date string up front (raises UserError).
            options.to_date = parse_time(options.to_date.strip())

    if not options.password:
        # Prompt interactively rather than requiring the password on argv.
        options.password = getpass.getpass()

    # Remove whitespace for bug 885782. Since bug 830609 we can no longer
    # just remove all whitespace from the config file, may have spaces in
    # channel name or description.
    options.username = options.username.strip()
    options.password = options.password.strip()
    options.server = options.server.strip()
    if options.errata:
        errata_list = []
        for errata in options.errata:
            errata_list.append(errata.strip())
        options.errata = errata_list
    # Strip whitespace from blacklist/removelist keys and package entries,
    # re-keying the dict where a key itself carried whitespace.
    for option in [getattr(options, 'blacklist', None),
                   getattr(options, 'removelist', None)]:
        if option:
            for key in list(option.keys())[:]:
                if key != key.strip():
                    option[key.strip()] = option[key]
                    del option[key]
                    key = key.strip()
                my_list = []
                for element in option[key]:
                    my_list.append(element.strip())
                option[key] = my_list
    # Same whitespace cleanup for every channel tree: the channel labels
    # (keys) and either the list form or the dict form of the clone spec.
    for channel_tree in options.channels:
        for channel in list(channel_tree.keys())[:]:
            if channel != channel.strip():
                channel_tree[channel.strip()] = channel_tree[channel]
                del channel_tree[channel]
                channel = channel.strip()
            if type(channel_tree[channel]) == list:
                my_list = []
                for element in channel_tree[channel]:
                    my_list.append(element.strip())
                channel_tree[channel] = my_list
            elif type(channel_tree[channel]) == dict:
                my_dict = {}
                for key in list(channel_tree[channel].keys()):
                    if type(channel_tree[channel][key]) == str:
                        my_dict[key.strip()] = channel_tree[channel][key].strip()
                    else:
                        my_dict[key.strip()] = channel_tree[channel][key]
                channel_tree[channel] = my_dict

    return options


def parse_time(time_str):
    """
    Parse a YYYY-MM-DD string into a datetime.datetime (at midnight).

    Raises UserError for a malformed string or an impossible calendar
    date.  The pattern is now anchored at both ends: the original used an
    unanchored re.match, so input like '2011-10-01-garbage' was silently
    accepted (split('-') produced extra parts that were ignored).
    """
    match = re.match(r'([0-9]{4})-([0-9]{2})-([0-9]{2})$', time_str)
    if not match:
        raise UserError("Invalid date format (%s), expected YYYY-MM-DD" %
                        time_str)
    try:
        # ValueError covers out-of-range month/day from the constructor.
        return datetime.datetime(*(int(part) for part in match.groups()))
    except ValueError:
        raise UserError("Invalid date (%s)" % time_str)


def systemExit(code, msgs=None):
    """
    Write optional message(s) to stderr, one per line, then exit with *code*.

    msgs may be a single object or a list/tuple of objects; each item is
    converted with str().  Saved a few lines of code at the call sites.
    """
    if msgs:
        # isinstance is the idiomatic check (was: type(msgs) not in [...]).
        if not isinstance(msgs, (list, tuple)):
            msgs = (msgs, )
        for msg in msgs:
            sys.stderr.write(str(msg) + '\n')
    sys.exit(code)


def main():
    """Parse arguments and run the clone; map interrupts and user errors
    to appropriate exit codes."""
    try:
        return cloneByDate.main(parse_args())
    except KeyboardInterrupt:
        systemExit(0, "\nUser interrupted process.")
    except UserError as error:
        systemExit(-1, "\n%s" % error)
    return 0


if __name__ == '__main__':
    # Force the deprecation warning to be shown (DeprecationWarning is
    # filtered out by default).  The f-string prefix was dropped: the
    # message contains no placeholders, so the runtime text is unchanged.
    warnings.simplefilter('always', DeprecationWarning)
    warnings.warn('spacewalk-clone-by-date is DEPRECATED. Please use Content Lifecycle Management Tool.', DeprecationWarning, stacklevel=2)
    try:
        sys.exit(abs(main() or 0))
    except KeyboardInterrupt:
        systemExit(0, "\nUser interrupted process.")
0707010000000E000081B400000000000000000000000162C3F37D00004643000000000000000000000000000000000000001E00000000spacewalk-utils/COPYING.GPLv2		    GNU GENERAL PUBLIC LICENSE
		       Version 2, June 1991

 Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

			    Preamble

  The licenses for most software are designed to take away your
freedom to share and change it.  By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users.  This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it.  (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.)  You can apply it to
your programs, too.

  When we speak of free software, we are referring to freedom, not
price.  Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.

  To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.

  For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have.  You must make sure that they, too, receive or can get the
source code.  And you must show them these terms so they know their
rights.

  We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.

  Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software.  If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.

  Finally, any free program is threatened constantly by software
patents.  We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary.  To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.

  The precise terms and conditions for copying, distribution and
modification follow.

		    GNU GENERAL PUBLIC LICENSE
   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION

  0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License.  The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language.  (Hereinafter, translation is included without limitation in
the term "modification".)  Each licensee is addressed as "you".

Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope.  The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.

  1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.

You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.

  2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:

    a) You must cause the modified files to carry prominent notices
    stating that you changed the files and the date of any change.

    b) You must cause any work that you distribute or publish, that in
    whole or in part contains or is derived from the Program or any
    part thereof, to be licensed as a whole at no charge to all third
    parties under the terms of this License.

    c) If the modified program normally reads commands interactively
    when run, you must cause it, when started running for such
    interactive use in the most ordinary way, to print or display an
    announcement including an appropriate copyright notice and a
    notice that there is no warranty (or else, saying that you provide
    a warranty) and that users may redistribute the program under
    these conditions, and telling the user how to view a copy of this
    License.  (Exception: if the Program itself is interactive but
    does not normally print such an announcement, your work based on
    the Program is not required to print an announcement.)

These requirements apply to the modified work as a whole.  If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works.  But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.

Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.

In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.

  3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:

    a) Accompany it with the complete corresponding machine-readable
    source code, which must be distributed under the terms of Sections
    1 and 2 above on a medium customarily used for software interchange; or,

    b) Accompany it with a written offer, valid for at least three
    years, to give any third party, for a charge no more than your
    cost of physically performing source distribution, a complete
    machine-readable copy of the corresponding source code, to be
    distributed under the terms of Sections 1 and 2 above on a medium
    customarily used for software interchange; or,

    c) Accompany it with the information you received as to the offer
    to distribute corresponding source code.  (This alternative is
    allowed only for noncommercial distribution and only if you
    received the program in object code or executable form with such
    an offer, in accord with Subsection b above.)

The source code for a work means the preferred form of the work for
making modifications to it.  For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable.  However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.

If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.

  4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License.  Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.

  5. You are not required to accept this License, since you have not
signed it.  However, nothing else grants you permission to modify or
distribute the Program or its derivative works.  These actions are
prohibited by law if you do not accept this License.  Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.

  6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions.  You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.

  7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License.  If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all.  For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.

If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.

It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices.  Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.

This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.

  8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded.  In such case, this License incorporates
the limitation as if written in the body of this License.

  9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time.  Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.

Each version is given a distinguishing version number.  If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation.  If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.

  10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission.  For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this.  Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.

			    NO WARRANTY

  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.

  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.

		     END OF TERMS AND CONDITIONS

	    How to Apply These Terms to Your New Programs

  If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.

  To do so, attach the following notices to the program.  It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year>  <name of author>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

Also add information on how to contact you by electronic and paper mail.

If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:

    Gnomovision version 69, Copyright (C) year name of author
    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    This is free software, and you are welcome to redistribute it
    under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License.  Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.

You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary.  Here is a sample; alter the names:

  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
  `Gnomovision' (which makes passes at compilers) written by James Hacker.

  <signature of Ty Coon>, 1 April 1989
  Ty Coon, President of Vice

This General Public License does not permit incorporating your program into
proprietary programs.  If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library.  If this is what you want to do, use the GNU Lesser General
Public License instead of this License.
0707010000000F000081FD00000000000000000000000162C3F37D000050FE000000000000000000000000000000000000002A00000000spacewalk-utils/spacewalk-common-channels#!/usr/bin/python3
#
# Copyright (c) 2010--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#

import sys
import fnmatch
from optparse import OptionParser, Option
import re
from uyuni.common.cli import getUsernamePassword, xmlrpc_login, xmlrpc_logout

try:
    import xmlrpclib
except ImportError:
    import xmlrpc.client as xmlrpclib  # pylint: disable=F0401

try:
    import ConfigParser
except ImportError:
    import configparser as ConfigParser

# XML-RPC server contacted when -s/--server is not given
DEFAULT_SERVER = "localhost"

# INI file describing the known common channels (-c/--config overrides)
DEFAULT_CONFIG = '/etc/rhn/spacewalk-common-channels.ini'

# repository type used when a section does not set 'repo_type'
DEFAULT_REPO_TYPE = 'yum'

# arch token (as used in the INI file) -> server-side channel arch label
CHANNEL_ARCH = {
    'aarch64':      'channel-aarch64',
    'i386':         'channel-ia32',
    'ia64':         'channel-ia64',
    'sparc':        'channel-sparc',
    'sparc64':      'channel-sparc64',
    'alpha':        'channel-alpha',
    's390':         'channel-s390',
    's390x':        'channel-s390x',
    'iSeries':      'channel-iSeries',
    'pSeries':      'channel-pSeries',
    'x86_64':       'channel-x86_64',
    'ppc':          'channel-ppc',
    'ppc64':        'channel-ppc64',
    'ppc64le':      'channel-ppc64le',
    'amd64-deb':    'channel-amd64-deb',
    'ia32-deb':     'channel-ia32-deb',
    'ia64-deb':     'channel-ia64-deb',
    'sparc-deb':    'channel-sparc-deb',
    'alpha-deb':    'channel-alpha-deb',
    's390-deb':     'channel-s390-deb',
    'powerpc-deb':  'channel-powerpc-deb',
    'arm-deb':      'channel-arm-deb',
    'mips-deb':     'channel-mips-deb'

}
# human-readable arch name (as returned by channel.software.getDetails
# in 'arch_name') -> arch token used as CHANNEL_ARCH key
# NOTE(review): some entries here ('arm64-deb', 'armhfp', 'arm') have no
# CHANNEL_ARCH counterpart, and 'sparc64'/'iSeries' appear only in
# CHANNEL_ARCH -- confirm whether the asymmetry is intended
CHANNEL_NAME_TO_ARCH = {
    'AArch64': 'aarch64',
    'Alpha': 'alpha',
    'Alpha Debian': 'alpha-deb',
    'AMD64 Debian': 'amd64-deb',
    'ARM64 Debian': 'arm64-deb',
    'arm Debian': 'arm-deb',
    'ARM hard. FP':  'armhfp',
    'ARM soft. FP': 'arm',
    'IA-32':  'i386',
    'IA-32 Debian': 'ia32-deb',
    'IA-64': 'ia64',
    'IA-64 Debian': 'ia64-deb',
    'iSeries': 'iSeries',
    'mips Debian': 'mips-deb',
    'PowerPC Debian': 'powerpc-deb',
    'PPC': 'ppc',
    'PPC64LE': 'ppc64le',
    'pSeries': 'pSeries',
    's390': 's390',
    's390 Debian': 's390-deb',
    's390x': 's390x',
    'Sparc': 'sparc',
    'Sparc Debian': 'sparc-deb',
    'x86_64': 'x86_64'
}

# separator pattern for multi-valued INI options (spaces and/or commas)
SPLIT_PATTERN = '[ ,]+'


class ExtOptionParser(OptionParser):

    """OptionParser that appends free-form usage examples to --help output."""

    def __init__(self, examples=None, **kwargs):
        # examples: text printed after the generated option listing;
        # may be None, in which case plain OptionParser output is kept
        self.examples = examples
        OptionParser.__init__(self, **kwargs)

    def print_help(self):
        OptionParser.print_help(self)
        # BUGFIX: with the default examples=None the unconditional
        # print("\n\n" + self.examples) raised TypeError
        if self.examples:
            print("\n\n" + self.examples)


def connect(user, password, server):
    """Log in to the XML-RPC API on *server*; return (client, session key).

    Prompts for missing credentials via getUsernamePassword() and stores
    the resolved username back into the module-level ``options``.
    """
    server_url = "http://%s/rpc/api" % server

    # verbosity beyond -vv is forwarded to the XML-RPC transport
    if options.verbose and options.verbose > 2:
        transport_verbosity = options.verbose - 2
    else:
        transport_verbosity = 0

    if options.verbose:
        sys.stdout.write("Connecting to %s\n" % server_url)

    client = xmlrpclib.Server(server_url, verbose=transport_verbosity)
    options.user, password = getUsernamePassword(user, password)
    session_key = xmlrpc_login(client, options.user, password)
    return client, session_key


def add_channels(channels, section, arch, client):
    """Populate *channels* (label -> definition dict) from config *section*
    for architecture *arch*.

    For a child section (one declaring 'base_channels') an entry is added
    for every base channel that is defined in the INI file or already
    exists on the server; base-channel values are exported into the section
    as ``base_channel_*`` options so they can be used as macros.
    Mutates the module-level ``config`` parser.
    """
    base_channels = ['']
    # options that may be absent from the section (default to '')
    optional = ['activationkey', 'base_channel_activationkey', 'gpgkey_url',
                'gpgkey_id', 'gpgkey_fingerprint', 'repo_url',
                'yum_repo_label', 'dist_map_release', 'repo_type']
    # options every section must provide ('arch'/'section' are set below)
    mandatory = ['label', 'name', 'summary', 'checksum', 'arch', 'section']

    config.set(section, 'arch', arch)
    config.set(section, 'section', section)
    if config.has_option(section, 'base_channels'):
        # BUGFIX: the previous call passed maxsplit=1, so only the first
        # separator was honored and any third (or later) base channel was
        # left glued to the second one; split unbounded, consistent with
        # how 'archs' is split in __main__
        base_channels = re.split(SPLIT_PATTERN,
                                 config.get(section, 'base_channels'))

    for base_channel in base_channels:
        config.set(section, 'base_channel', base_channel)
        channel = {'base_channel': config.get(section, 'base_channel')}

        if base_channel:
            if channel['base_channel'] in channels:
                # base channel already defined via the INI file -- use it
                pass
            elif channel_exists(client, channel['base_channel']):
                # otherwise, if the channel exists on the server, convert
                # its details into our INI-style definition
                channel_base_server = channel_get_details(client, channel['base_channel'])
                channels[channel['base_channel']] = {'label': channel_base_server['label'],
                                                     'name' : channel_base_server['name'],
                                                     'summary': channel_base_server['summary'],
                                                     'arch': CHANNEL_NAME_TO_ARCH[channel_base_server['arch_name']],
                                                     'checksum': channel_base_server['checksum_label'],
                                                     'gpgkey_url': channel_base_server['gpg_key_url'],
                                                     'gpgkey_id': channel_base_server['gpg_key_id'],
                                                     'gpgkey_fingerprint': channel_base_server['gpg_key_fp'],
                                                     'section': section}
            else:
                # otherwise there is no such base channel, so skip the child too
                continue
            # set base channel values so they can be used as macros
            for (k, v) in list(channels[channel['base_channel']].items()):
                config.set(section, 'base_channel_' + k, v)

        for k in optional:
            if config.has_option(section, k):
                channel[k] = config.get(section, k)
            else:
                channel[k] = ''
        for k in mandatory:
            channel[k] = config.get(section, k)
        channels[channel['label']] = channel


def channel_exists(client, channel_label):
    """Return the server's isExisting() result for *channel_label*,
    or None when the XML-RPC call faults.

    NOTE(review): a connection-level failure surfaced as a Fault is
    indistinguishable here from "channel not found".
    """
    try:
        return client.channel.software.isExisting(key, channel_label)
    except xmlrpclib.Fault:
        return None


def channel_get_details(client, channel_label):
    """Return the getDetails() struct for *channel_label*, or None when
    the XML-RPC call faults.

    NOTE(review): a connection-level failure surfaced as a Fault is
    indistinguishable here from "channel not found".
    """
    try:
        return client.channel.software.getDetails(key, channel_label)
    except xmlrpclib.Fault:
        return None

def get_existing_repos(client):
    """Return a mapping of source URL -> repo label for the current user's
    repositories, or None when the XML-RPC call faults.
    """
    try:
        user_repos = client.channel.software.listUserRepos(key)
    except xmlrpclib.Fault:
        return None
    return {entry['sourceUrl']: entry['label'] for entry in user_repos}


if __name__ == "__main__":
    # options parsing
    usage = "usage: %prog [options] <channel1 glob> [<channel2 glob> ... ]"
    # examples text is appended to --help output by ExtOptionParser
    examples = """Examples:

Create Fedora 12 channel, its child channels and activation key limited to 10 servers:
    %(prog)s -u admin -p pass -k 10 'fedora12*'

Create Centos 5 with child channels only on x86_64:
    %(prog)s -u admin -p pass -a x86_64 'centos5*'

Create only Centos 4 base channels for intel archs:
    %(prog)s -u admin -p pass -a i386,x86_64 'centos4'

Create Spacewalk client child channel for every (suitable) defined base channel:
    %(prog)s -u admin -p pass 'spacewalk-client*'

Create everything as well as unlimited activation key for every channel:
    %(prog)s -u admin -p pass -k unlimited '*'
\n""" % {'prog': sys.argv[0]}

    option_list = [
        Option("-c", "--config", help="configuration file",
               default=DEFAULT_CONFIG),
        Option("-u", "--user", help="username"),
        Option("-p", "--password", help="password"),
        Option("-s", "--server", help="your spacewalk server",
               default=DEFAULT_SERVER),
        Option("-k", "--keys", help="activation key usage limit -"
               + " 'unlimited' or number\n"
               + "(default: options is not set and activation keys"
               + " are not created at all)",
               dest="key_limit"),
        Option("-n", "--dry-run", help="perform a trial run with no changes made",
               action="store_true"),
        Option("-a", "--archs", help="list of architectures"),
        Option("-v", "--verbose", help="verbose", action="count"),
        Option("-l", "--list", help="print list of available channels",
               action="store_true"),
        Option("-d", "--default-channels", help="make base channels default channels for given OS version",
               action="store_true"),
    ]

    parser = ExtOptionParser(usage=usage, option_list=option_list, examples=examples)
    (options, args) = parser.parse_args()
    config = ConfigParser.ConfigParser()
    config.read(options.config)

    # -l/--list: only print the channels configured in the INI file and exit
    if options.list:
        print("Available channels:")
        channel_list = config.sections()
        if channel_list:
            for channel in sorted(channel_list):
                channel_archs = config.get(channel, 'archs')
                print(" %-20s %s" % (channel + ":", channel_archs))
        else:
            print(" [no channel available]")
        sys.exit(0)

    if not args:
        # BUGFIX: print_help() writes to stdout itself and returns None;
        # wrapping it in print() emitted a stray "None" line after the help
        parser.print_help()
        parser.exit()

    try:
        # log in and look up the caller's organization id (used later to
        # build child activation key names)
        client, key = connect(options.user, options.password, options.server)
        user_info = client.user.getDetails(key, options.user)
        org_id = user_info['org_id']
    except xmlrpclib.Fault as e:
        if e.faultCode == 2950:
            sys.stderr.write("Either the password or username is incorrect.\n")
            sys.exit(2)
        else:
            raise

    # label -> channel definition dict, filled by add_channels()
    channels = {}

    sections = []
    # sort base channels first and child last
    for section in config.sections():
        if config.has_option(section, 'base_channels'):  # child
            sections.append(section)
        else:                                           # base
            sections.insert(0, section)
    for section in sections:
        archs = re.split(SPLIT_PATTERN, config.get(section, 'archs'))
        if options.archs:
            # filter out archs not set on commandline
            archs = [a for a in archs if a in options.archs]
        for arch in archs:
            add_channels(channels, section, arch, client)

    # base channels to deal with: label -> True when we must create it
    base_channels = {}
    # list of child_channels for given base_channel
    child_channels = {}
    # labels matched by ANY pattern
    # BUGFIX: matching_channels used to be rebound on every pattern
    # iteration, so the emptiness check below only saw the LAST glob and
    # the script aborted even when earlier globs had matched channels
    matching_channels = []
    # filter out non-matching channels
    for pattern in args:
        matched = [n for n in list(channels.keys())
                   if fnmatch.fnmatch(channels[n]['section'], pattern)]
        matching_channels.extend(matched)
        for name in matched:
            attr = channels[name]
            if 'base_channel' in attr and attr['base_channel']:
                # child channel: register it under its base channel
                if attr['base_channel'] not in base_channels:
                    base_channels[attr['base_channel']] = False
                if attr['base_channel'] in child_channels:
                    child_channels[attr['base_channel']].append(name)
                else:
                    child_channels[attr['base_channel']] = [name]
            else:
                # this channel is base channel
                base_channels[name] = True
                if name not in child_channels:
                    child_channels[name] = []

    if not matching_channels:
        sys.stderr.write("No channels matching your selection.\n")
        sys.exit(2)

    # abort early if the user's existing repositories cannot be listed
    existing_repo_urls = get_existing_repos(client)
    if existing_repo_urls is None:
        # (typo "exsiting" fixed in the user-facing message)
        sys.stderr.write("Unable to get existing repositories from server.\n")
        sys.exit(2)

    # walk base channels in stable (sorted) order: create the ones flagged
    # True, then handle activation keys, dist-channel maps and children
    for (base_channel_label, create_channel) in sorted(base_channels.items()):

        if create_channel:
            base_info = channels[base_channel_label]
            # an empty 'repo_type' in the INI also falls back to the default
            repo_type = base_info.get('repo_type', DEFAULT_REPO_TYPE) or DEFAULT_REPO_TYPE
            if options.verbose:
                sys.stdout.write("Base channel '%s' - creating...\n"
                                 % base_info['name'])
            if options.verbose and options.verbose > 1:
                sys.stdout.write(
                    "* label=%s, summary=%s, arch=%s, repo_type=%s, checksum=%s\n" % (
                        base_info['label'], base_info['summary'],
                        base_info['arch'], repo_type,
                        base_info['checksum']))

            if not options.dry_run:
                try:
                    # create base channel
                    client.channel.software.create(key,
                                                   base_info['label'], base_info['name'],
                                                   base_info['summary'], CHANNEL_ARCH[base_info['arch']],
                                                   '', base_info['checksum'],
                                                   {'url': base_info['gpgkey_url'],
                                                    'id': base_info['gpgkey_id'],
                                                    'fingerprint': base_info['gpgkey_fingerprint']})
                    if base_info['repo_url'] in existing_repo_urls:
                        # use existing repo
                        client.channel.software.associateRepo(key,
                                                              base_info['label'],
                                                              existing_repo_urls[base_info['repo_url']])
                    else:
                        # create a new repo and attach it to the channel
                        client.channel.software.createRepo(key,
                                                           base_info['yum_repo_label'], repo_type,
                                                           base_info['repo_url'])
                        client.channel.software.associateRepo(key,
                                                              base_info['label'], base_info['yum_repo_label'])
                except xmlrpclib.Fault as e:
                    if e.faultCode == 1200:  # ignore if channel exists
                        sys.stdout.write("INFO: %s exists\n" % base_info['label'])
                    else:
                        sys.stderr.write("ERROR: %s: %s\n" % (
                            base_info['label'], e.faultString))
                        sys.exit(2)

            if options.key_limit is not None:
                if options.verbose:
                    sys.stdout.write("* Activation key '%s' - creating...\n" % (
                        base_info['label']))
                if not options.dry_run:
                    # create activation key
                    if options.key_limit == 'unlimited':
                        # unlimited keys use the shorter API signature
                        ak_args = (key, base_info['activationkey'],
                                   base_info['name'], base_info['label'],
                                   [], False)
                    else:
                        ak_args = (key, base_info['activationkey'],
                                   base_info['name'], base_info['label'],
                                   int(options.key_limit), [], False)
                    try:
                        client.activationkey.create(*ak_args)
                    except xmlrpclib.Fault as e:
                        if e.faultCode != 1091:  # ignore if ak exists
                            sys.stderr.write("ERROR: %s: %s\n" % (
                                base_info['label'], e.faultString))
        else:
            # check whether channel exists
            if channel_exists(client, base_channel_label):
                base_info = channel_get_details(client, base_channel_label)
                sys.stdout.write("Base channel '%s' - exists\n" % base_info['name'])
            else:
                sys.stderr.write("ERROR: %s could not be found at the server\n" % base_channel_label)

        if options.default_channels:
            # -d: register this base channel as the org's default for the
            # configured OS release
            # NOTE(review): if the lookup just above failed, base_info may
            # be stale (previous iteration) or unbound here -- confirm
            try:
                client.distchannel.setMapForOrg(key,
                                                base_info['name'], base_info['dist_map_release'],
                                                base_info['arch'], base_info['label'])

            except xmlrpclib.Fault as e:
                sys.stderr.write("ERROR: %s: %s\n" % (
                    base_info['label'], e.faultString))

        for child_channel_label in sorted(child_channels[base_channel_label]):
            child_info = channels[child_channel_label]
            # an empty 'repo_type' in the INI also falls back to the default
            repo_type = child_info.get('repo_type', DEFAULT_REPO_TYPE) or DEFAULT_REPO_TYPE
            if options.verbose:
                sys.stdout.write("* Child channel '%s' - creating...\n"
                                 % child_info['name'])
            if options.verbose and options.verbose > 1:
                sys.stdout.write(
                    "** label=%s, summary=%s, arch=%s, parent=%s, repo_type=%s, checksum=%s\n"
                    % (child_info['label'], child_info['summary'],
                       child_info['arch'], base_channel_label, repo_type,
                       child_info['checksum']))

            if not options.dry_run:
                try:
                    # create child channels
                    client.channel.software.create(key,
                                                   child_info['label'], child_info['name'],
                                                   child_info['summary'],
                                                   CHANNEL_ARCH[child_info['arch']], base_channel_label,
                                                   child_info['checksum'],
                                                   {'url': child_info['gpgkey_url'],
                                                       'id': child_info['gpgkey_id'],
                                                       'fingerprint': child_info['gpgkey_fingerprint']})
                    if child_info['repo_url'] in existing_repo_urls:
                        # use existing repo
                        client.channel.software.associateRepo(key,
                                                              child_info['label'],
                                                              existing_repo_urls[child_info['repo_url']])
                    else:
                        # create a new repo and attach it to the channel
                        client.channel.software.createRepo(key,
                                                           child_info['yum_repo_label'], repo_type,
                                                           child_info['repo_url'])
                        client.channel.software.associateRepo(key,
                                                              child_info['label'], child_info['yum_repo_label'])
                except xmlrpclib.Fault as e:
                    if e.faultCode == 1200:  # ignore if channel exists
                        sys.stderr.write("WARNING: %s: %s\n" % (
                            child_info['label'], e.faultString))
                    else:
                        sys.stderr.write("ERROR: %s: %s\n" % (
                            child_info['label'], e.faultString))
                        sys.exit(2)

            if options.key_limit is not None:
                if ('base_channel_activationkey' in child_info
                        and child_info['base_channel_activationkey']):
                    # child keys are named "<org_id>-<base channel key>"
                    activationkey = "%s-%s" % (
                        org_id, child_info['base_channel_activationkey'])
                    if options.verbose:
                        sys.stdout.write(
                            "** Activation key '%s' - adding child channel...\n" % (
                                activationkey))
                    if not options.dry_run:
                        try:
                            client.activationkey.addChildChannels(key,
                                                                  activationkey, [child_info['label']])
                        except xmlrpclib.Fault as e:
                            sys.stderr.write("ERROR: %s: %s\n" % (
                                child_info['label'], e.faultString))

        if options.verbose:
            # an empty line after channel group
            sys.stdout.write("\n")
        # refresh so repos created in this iteration can be reused by the next
        existing_repo_urls = get_existing_repos(client)
    if client is not None:
        # logout
        xmlrpc_logout(client, key)
07070100000010000081FD00000000000000000000000162C3F37D00000F32000000000000000000000000000000000000001E00000000spacewalk-utils/spacewalk-api#!/usr/bin/python3
#
# Copyright (c) 2017 Red Hat, Inc.
#
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#

from getpass import getpass
from optparse import Option, OptionParser
import io
import re
import sys

try:
    import xmlrpclib
except ImportError:
    import xmlrpc.client as xmlrpclib  # pylint: disable=F0401

def processCommandline(argv):
    """Parse the command line for the generic Spacewalk XML-RPC client.

    Returns a tuple ``(options, unparsed)`` where ``unparsed`` holds the
    positional arguments (the API method name followed by its parameters).
    Prompts interactively for missing credentials unless --nologin is given;
    exits with status 1 when no server was specified.
    """
    optionsTable = [
        Option('--server', action='store', dest='server',
            help='URL of your Spacewalk server.'),
        Option('--user', action='store', dest='username',
            help='Name of user to log in.'),
        Option('--password', action='store', dest='password',
            help='If you do not specify this and unless --nologin is specified, you will be prompted for your password.'),
        Option('--login', action='store_true', dest='login', default=True,
            help='If we should log in or not. Default is to log in.'),
        Option('--nologin', action='store_false', dest='login',
            help='If we should log in or not. Default is to log in.'),
    ]
    optionParser = OptionParser(
        usage="Usage: %s --server=<server> [--login] [--user=<user>] [--password=<password>]" % sys.argv[0],
        option_list=optionsTable)

    options, unparsed = optionParser.parse_args(argv[1:])

    if not options.server:
        sys.stderr.write('Error: No server specified.\n')
        sys.exit(1)

    if not options.username and options.login:
        # read straight from the controlling terminal so this works even
        # when stdin is redirected
        with io.TextIOWrapper(open('/dev/tty', 'r+b', buffering=0)) as tty:
            tty.write('Enter username: ')
            try:
                # BUGFIX: readline() keeps the trailing newline; strip it so
                # the username is not sent to the server with '\n' appended
                options.username = tty.readline().rstrip('\n')
            except KeyboardInterrupt:
                tty.write("\n")
                sys.exit(0)

    if not options.password and options.login:
        options.password = getpass('Enter your password: ')

    return options, unparsed

if __name__ == '__main__':
    options, unparsed = processCommandline(sys.argv)

    client = xmlrpclib.Server('http://%s/rpc/api' % options.server, verbose=0)
    session = ''
    if options.login:
        try:
            session = client.auth.login(options.username, options.password)
        except xmlrpclib.Fault:
            sys.stderr.write('Error: %s \n' % str(sys.exc_info()[1]))
            sys.exit(1)

    # Compile the parameter-substitution patterns once; the original
    # compiled and searched each pattern twice per argument.
    file_re = re.compile('%file:(.*)%')
    boolean_re = re.compile('%boolean:(.*)%')
    integer_re = re.compile('%integer:(.*)%')
    string_re = re.compile('%string:(.*)%')

    # Substitute the magic %...% placeholders in the positional arguments:
    # %session% -> session token, %file:PATH% -> file contents,
    # %boolean:X% / %integer:X% / %string:X% -> typed value of X.
    params = []
    for param in unparsed:
        file_match = file_re.search(param)
        boolean_match = boolean_re.search(param)
        integer_match = integer_re.search(param)
        string_match = string_re.search(param)
        if param == '%session%':
            param = session
        elif file_match:
            with open(file_match.groups()[0], 'r') as f:
                param = f.read()
        elif boolean_match:
            # NOTE(review): bool() of any non-empty text is True, so
            # '%boolean:false%' yields True; kept as-is for compatibility.
            param = bool(boolean_match.groups()[0])
        elif integer_match:
            param = int(integer_match.groups()[0])
        elif string_match:
            param = string_match.groups()[0]
        params.append(param)

    # First positional argument names the API method; the rest are its args.
    try:
        result = getattr(client, params[0])(*params[1:])
        sys.stdout.write(str(result) + '\n')
    except xmlrpclib.Fault:
        sys.stderr.write('Fault returned from XML RPC Server: %s\n' % str(sys.exc_info()[1]))
        sys.exit(1)
    finally:
        if options.login:
            client.auth.logout(session)
07070100000011000081FD00000000000000000000000162C3F37D000063C7000000000000000000000000000000000000001900000000spacewalk-utils/taskotop#!/usr/bin/python3
#
# Displays a summary of Taskomatic activities in progress
#
# Copyright (c) 2016 SUSE LLC
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#

import datetime
import time
import struct
import signal
import sys
import curses
import logging
import argparse
import os.path

from spacewalk.server import rhnSQL

from io import BytesIO


# Default log destination for -v/--verbose; override with --logfile.
DEFAULT_LOGFILE = './taskotop.log'

# Command line interface.  Parsed at import time; the result drives the
# module-level display flags defined below.
parser = argparse.ArgumentParser(description="Taskotop is a tool to monitor what taskomatic " +
                                 "is currently doing.")
# -e and -r select mutually exclusive display modes
mode_group = parser.add_mutually_exclusive_group()
mode_group.add_argument("-e", "--each-task", action="store_true",
                    dest="eachTask",
                    help="Display most recent run for each task instead of recent task run history.")
mode_group.add_argument("-r", "--recent-history", action="store_true",
                    dest="recentHistory",
                    help="Display recent history of task runs.  This is the default display mode.")
parser.add_argument("-H", "--human-readable", action="store_true",
                    dest="humanReadable",
                    help="Use human readable time output.  Time will be displayed " +
                    "in the format [[days:]hours:]min:sec instead of total seconds.")
parser.add_argument("-m", "--max-age", type=int,
                    dest="maxAge", default=60,
                    help="Retrieve past events up to this old (in seconds, default 60). " +
                    "This has no effect if -e/--each-task is specified.")
parser.add_argument("-n", type=int,
                    dest="numIterations", default=0,
                    help="taskotop will iterate the specified number of times and then exit. " +
                    "If not specified or 0 (the default), taskotop will run until the user exits taskotop.")
parser.add_argument("-t", "--taskomatic", action="store_true",
                    dest="displayTaskomatic",
                    help="Include taskomaticd process information in the output.")
parser.add_argument("-v", "--verbose", action="count",
                    dest="verbose", default=0,
                    help="Increase log output verbosity.  Specify multiple times, up to 4 " +
                    "to increase verbosity.")
parser.add_argument("--hide-elapsed", action="store_true",
                    dest="hideElapsed",
                    help="Hide the ELAPSED column in the display.")
parser.add_argument("--show-start", action="store_true",
                    dest="showStart",
                    help="Include the START column in the display.")
parser.add_argument("--logfile", dest="logfile", default=DEFAULT_LOGFILE,
                    help="Specify logfile to use if at least one verbose arg specified.  " +
                    "Default is %s" % DEFAULT_LOGFILE)
args = parser.parse_args()

# Module-level display state.  Several of these are mutated at runtime by
# the interactive key handler (process_interactive_input).
DISPLAY_TASKOMATIC = args.displayTaskomatic
DISPLAY_MODE_RECENT_HISTORY = 0
DISPLAY_MODE_EACH_TASK = 1
DISPLAY_MODE = DISPLAY_MODE_RECENT_HISTORY
SHOW_ELAPSED_TIME = True
SHOW_START_TIME = False
HUMAN_READABLE = args.humanReadable
MAXIMUM_AGE = args.maxAge
LOGGING_ENABLED = False

# Text shown by the in-program 'h' help screen.
INTERACTIVE_HELP = [
"Help for taskotop interactive commands",
"",
"  e    Change display mode to show each task's latest run.",
"  h    Display this help page.",
"  H    Toggle human readable format.  This toggles the time display between",
"       [[days:]hours:]min:sec format and total seconds.",
"  q    Quit taskotop.",
"  r    Change display mode to show recent history of task runs.",
"  t    Toggle taskomatic process information display.",
""
]


def log_debug(msg, *args, **kwargs):
    """Forward a debug message to the logging module when logging is enabled."""
    if not LOGGING_ENABLED:
        return
    logging.debug(msg, *args, **kwargs)

def log_info(msg, *args, **kwargs):
    """Forward an info message to the logging module when logging is enabled."""
    if not LOGGING_ENABLED:
        return
    logging.info(msg, *args, **kwargs)

def log_warning(msg, *args, **kwargs):
    """Forward a warning message to the logging module when logging is enabled."""
    if not LOGGING_ENABLED:
        return
    logging.warning(msg, *args, **kwargs)

def log_error(msg, *args, **kwargs):
    """Forward an error message to the logging module when logging is enabled."""
    if not LOGGING_ENABLED:
        return
    logging.error(msg, *args, **kwargs)

class CursesDisplayBuilder:
    """Builder class to make laying out a curses display in a table format easier"""
    # justification constants for column headings and cell data
    JUSTIFY_LEFT = 0
    JUSTIFY_CENTER = 1
    JUSTIFY_RIGHT = 2

    def __init__(self):
        # BUGFIX: 'row' used to be a class attribute, so every instance
        # shared one column list; make it a per-instance attribute.
        self.row = []

    def add_column(self, width, heading, heading_justify, data_justify, format_data_callable, data_key = ""):
        """Append a column definition; width -1 means 'rest of the screen'."""
        self.row.append(CursesDisplayColumn(width,
                                       heading,
                                       heading_justify,
                                       data_justify,
                                       format_data_callable,
                                       data_key))

    def string_of_length(self, length, char="*"):
        """Return 'char' repeated 'length' times (used as an overflow filler)."""
        return char * length

    def add_column_value_to_screen(self, screen, value, ypos, xpos, column_width, justify, maxy, maxx):
        """Write one cell value at (ypos, xpos); values wider than the column
        are replaced by a '*' filler.  Returns the column width consumed
        (excluding the one-space separator)."""
        if len(value) > column_width:
            value = self.string_of_length(column_width)
        # justify is 0/1/2 (left/center/right), shifting by 0, half or all
        # of the free space within the column
        addxpos = int(xpos + justify * (column_width - len(value)) / 2)
        # need to skip writing to the last character on the last line
        # so the cursor has a place to exist, even though its not
        # visible
        if ypos == maxy - 1 and addxpos + len(value) == maxx:
            value = value[0:-1]
        log_debug('y=%d x=%d value \'%s\'  value length %d' % (ypos, addxpos, value, len(value)))
        if len(value) > 0:
            screen.addstr(ypos, addxpos, value)
        return column_width

    def output_to_screen(self, screen, data, starty = 0):
        """Render the heading row plus one row per dict in 'data', starting
        at screen row 'starty'.  Columns that do not fit the screen width
        are dropped entirely.  Returns the last row index written relative
        to starty."""
        maxy, maxx = screen.getmaxyx()
        log_debug('maxy, maxx is %d %d' % (maxy, maxx))
        # figure out how many leading columns fit on the screen
        last_column_to_display = 0
        current_width = 0
        for column in self.row:
            if current_width + column.width <= maxx:
                last_column_to_display += 1
                current_width += column.width + 1
            else:
                break
            log_debug('column \'%s\': width %d, next column starts at %d' % (column.heading, column.width, current_width))

        # heading row
        current_row = 0
        if current_row + starty < maxy:
            current_x = 0
            for colindex in range (0, last_column_to_display):
                column = self.row[colindex]
                width = column.width
                if width == -1:
                    # -1 means 'stretch to the right edge'
                    width = maxx - current_x
                value = column.heading
                current_x += self.add_column_value_to_screen(screen, value, current_row + starty, current_x, width, column.heading_justify, maxy, maxx) + 1
            current_row += 1

        # data rows, until either data or screen rows run out
        for rowdata in data:
            if current_row + starty >= maxy:
                break
            current_x = 0
            for colindex in range (0, last_column_to_display):
                column = self.row[colindex]
                width = column.width
                if width == -1:
                    width = maxx - current_x
                value = column.format_data_callable(rowdata, column.data_key, width)
                current_x += self.add_column_value_to_screen(screen, value, current_row + starty, current_x, width, column.data_justify, maxy, maxx) + 1
            current_row += 1

        return current_row - 1

class CursesDisplayColumn:
    """Data structure for representing a column within CursesDisplayBuilder"""

    def __init__(self, width, heading, heading_justify, data_justify, format_data_callable, data_key=""):
        """Record the layout and formatting parameters for one table column."""
        self.width = width                    # column width in chars; -1 = rest of screen
        self.heading = heading                # heading text
        self.heading_justify = heading_justify  # justification constant for the heading
        self.data_justify = data_justify      # justification constant for cell data
        # callable(rowdata, data_key, width) -> display string for a cell
        self.format_data_callable = format_data_callable
        self.data_key = data_key              # key handed to the formatter


def get_tasko_runs_newer_than_age(maximum_age):
    """Return recent Taskomatic runs (still running, or finished within the
    last maximum_age seconds) as a list of row dicts, LOB data included."""
    statement = rhnSQL.prepare("""
        SELECT
            task.name AS name,
            run.id AS id,
            run.start_time AS start_time,
            run.end_time AS end_time,
            schedule.data AS data

            FROM rhnTaskoRun run
                JOIN rhnTaskoSchedule schedule ON schedule.id = run.schedule_id
                JOIN rhnTaskoTemplate template ON template.id = run.template_id
                JOIN rhnTaskoTask task ON task.id = template.task_id

            WHERE
                run.start_time IS NOT NULL
                    AND (run.end_time IS NULL OR run.end_time > :timelimit)

            ORDER BY end_time DESC NULLS FIRST, start_time ASC
    """)
    # runs that ended before the cutoff are dropped by the WHERE clause
    cutoff = datetime.datetime.now() - datetime.timedelta(seconds=maximum_age)
    statement.execute(timelimit=cutoff)

    # HACK: emulate fetchall_dict() row by row so each BLOB is read exactly
    # once (reading it again raises an exception)
    rows = []
    for record in iter(statement.fetchone_dict, None):
        record["data"] = rhnSQL.read_lob(record["data"])
        rows.append(record)
    return rows


def get_tasko_runs_latest_each_task():
    """Return the most recent run of every Taskomatic task (running runs
    count as most recent) as a list of row dicts, LOB data included."""
    statement = rhnSQL.prepare("""
        SELECT
            task.name AS name,
            run.id AS id,
            run.start_time AS start_time,
            run.end_time AS end_time,
            schedule.data AS data

            FROM (SELECT template_id,
                         CASE WHEN MAX(CASE WHEN end_time IS NULL THEN 1 ELSE 0 END) = 0
                              THEN MAX(end_time)
                         END AS end_time
                      FROM rhnTaskoRun GROUP BY template_id) m
                JOIN rhnTaskoRun run ON run.template_id = m.template_id
                    AND (run.end_time = m.end_time OR (run.end_time IS NULL AND m.end_time IS NULL))
                JOIN rhnTaskoSchedule schedule ON schedule.id = run.schedule_id
                JOIN rhnTaskoTemplate template ON template.id = run.template_id
                JOIN rhnTaskoTask task ON task.id = template.task_id

            ORDER BY end_time DESC NULLS FIRST, start_time ASC
    """)

    statement.execute()

    # HACK: emulate fetchall_dict() row by row so each BLOB is read exactly
    # once (reading it again raises an exception)
    rows = []
    for record in iter(statement.fetchone_dict, None):
        record["data"] = rhnSQL.read_lob(record["data"])
        rows.append(record)
    return rows


def get_channel_names(ids):
    """Gets the channel names corresponding to channel ids from the database.

    ids is a list of numeric channel id strings (as produced by
    extract_channel_ids, which guarantees digits-only values, so the string
    interpolation below is safe).  Returns a sorted list of labels;
    an empty input returns [] without touching the database.
    """
    if len(ids) == 0:
        return []

    query = rhnSQL.prepare("""
        SELECT DISTINCT label
            FROM rhnChannel
            WHERE id IN ({0})
            ORDER BY label
    """.format(",".join(ids)))
    query.execute()

    # loop variable renamed: the original shadowed the builtin 'tuple'
    return [row[0] for row in query.fetchall()]

def get_current_repodata_channel_names():
    """Gets the channel labels of currently running repodata tasks from the database."""
    statement = rhnSQL.prepare("""
        SELECT DISTINCT channel_label
            FROM rhnRepoRegenQueue
            WHERE next_action IS NULL
            ORDER BY channel_label
    """)
    statement.execute()
    return [record[0] for record in statement.fetchall()]

def extract_channel_ids(bytes):
    """Extracts channel ids from a Java Map in serialized form."""
    # HACK: heuristically scan for serialized Java strings: a 't' marker
    # byte, a big-endian two-byte length, then the characters.  Candidates
    # that look like numbers are assumed to be channel ids (currently true).
    found = []
    stream = BytesIO(bytes)

    marker = stream.read(1)
    while marker != b"":
        if marker == b"t":
            resume_at = stream.tell()
            try:
                (length,) = struct.unpack(">H", stream.read(2))
                found.extend(struct.unpack(">{0}s".format(length), stream.read(length)))
            except struct.error:
                pass  # not a real string, ignore
            # rewind so overlapping candidates are not skipped
            stream.seek(resume_at)
        marker = stream.read(1)
    # of those found, keep only the ones that look like a number
    return [candidate.decode() for candidate in found if candidate.isdigit()]

# column indexes for ps output
# (indexes into the whitespace-split fields of one ps line; see the
# 'pid,ppid,pcpu,pmem,rss,time,comm,lstart' format in taskomaticd_ps)
IX_PID  = 0
IX_PPID = 1
IX_PCPU = 2
IX_PMEM = 3
IX_RSS  = 4
IX_TIME = 5
# comm can have whitespace, but taskomaticd does not and
# is the only process for which we parse start time.  As
# such, comm must be the second to last column, followed
# by lstart.
IX_COMM = 6
# lstart displays in format [weekday month day time year]
# ex Mon Mar 13 06:56:22 2017
# (field 7 would be the weekday; it is not used)
IX_START_MON = 8
IX_START_DAY = 9
IX_START_TIME = 10
IX_START_YEAR = 11

def taskomaticd_ps():
    """use ps command to retrieve pids[], cputimeseconds, cpupercent, memsize, mempercent, upsince"""
    import subprocess
    pids = []
    upsince = ""
    cputimeseconds = 0
    cpupercent = 0.0
    mempercent = 0.0
    memsize = 0
    # BUGFIX: decode the ps output to text.  Under Python 3 communicate()
    # returns bytes, so comparisons like values[IX_COMM] == "taskomaticd"
    # could never match; universal_newlines=True makes everything str.
    out = subprocess.Popen(['ps', '--no-headers', '-eHo', 'pid,ppid,pcpu,pmem,rss,time,comm,lstart'],
                           stdout=subprocess.PIPE,
                           universal_newlines=True).communicate()[0].splitlines()
    for line in out:
        values = line.split()
        # taskomaticd itself, or any descendant of a pid already collected;
        # ps -H lists children after their parent, so one pass suffices
        if (values[IX_COMM] == "taskomaticd" or values[IX_PPID] in pids):
            pids.append(values[IX_PID])
            if not upsince:
                upsince = '%s %s %s %s' % (values[IX_START_DAY], values[IX_START_MON], values[IX_START_YEAR], values[IX_START_TIME])
            cputimeseconds += seconds_from_time(values[IX_TIME])
            cpupercent += float(values[IX_PCPU])
            mempercent += float(values[IX_PMEM])
            memsize += int(values[IX_RSS])
    return pids, cputimeseconds, cpupercent, memsize, mempercent, upsince

def seconds_from_time(time):
    """convert time in [DD-]hh:mm:ss format to total seconds"""
    parts = time.split(':')
    days = 0
    hours = 0
    # renamed from 'min'/'sec' so the builtins min()/... are not shadowed
    minutes = int(parts[1])
    seconds = int(parts[2])
    if '-' in parts[0]:
        # ps prefixes a day count as 'DD-' when CPU time exceeds a day
        dh = parts[0].split('-')
        days = int(dh[0])
        hours = int(dh[1])
    else:
        hours = int(parts[0])
    return (((((days * 24) + hours) * 60) + minutes) * 60) + seconds

def add_line(screen, line, ypos):
    """Write 'line' at row 'ypos', truncated to the screen width.

    Returns 1 when a row was consumed, 0 when ypos is off-screen.
    """
    maxy, maxx = screen.getmaxyx()
    if ypos >= maxy:
        return 0
    # The very last cell of the last line must stay empty so the (hidden)
    # cursor has somewhere to sit, so the usable width shrinks by one there.
    usable = maxx - (1 if ypos == maxy - 1 else 0)
    screen.addstr(ypos, 0, line[:usable])
    return 1

def show_taskomatic_header(screen):
    """Get taskomatic telemetry and display up to maxy, maxx"""
    ypos = 0
    try:
        pids, cputimeseconds, cpupercent, memsize, mempercent, upsince = taskomaticd_ps()

        if pids:
            summary = 'taskomaticd pid: %s  up since: %s  child processes: %d' % (pids[0], upsince, len(pids) - 1)
        else:
            summary = 'taskomaticd is not running'
        ypos += add_line(screen, summary, ypos)

        if HUMAN_READABLE:
            cpu_line = 'cpu %%: %2.1f  cpu total time: %s' % (cpupercent, seconds_to_hms_string(cputimeseconds))
        else:
            cpu_line = 'cpu %%: %2.1f  cpu total seconds: %d' % (cpupercent, cputimeseconds)
        ypos += add_line(screen, cpu_line, ypos)

        mem_line = 'mem %%: %2.1f  mem total (kB): %d' % (mempercent, memsize)
        ypos += add_line(screen, mem_line, ypos)
    except Exception:
        # best-effort display: ps may be unavailable or its output unparsable
        log_error('failed to issue ps command to retrieve taskomaticd information')
        ypos += add_line(screen, 'failed to issue ps command to retrieve taskomaticd information', ypos)
    return ypos

def display_interactive_help(screen):
    """Display the interactive help on the screen"""
    log_debug("Displaying interactive help")
    while True:
        screen.erase()
        maxy, maxx = screen.getmaxyx()
        ypos = 0
        for help_line in INTERACTIVE_HELP:
            # leave room for the "press any key" prompt at the bottom
            if ypos > maxy - 2:
                break
            ypos += add_line(screen, help_line, ypos)
        ypos += add_line(screen, "Press any key ", ypos)
        screen.refresh()
        # redraw until a real key arrives (ignores timeouts/resize codes)
        try:
            keycode = screen.getch()
            if 0 <= keycode < 256:
                break
        except Exception:
            pass

def process_interactive_input(c, screen):
    """Process the interactive input from the user.

    c is the key code returned by curses getch(); unrecognized keys are
    ignored.  Mutates the module-level display flags so the next redraw in
    main() picks up the change.
    """
    global DISPLAY_MODE
    global HUMAN_READABLE
    global DISPLAY_TASKOMATIC
    if c == ord('q'):
        system_exit(0)
    elif c == ord('e'):
        DISPLAY_MODE = DISPLAY_MODE_EACH_TASK
        log_debug('Display Mode is now Each Task')
    elif c == ord('r'):
        DISPLAY_MODE = DISPLAY_MODE_RECENT_HISTORY
        log_debug('Display Mode is now Recent History')
    elif c == ord('H'):
        HUMAN_READABLE = not HUMAN_READABLE
        log_debug('HUMAN_READABLE is now %s' % HUMAN_READABLE)
    elif c == ord('t'):
        DISPLAY_TASKOMATIC = not DISPLAY_TASKOMATIC
        log_debug('DISPLAY_TASKOMATIC is now %s' % DISPLAY_TASKOMATIC)
    elif c == ord('h'):
        # turn off half delay with nocbreak(), then
        # turn cbreak back on so no need to hit 'enter'
        curses.nocbreak()
        curses.cbreak()
        curses.curs_set(1)
        display_interactive_help(screen)
        # restore the hidden cursor and the 1-second getch() timeout used
        # by the main loop
        curses.curs_set(0)
        curses.halfdelay(10)

def seconds_to_hms_string(seconds):
    """Render a second count as [days:][hours:]min:sec, e.g. '1:02:03:04'."""
    result, s = divmod(seconds, 60)
    result, m = divmod(result, 60)
    result, h = divmod(result, 24)

    retval = ""
    if result > 0:
        # BUGFIX: always emit the hours field when days are shown; the
        # original skipped it when h == 0, so exactly one day rendered
        # as '1:00:00' and read as one hour.
        retval += '%d:%02d:' % (result, h)
    elif h > 0:
        retval += '%02d:' % h
    retval += '%02d:' % m
    retval += '%02d' % s
    return retval

def format_elapsed_time(rowdata, data_key, width):
    """Formats the elapsed time for display (end - start, or now - start
    when the run has not finished)."""
    finish = rowdata["end_time"] or datetime.datetime.now()

    # drop tzinfo from both sides so naive and aware values can be mixed
    delta = finish.replace(tzinfo=None) - rowdata["start_time"].replace(tzinfo=None)
    total_seconds = int((delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10**6) / 10**6)
    if HUMAN_READABLE:
        return seconds_to_hms_string(total_seconds)
    return "{0}s".format(total_seconds)

def format_datetime(rowdata, data_key, width):
    """Render rowdata[data_key] as 'YYYY-MM-DD HH:MM:SS', or '' when unset."""
    value = rowdata[data_key]
    return value.strftime('%Y-%m-%d %H:%M:%S') if value else ""

def format_start_time(rowdata, data_key, width):
    """Render the START column; blank when the run has no start time."""
    if not rowdata["start_time"]:
        return ""
    return format_datetime(rowdata, "start_time", width)

def format_end_time(rowdata, data_key, width):
    """Render the END column.  Running tasks show '(running)', with the
    elapsed time inlined when the ELAPSED column is hidden."""
    if rowdata["end_time"]:
        return format_datetime(rowdata, "end_time", width)
    if not rowdata["start_time"]:
        return ""
    if SHOW_ELAPSED_TIME:
        return "(running)"
    return "(running {0})".format(format_elapsed_time(rowdata, "", width))

def format_channel_names(rowdata, data_key, width):
    """format channel names for display"""
    channel_names = []
    if rowdata["data"]:
        channel_names = get_channel_names(extract_channel_ids(rowdata["data"]))
    # a repodata run still in progress reports the regen queue instead of
    # the channels serialized in its own schedule data
    if rowdata["name"] == "channel-repodata" and not rowdata["end_time"]:
        channel_names = get_current_repodata_channel_names()
    return ", ".join(channel_names)[:width]

def format_int(rowdata, data_key, width):
    """format an int for display (always reads the 'id' column)"""
    return format(rowdata["id"], "d")

def format_string(rowdata, data_key, width):
    """format a string for display (returned as-is; the display builder
    handles overflow)"""
    value = rowdata[data_key]
    return value


def main(screen):
    """Computes and displays runs every second.

    Runs under curses.wrapper(); loops until the iteration count (-n) is
    exhausted or the user quits via process_interactive_input().
    """
    # getch() blocks for at most 1 second so the display refreshes even
    # without user input
    curses.halfdelay(10)

    rhnSQL.initDB()

    # exit gracefully on ctrl-c
    signal.signal(signal.SIGINT, lambda signal, frame: sys.exit(0))

    # hide cursor
    curses.curs_set(0)

    # set up curses display builder
    # (FIX: the stray trailing commas after the original add_column() calls
    # built throwaway 1-tuples; they have been removed)
    display_builder = CursesDisplayBuilder()
    display_builder.add_column(11,
                            "RUN ID",
                            display_builder.JUSTIFY_RIGHT,
                            display_builder.JUSTIFY_LEFT,
                            format_int,
                            "id")
    display_builder.add_column(30,
                            "TASK NAME",
                            display_builder.JUSTIFY_RIGHT,
                            display_builder.JUSTIFY_RIGHT,
                            format_string,
                            "name")
    if SHOW_START_TIME:
        display_builder.add_column(19,
                            "START",
                            display_builder.JUSTIFY_CENTER,
                            display_builder.JUSTIFY_RIGHT,
                            format_start_time)
    if SHOW_ELAPSED_TIME:
        display_builder.add_column(9,
                            "ELAPSED",
                            display_builder.JUSTIFY_RIGHT,
                            display_builder.JUSTIFY_RIGHT,
                            format_elapsed_time)
    display_builder.add_column(19,
                            "END",
                            display_builder.JUSTIFY_CENTER,
                            display_builder.JUSTIFY_RIGHT,
                            format_end_time)
    display_builder.add_column(-1,
                            "CHANNEL",
                            display_builder.JUSTIFY_CENTER,
                            display_builder.JUSTIFY_RIGHT,
                            format_channel_names)

    # -1 means 'run forever'; otherwise count down to zero
    num_iterations = args.numIterations
    if args.numIterations == 0:
        num_iterations = -1
    while num_iterations != 0:
        if num_iterations > 0:
            num_iterations -= 1
            log_debug('updating screen (%d remaining)' % num_iterations)
        else:
            log_debug("updating screen")
        screen.erase()
        current_y = 0
        if DISPLAY_TASKOMATIC:
            current_y += show_taskomatic_header(screen) + 1
        result = []
        if DISPLAY_MODE == DISPLAY_MODE_EACH_TASK:
            result = get_tasko_runs_latest_each_task()
        else:
            result = get_tasko_runs_newer_than_age(MAXIMUM_AGE)
        display_builder.output_to_screen(screen, result, current_y)

        screen.refresh()
        try:
            # given up to a 1 second to retrieve user input
            # because of halfdelay(10) call earlier
            c = screen.getch()
            if c != curses.ERR:
                process_interactive_input(c, screen)
        except Exception:
            # keep refreshing even if input handling hiccups (e.g. resize)
            e = sys.exc_info()[1]
            log_warning('getch() exception %s' % e)


def system_exit(code, msgs=None):
    """Exit with a code and optional message(s). Saved a few lines of code.

    msgs may be a single object or a list/tuple; each item is written to
    stderr on its own line before exiting.
    """
    if msgs:
        # isinstance instead of comparing type objects; accepts subclasses
        # of list/tuple as well, which is a compatible broadening
        if not isinstance(msgs, (list, tuple)):
            msgs = (msgs,)
        for msg in msgs:
            sys.stderr.write(str(msg) + '\n')
    sys.exit(code)

# ---- script entry: validate arguments, configure logging, run the UI ----

if args.numIterations < 0:
    system_exit(2, "ERROR: NUMITERATIONS must not be a negative value")

if args.maxAge < 0:
    system_exit(2, "ERROR: MAXAGE must not be a negative value")

if args.logfile != DEFAULT_LOGFILE and args.verbose == 0:
    system_exit(2, "ERROR: --logfile command line option requires use of -v or --verbose option")

if args.verbose > 0:
    LOGGING_ENABLED = True
    # map the -v count to a log level: 1=ERROR, 2=WARNING, 3=INFO, 4+=DEBUG
    loglevel = logging.NOTSET
    if args.verbose == 1:
        loglevel = logging.ERROR
    elif args.verbose == 2:
        loglevel = logging.WARNING
    elif args.verbose == 3:
        loglevel = logging.INFO
    else:
        loglevel = logging.DEBUG
    dirname = os.path.dirname(args.logfile)
    if dirname != '' and not os.path.isdir(os.path.dirname(args.logfile)):
        system_exit(1, "ERROR: Directory %s in specified logfile doesn't exist" % dirname)
    # probe the logfile for writability before handing it to logging
    try:
        lfile = open(args.logfile, 'a')
        lfile.write("Logging started with verbosity %d on %s\n" % (args.verbose,  time.strftime("%c")))
        lfile.write("taskotop command args: %s\n" % ' '.join(sys.argv[1:]))
        lfile.close()
    except Exception:
        system_exit(1, "ERROR: Failed to open and write to logfile %s" % args.logfile)

    logging.basicConfig(filename=args.logfile, level=loglevel, format='%(asctime)s %(levelname)s:%(message)s')
    log_debug('HUMAN_READABLE is %s' % HUMAN_READABLE)
    log_debug('MAXIMUM_AGE is %d' % MAXIMUM_AGE)

# apply the command line display options (also toggleable interactively)
if args.eachTask:
    DISPLAY_MODE = DISPLAY_MODE_EACH_TASK
    log_debug('Display Mode is Each Task')
if args.recentHistory:
    DISPLAY_MODE = DISPLAY_MODE_RECENT_HISTORY
    log_debug('Display Mode is Recent History')
if args.showStart:
    SHOW_START_TIME = True
    log_debug('START column will be displayed')
if args.hideElapsed:
    SHOW_ELAPSED_TIME = False
    log_debug('ELAPSED column will be hidden')

# curses.wrapper restores the terminal even when main() raises
try:
    curses.wrapper(main)
except rhnSQL.SQLConnectError:
    e = sys.exc_info()[1]
    system_exit(20, ["ERROR: Can't connect to the database: %s" % str(e), "Check if your database is running."])
except Exception:
    e = sys.exc_info()[1]
    system_exit(1, "ERROR: Some problems occurred during getting information about tasks: %s" % str(e))
07070100000012000081B400000000000000000000000162C3F37D0000215C000000000000000000000000000000000000001D00000000spacewalk-utils/depsolver.py#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012--2017 Red Hat, Inc.
#
# Lookup package dependencies in a yum repository
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation

import logging
import sys
from optparse import OptionParser  # pylint: disable=deprecated-module
import os
import solv
import yaml


try:
    from spacewalk.satellite_tools.progress_bar import ProgressBar
except ImportError:
    # pylint: disable=F0401
    _LIBPATH = "/usr/share/rhn"
    if _LIBPATH not in sys.path:
        sys.path.append(_LIBPATH)
    from satellite_tools.progress_bar import ProgressBar


log = logging.getLogger(__name__)

# NOTE(review): legacy yum cache/persist paths; they appear unused by the
# libsolv-based code visible here -- confirm before removing.
CACHE_DIR = "/tmp/cache/yum"
PERSIST_DIR = "/var/lib/yum"


class DepSolver:
    """Resolve package dependencies against libsolv repository metadata.

    Each entry of ``repos`` is a dict with an ``id`` and a
    ``relative_path``; the directory at ``relative_path`` must contain a
    pre-built ``solv`` file for that repository.
    """

    def __init__(self, repos, pkgs_in=None, quiet=True):
        # repos: list of {'id': ..., 'relative_path': ...} dicts
        # pkgs_in: optional initial list of package name strings
        # quiet: when falsy, progress output is written to stdout
        self._override_sigchecks = False
        self.quiet = quiet
        self.pkgs = pkgs_in or []
        self.repos = repos
        self.pool = solv.Pool()
        self.setup()

    def setPackages(self, pkgs_in):
        # Replace the package list used by the next dependency lookup.
        self.pkgs = pkgs_in

    def setup(self):
        """
         Load the repos into repostore to query package dependencies
        """
        for repo in self.repos:
            solv_repo = self.pool.add_repo(str(repo['id']))
            solv_path = os.path.join(repo['relative_path'], 'solv')
            # add_solv() returns a false value on failure, so an unreadable
            # solv file is reported the same way as a missing one.
            if not os.path.isfile(solv_path) or not solv_repo.add_solv(solv.xfopen(str(solv_path)), 0):
                raise Exception("Repository solv file cannot be found at: {}".format(solv_path))
        # Build the "whatprovides" index; libsolv requires this before
        # any provider queries can be answered.
        self.pool.addfileprovides()
        self.pool.createwhatprovides()

    def getDependencylist(self):
        """
         Get dependency list and suggested packages for package names provided.
         The dependency lookup is only one level in this case.
         The package name format could be any of the following:
         name, name.arch, name-ver-rel.arch, name-ver, name-ver-rel,
         epoch:name-ver-rel.arch, name-epoch:ver-rel.arch

         Returns a dict mapping each matched solvable to a dict of
         {requirement: [provider solvables]} (see __locateDeps).
        """
        pkgselection = self.pool.Selection()
        # Accept the widest range of spellings: plain names, globs,
        # provides, canonical NEVRA and name.arch forms; SELECTION_ADD
        # accumulates matches into the same selection object.
        flags = solv.Selection.SELECTION_NAME|solv.Selection.SELECTION_PROVIDES|solv.Selection.SELECTION_GLOB
        flags |= solv.Selection.SELECTION_CANON|solv.Selection.SELECTION_DOTARCH|solv.Selection.SELECTION_ADD
        for pkg in self.pkgs:
            pkgselection.select(pkg, flags)
        return self.__locateDeps(pkgselection.solvables())

    def getRecursiveDepList(self):
        """
         Get dependency list and suggested packages for package names provided.
         The dependency lookup is recursive. All available packages in the repo
         are returned matching whatprovides.
         The package name format could be any of the following:
         name, name.arch, name-ver-rel.arch, name-ver, name-ver-rel,
         epoch:name-ver-rel.arch, name-epoch:ver-rel.arch
         returns a dictionary of {'n-v-r.a' : [n,v,e,r,a],...}

         NOTE(review): the return-format line above predates the libsolv
         port; the actual return is the merged result of repeated
         getDependencylist() calls — confirm before relying on it.
        """
        solved = []
        to_solve = self.pkgs
        all_results = {}

        # Fixed-point iteration: keep resolving newly discovered provider
        # packages until no unseen packages remain.  self.pkgs is reused
        # as the work list consumed by getDependencylist().
        while to_solve:
            log.debug("Solving %s \n\n", to_solve)
            results = self.getDependencylist()
            all_results.update(results)
            found = self.processResults(results)[0]
            solved += to_solve
            to_solve = []
            for _dep, pkgs in list(found.items()):
                for pkg in pkgs:
                    # De-duplicate the solved list before the membership test.
                    solved = list(set(solved))
                    if str(pkg) not in solved:
                        to_solve.append(str(pkg))
            self.pkgs = to_solve
        return all_results

    def __locateDeps(self, pkgs):
        # For every solvable, map each of its REQUIRES entries to the list
        # of solvables that provide it (may be empty when unsatisfied).
        results = {}

        if not self.quiet:
            print(("Solving Dependencies (%i): " % len(pkgs)))
            pb = ProgressBar(prompt='', endTag=' - complete',
                             finalSize=len(pkgs), finalBarLength=40, stream=sys.stdout)
            pb.printAll(1)

        for pkg in pkgs:
            if not self.quiet:
                pb.addTo(1)
                pb.printIncrement()
            results[pkg] = {}
            reqs = pkg.lookup_deparray(solv.SOLVABLE_REQUIRES)
            pkgresults = results[pkg]
            for req in reqs:
                pkgresults[req] = self.pool.whatprovides(req)
        if not self.quiet:
            pb.printComplete()
        return results

    @staticmethod
    def processResults(results):
        """Split __locateDeps output into (found, notfound).

        found: {requirement string: [[name, evr, arch], ...]} with
        duplicate providers removed; notfound: {requirement string: []}
        for requirements with no provider.
        """
        reqlist = {}
        notfound = {}
        for pkg in results:
            if not results[pkg]:
                continue
            for req in results[pkg]:
                rlist = results[pkg][req]
                if not rlist:
                    # Unsatisfied dependency
                    notfound[str(req)] = []
                    continue
                reqlist[str(req)] = rlist
        found = {}
        for req, rlist in list(reqlist.items()):
            found[req] = []
            for r in rlist:
                dep = [r.name, r.evr, r.arch]
                if dep not in found[req]:
                    found[req].append(dep)
        return found, notfound

    @staticmethod
    def printable_result(results):
        """Render __locateDeps output as a human-readable report string."""
        print_doc_str = ""
        for pkg in results:
            if not results[pkg]:
                continue
            for req in results[pkg]:
                rlist = results[pkg][req]
                print_doc_str += "\n dependency: %s \n" % req
                if not rlist:
                    # Unsatisfied dependency
                    print_doc_str += "   Unsatisfied dependency \n"
                    continue

                for po in rlist:
                    print_doc_str += "   provider: %s\n" % str(po)
        return print_doc_str


if __name__ == '__main__':
    # Command-line driver: resolve one level of dependencies for the given
    # packages against one or more repositories, from either CLI arguments
    # or a YAML input file.
    parser = OptionParser(usage="Usage: %prog [repoid] [repodata_path] [pkgname1] [pkgname2] ... [pkgnameM]")
    parser.add_option("-i", "--input-file", action="store",
                      help="YAML file to use as input. This would ignore all other input passed in the command line")
    parser.add_option("-y", "--output-yaml", action="count", help="Produce a YAML formatted output")
    (options, _args) = parser.parse_args()

    arg_repo = []
    arg_pkgs = []

    if options.input_file:
        # Example of YAML input file:
        #
        # repositories:
        #   sles12-sp3-pool-x86_64: /var/cache/rhn/repodata/sles12-sp3-pool-x86_64/
        #   sles12-sp3-updates-x86_64: /var/cache/rhn/repodata/sles12-sp3-updates-x86_64/
        #
        # packages:
        #   - libapr-util1-1.5.3-2.3.1.x86_64
        #   - apache2-utils-2.4.23-29.3.2.x86_64
        #   - python3-base
        #   - apache2-utils
        #
        try:
            # BUGFIX: yaml.load() without a Loader is unsafe on untrusted
            # input and raises TypeError on PyYAML >= 6; safe_load() only
            # builds plain Python objects.  The context manager also closes
            # the file handle, which was previously leaked.
            with open(options.input_file) as input_file:
                repo_cfg = yaml.safe_load(input_file)
            for repository in repo_cfg['repositories']:
                arg_repo.append({'id': repository, 'relative_path': repo_cfg['repositories'][repository]})
            arg_pkgs = repo_cfg['packages']
        except Exception as exc:  # pylint: disable=broad-except
            # parser.error() prints the message and exits with status 2;
            # the old sys.exit(1) after it was unreachable.
            parser.error("Error reading input file: {}".format(exc))
    elif len(_args) >= 3:
        arg_repo = [{'id': _args[0],
                     'relative_path': _args[1]}]  # path to where repodata is located
        arg_pkgs = _args[2:]
    else:
        parser.error("Wrong number of arguments")

    dsolve = DepSolver(arg_repo, arg_pkgs, quiet=options.output_yaml)
    deplist = dsolve.getDependencylist()

    if options.output_yaml:
        # Machine-readable output: package -> dependency -> provider list.
        output = {
            'packages': [],
            'dependencies': {}
        }
        for package in deplist:
            pkg_tag = str(package)
            output['packages'].append(pkg_tag)
            output['dependencies'][pkg_tag] = {}
            for dependency in deplist[package]:
                output['dependencies'][pkg_tag][str(dependency)] = [str(x) for x in deplist[package][dependency]]
        sys.stdout.write(yaml.dump(output))
    else:
        result_set = dsolve.processResults(deplist)
        print(result_set)
        print("Printable dependency Results: \n\n %s" % dsolve.printable_result(deplist))
07070100000013000081B400000000000000000000000162C3F37D0000123B000000000000000000000000000000000000002800000000spacewalk-utils/migrateSystemProfile.py"""
Multi-Org: Script to migrate server from one org to another

Script that connects to a given satellite db and migrates the
server and its history from source org to the destination org.

Copyright (c) 2008--2015 Red Hat, Inc.  Distributed under GPL.
Author: Pradeep Kilambi <pkilambi@redhat.com>

"""

import csv
import os
import sys
try:
    import xmlrpclib
except ImportError:
    import xmlrpc.client as xmlrpclib  # pylint: disable=F0401

from optparse import OptionParser, Option  # pylint: disable=deprecated-module
from uyuni.common.cli import getUsernamePassword, xmlrpc_login, xmlrpc_logout

# Make the legacy RHN library path importable when the package is not
# installed site-wide.
_topdir = '/usr/share/rhn'
if _topdir not in sys.path:
    sys.path.append(_topdir)

# XML-RPC server proxy; initialized in main().
client = None
# Verbosity flag, set to 1 by --verbose.
DEBUG = 0

# Command-line options accepted by this script.
options_table = [
    Option("-v", "--verbose",       action="count",
           help="Increase verbosity"),
    Option("-u", "--username",       action="store",
           help="Satellite/Org Admin username"),
    Option("-p", "--password",       action="store",
           help="Satellite/Org Admin password"),
    Option("--satellite",       action="store",
           help="Satellite server to run migration"),
    Option("--systemId",               action="append",
           help="client system to migrate"),
    Option("--to-org-id",          action="store",
           help="Destination Org ID"),
    Option("--csv",                action="store",
           help="CSV File to process"),
]

# Expected field order for each row of the CSV supplied via --csv.
_csv_fields = ['systemId', 'to-org-id']

def main():
    """Parse command-line options and migrate systems between orgs.

    Builds the migration work list either from a CSV file (--csv) or
    from --systemId/--to-org-id, logs into the satellite's XML-RPC API,
    and migrates each system.

    Returns None on success (exit status 0 via the __main__ guard) and
    1 on a usage error.
    """
    global client, DEBUG
    parser = OptionParser(option_list=options_table)

    (options, _args) = parser.parse_args()

    if options.satellite:
        satellite_host = options.satellite
    else:
        # Default to this machine's own hostname.
        satellite_host = os.uname()[1]

    if options.verbose:
        DEBUG = 1

    satellite_url = "http://%s/rpc/api" % satellite_host
    if DEBUG:
        print("Connecting to %s" % satellite_url)

    client = xmlrpclib.Server(satellite_url, verbose=0)

    if options.csv:
        migrate_data = read_csv_file(options.csv)
    else:
        # Built below from --systemId/--to-org-id.
        migrate_data = []

    if not options.csv:
        if not options.systemId:
            print("Missing --systemId")
            return 1

        if not options.to_org_id:
            # BUGFIX: this path previously returned None, which made the
            # script exit with status 0 despite the missing mandatory
            # argument; report the usage error via exit status 1 like the
            # --systemId check above.
            print("Missing Destination org id")
            return 1
        to_org_id = options.to_org_id

        migrate_data = [[options.systemId, to_org_id]]

    username, password = getUsernamePassword(options.username,
                                             options.password)

    sessionKey = xmlrpc_login(client, username, password)

    if not migrate_data:
        sys.stderr.write("Nothing to migrate. Exiting.. \n")
        sys.exit(1)

    for server_id, to_org_id in migrate_data:
        # --systemId may be given multiple times (action="append"), so the
        # id can be either a single value or a list of values.
        if isinstance(server_id, list):
            server_id = list(map(int, server_id))
        else:
            server_id = [int(server_id)]
        # The old try/except that immediately re-raised was a no-op and
        # has been removed; migrate_system() handles XML-RPC faults.
        migrate_system(sessionKey, int(to_org_id), server_id)

    if DEBUG:
        print("Migration Completed successfully")
    xmlrpc_logout(client, sessionKey)

    return None

def migrate_system(key, newOrgId, server_ids):
    """Ask the satellite to move ``server_ids`` into org ``newOrgId``.

    Prints a debug line when verbose mode is enabled.  On an XML-RPC
    fault the error is written to stderr and the process exits with
    status -1.
    """
    if DEBUG:
        print("Migrating systemIds %s to Org %s" % (server_ids, newOrgId))
    try:
        client.org.migrateSystems(key, newOrgId, server_ids)
    except xmlrpclib.Fault as fault:
        sys.stderr.write("Error: %s\n" % fault.faultString)
        sys.exit(-1)



def lookup_server(key, from_org_id):
    """Print a table of the systems registered in ``from_org_id``.

    Exits with status 1 when the org has no registered systems;
    otherwise returns the rows from the API call.
    """
    # TODO: replace with an api call
    rows = client.org.listServerByOrg(key, from_org_id)
    if not rows:
        sys.stderr.write("No Systems registered for Org-ID %s \n" % from_org_id)
        sys.exit(1)

    header_lines = [
        "                                    ",
        "Available Systems for Org-ID: %s " % from_org_id,
        "------------------------------------",
        " Server-ID      Server-Name         ",
        "------------------------------------",
    ]
    for line in header_lines:
        print(line)
    for entry in rows:
        print(" %s   %s " % (entry['id'], entry['name']))
    print("--------------------------------------------")

    return rows


def read_csv_file(csv_file):
    """Parse the given CSV file into a list of [systemId, to-org-id] rows.

    Rows whose field count does not match ``_csv_fields`` are reported
    on stderr and skipped.
    """
    csv_data = []
    # Context manager closes the file handle (previously leaked).
    with open(csv_file) as f_csv:
        reader = csv.reader(f_csv)
        for data in reader:
            if len(data) != len(_csv_fields):
                # BUGFIX: the original interpolated into a format string
                # with no placeholder ("Invalid Data.Skipping line .. \n"
                # % data), which raised TypeError on the first malformed
                # row instead of skipping it.
                sys.stderr.write("Invalid Data. Skipping line: %s\n" % data)
                continue
            csv_data.append(data)
    return csv_data

# Script entry point: exit status is main()'s return value, or 0 for None.
if __name__ == '__main__':
    sys.exit(main() or 0)
07070100000014000081B400000000000000000000000162C3F37D00004501000000000000000000000000000000000000002D00000000spacewalk-utils/spacewalk-clone-by-date.sgml<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN">
<refentry>

<RefMeta>
<RefEntryTitle>spacewalk-clone-by-date</RefEntryTitle><manvolnum>8</manvolnum>
<RefMiscInfo>Version 1.0</RefMiscInfo>
</RefMeta>

<RefNameDiv>
<RefName><command>spacewalk-clone-by-date</command></RefName>
<RefPurpose>
<emphasis>spacewalk-clone-by-date</emphasis> is DEPRECATED. Please use Content Lifecycle Management Tool.
Utility for cloning errata by date (For RHEL5 and above).
</RefPurpose>
</RefNameDiv>


<RefSynopsisDiv>
<Synopsis>
    <cmdsynopsis>
        <command>spacewalk-clone-by-date</command>
        <arg>options <replaceable>...</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-a <replaceable><optional>SRC_PARENT</optional> DEST_PARENT</replaceable></arg>
	<arg>--parents=<replaceable><optional>SRC_PARENT</optional> DEST_PARENT</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-b <replaceable>PKG1,PKG2,PKG3</replaceable></arg>
        <arg>--blacklist=<replaceable>PKG1,PKG2,PKG3</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-c <replaceable>CONFIGFILE</replaceable></arg>
        <arg>--config=<replaceable>CONFIGFILE</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-d=<replaceable>YYYY-MM-DD</replaceable></arg>
        <arg>--to_date=<replaceable>YYYY-MM-DD</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-e</arg><arg>--errata</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-h</arg><arg>--help</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-j</arg><arg>--dry-run</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-k</arg><arg>--skip_depsolve</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-l <replaceable>SRC DEST <optional>DEST_NAME <optional>DEST_SUMMARY <optional>DEST_DESCRIPTION</optional></optional></optional></replaceable></arg>
	<arg>--channels=<replaceable>SRC DEST <optional>DEST_NAME <optional>DEST_SUMMARY <optional>DEST_DESCRIPTION</optional></optional></optional></replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-m</arg><arg>--sample-config</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-n</arg><arg>--no-errata-sync</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-o</arg><arg>--security_only</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-p <replaceable>PASSWORD</replaceable></arg>
        <arg>--password=<replaceable>PASSWORD</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-r <replaceable>PKG1,PKG2,PKG3</replaceable></arg>
        <arg>--removelist=<replaceable>PKG1,PKG2,PKG3</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-u <replaceable>USERNAME</replaceable></arg>
        <arg>--username=<replaceable>USERNAME</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-v</arg><arg>--validate</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-x</arg><arg>--skip-errata-depsolve</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-y</arg><arg>--assumeyes</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-z</arg><arg>--use-update-date</arg>
    </cmdsynopsis>
</Synopsis>
</RefSynopsisDiv>

<RefSect1><Title>Description</Title>

    <para>
        <emphasis>spacewalk-clone-by-date</emphasis> is DEPRECATED. Please use Content Lifecycle Management Tool.
    </para>
    <para>
        <emphasis>spacewalk-clone-by-date</emphasis> tool, clones all the errata belonging to a specified channel, as of a specific date.
    </para>

    <para>
        Script to clone all errata belonging to software channels as of a specific date, attempting to ensure any added packages have their dependencies satisfied. Any destination channels that do not exist will be created.
    </para>
    <para>
        By specifying channels on the command line, only a single channel tree (a base channel and its children) can be cloned with a single command. If you would like to specify multiple trees within a single command, simply use a configuration file.  See --sample-config for a sample.
    </para>
    <para>
        All options can either be specified in the configuration file or via command line.  Any option specified via command line will override a configuration file value with the exception of channels. If a configuration file is specified, --channels is not a valid command line argument.
    </para>
    <para>
        Please note that spacewalk-clone-by-date will pull in any packages or errata needed to resolve dependencies for any errata being cloned. It is therefore possible for the cloned channel(s) to end up with errata and packages which are newer than the specified date. This is expected behavior.
    </para>
    <para>
        Spacewalk-clone-by-date is a tool that provides a best-effort attempt at cloning valid and dependency-complete channels. However, there is not enough information available to guarantee that the results will always be completely dependency-complete, especially if used with the --errata, --security_only, --blacklist, or --removelist options. This is a tool to assist Administrators with channel creation, it cannot replace them. Cases where spacewalk-clone-by-date cannot completely resolve all potential dependencies include but are not limited to: if the original channel is not dependency-complete, if the original channel does not have or contains incomplete errata information, or if packages are not strictly dependent on each other but conflict with specific versions.
    </para>
    <para>
	Note: Unlike previous versions spacewalk-clone-by-date now clones entire errata that are determined to be required when doing dependency resolution, not just single packages. This solves a number of dependency-completeness problems but is still not a perfect solution. This may mean that errata end up being cloned that do not meet your specifications (for example, that were added after the --to_date or are not security errata when used with --security_only). This is expected behavior. The only way to avoid this is to run with --skip_depsolve and manually resolve any dependency cloning that may be required.
    </para>
    <para>
        The addition of the --skip-errata-depsolve switch allows the user to control whether adding an erratum due to dependency resolution, will add all of that erratum's packages to the dependency-resolution list. The default is to add all such discovered packages. As for dependency-resolution in general, this may result in more packages and errata being added to the cloned channel than desired. The user can avoid this by specifying --skip-errata-depsolve, manually resolving any dependency issues that may be encountered.
    </para>
    <para>
	Note: spacewalk-clone-by-date does not support RHEL4 and earlier versions. It's a limitation of this tool. Spacewalk-clone-by-date is intended for use with Red Hat channels. There is no guarantee that it will work properly with channels populated with content from third party sources.
    </para>

</RefSect1>

<RefSect1><Title>Options</Title>
<variablelist>
    <varlistentry>
        <term>-h, --help</term>
        <listitem>
            <para>Display the help screen with a list of options.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-a <replaceable><optional>SRC_PARENT</optional> DEST_PARENT</replaceable>, --parents=<replaceable><optional>SRC_PARENT</optional> DEST_PARENT</replaceable></term>
        <listitem>
            <para>The parent channels that will be used for dependency resolution and that newly cloned channels will be created as children of. No changes will be made to these channels unless dependency resolution demands it. If only the destination parent channel is specified then the source parent channel will be automatically chosen as the channel the destination parent channel was cloned from. Both of these channels must already exist. The config file equivalent to this option is specifying '"existing-parent-do-not-modify": true' for a parent channel.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-b <replaceable>PKG1,PKG2,PKG3</replaceable>, --blacklist=<replaceable>PKG1,PKG2,PKG3</replaceable></term>
        <listitem>
            <para>Comma separated list of Perl-style regular expressions used to match package names to be removed after cloning.  Only newly added packages will be removed if they are on the blacklist.  <emphasis>Dependency resolution is not ensured on resulting repository.</emphasis>
            </para>
            <para>
                Note:  This option is global across all destination channels.  To specify per channel blacklists, you must use a configuration file.  See --sample-config for an example.
            </para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-c <replaceable>FILE</replaceable>, --config=<replaceable>FILE</replaceable></term>
        <listitem>
            <para>Configuration file holding parameters, see --sample-config for an example.
                      Any command-line parameters override those in the specified config file.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-d <replaceable>YYYY-MM-DD</replaceable>, --to_date=<replaceable>YYYY-MM-DD</replaceable></term>
        <listitem>
            <para>All errata on or before the specified date will be cloned if it does not already exist in the destination channel(s). If this option is omitted no errata will be cloned (unless dependency resolution demands it).
            </para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-e  --errata</term>
        <listitem>
            <para>Clone only specified errata (and their dependencies). A comma-separated list (e.g. --errata=RHSA-2014:0043,RHBA-2014:0085).</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-j --dry-run</term>
        <listitem>
            <para>Creates a file for each pair of channels in the working directory that comprises the list of erratas that are to be cloned. No actual errata cloning takes place.</para>
            <para>Warning: If some of the destination channels do not exist, they will be created with the original package set.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-k  --skip_depsolve</term>
        <listitem>
            <para>Do not attempt to ensure added packages have needed dependencies (Not recommended). </para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-l <replaceable>SRC_LABEL DEST_LABEL <optional>DEST_NAME <optional>DEST_SUMMARY <optional>DEST_DESCRIPTION</optional></optional></optional></replaceable>, --channels=<replaceable>SRC_LABEL DEST_LABEL <optional>DEST_NAME <optional>DEST_SUMMARY <optional>DEST_DESCRIPTION</optional></optional></optional></replaceable></term>
        <listitem>
            <para>Space separated list of source channel and destination channel.  Can be specified multiple times to provide base channel and child channel pairs of a single channel tree.  Can optionally include destination channel name, summary, and description, in that order.  To specify more than one channel tree, specify a config file.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-m  --sample-config</term>
        <listitem>
            <para>Generate a sample configuration file.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-n --no-errata-sync</term>
        <listitem>
            <para>Do not automatically synchronize the package list of cloned errata with their originals. This may make spacewalk-clone-by-date have unexpected results if the original errata have been updated (e.g.: syncing another architecture for a channel) since the cloned errata were created. If omitted we will synchronize the cloned errata with the originals to ensure the expected packages are included (default).
            </para>
	    <para>Errata clones do not necessarily get updated by satellite-syncing new architectures of existing channels. Another example would be if the errata contains some packages in one channel (say the base channel) and additional packages in another channel (say a child channel). Or it may be due to simply adding or removing packages from a custom errata that you have already created a clone of. Since cloned errata are not automatically updated when the original errata are, over time they may become out-of-sync and require synchronization in order for associating the cloned errata with a cloned channel to have the normally expected effect.
            </para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-o  --security_only</term>
        <listitem>
            <para>Clone only security errata (and their dependencies).</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-p <replaceable>PASSWORD</replaceable>, --password=<replaceable>PASSWORD</replaceable></term>
        <listitem>
            <para>password of user that has administrative access.  If not provided, password will be prompted for.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-r <replaceable>PKG1,PKG2,PKG3</replaceable>, --removelist=<replaceable>PKG1,PKG2,PKG3</replaceable></term>
        <listitem>
            <para>Comma separated list of Perl-style regular expressions used to match package names to be removed after cloning.  All packages are considered for removal, even those not added by errata/channel cloning.  <emphasis>Dependency resolution is not ensured on resulting repository.</emphasis>
            </para>
            <para>
                Note:  This option is global across all destination channels. To specify per channel removelists, you must use a configuration file.  See --sample-config for an example.
            </para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-u <replaceable>USERNAME</replaceable>, --username=<replaceable>USERNAME</replaceable></term>
        <listitem>
            <para>username of user that has administrative access.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-v  --validate</term>
        <listitem>
            <para>Run repoclosure on all provided channels.  This overrides --to_date and will not perform any cloning.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-x --skip-errata-depsolve</term>
        <listitem>
            <para>When pulling in an erratum to satisfy dependency-resolution, <emphasis>DO NOT</emphasis> add that erratum's packages to the list of packages to do dependency-resolution against. This will result in fewer packages/errata being included for dependency-resolution (sometimes <emphasis>MANY</emphasis> fewer) at the possible expense of a cloned channel that is not dependency-complete. If omitted, we will add an erratum's packages to the list required for dependency-resolution and recurse on the list (default).
            </para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-y  --assumeyes</term>
        <listitem>
            <para>Instead of asking for confirmation before cloning a channel or errata, continue uninterrupted.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-z  --use-update-date</term>
        <listitem>
            <para>While cloning errata by date, clone all errata that have last been updated on or before the date provided by to_date. If omitted will use issue date of errata (default).</para>
        </listitem>
    </varlistentry>
</variablelist>
</RefSect1>


<RefSect1><Title>Examples</Title>
<example>
    <title>Clone a base channel and child channel to 2008-12-20 with a blacklist excluding all versions of squid, and all versions of any package that starts with 'sendmail'.</title>
    <para>spacewalk-clone-by-date  --channels=rhel-x86_64-server-5 clone-rhel --channels=rhn-tools-rhel-x86_64-server-5 clone-tools 'Clone Tools Name' 'Clone Tools Summary' 'Clone Tools Description'  --username admin --password redhat --to_date=2008-12-20 --blacklist='sendmail.*,squid,compat-libstdc\+\+'</para>
</example>
<example>
    <title>Clone with options completely from a config file.</title>
    <para>spacewalk-clone-by-date  --config=/etc/clone.conf</para>
</example>
<example>
    <title>Clone while overriding some options from the command line.</title>
    <para>spacewalk-clone-by-date  --config=/etc/clone.conf --username rocky --password squirrel --to_date=2010-10-09</para>
</example>
</RefSect1>

<RefSect1><Title>Log Output</Title>
    <para>
        In addition to writing to stdout, spacewalk-clone-by-date writes a detailed description of every run to a log file. This file can be found at <emphasis>/var/log/rhn/errata-clone.log</emphasis>
        Log output includes:
    </para>
    <itemizedlist>
        <listitem><para>The command-line arguments or config-file contents used for the run</para></listitem>
        <listitem><para>The output of each round of dependency-checking that adds packages/errata</para></listitem>
        <listitem><para>The complete list of all errata and packages cloned into each channel</para></listitem>
    </itemizedlist>
</RefSect1>

<RefSect1><Title>Authors</Title>
<simplelist>
    <member>Justin Sherrill <email>jsherrill@redhat.com</email></member>
    <member>Stephen Herr <email>sherr@redhat.com</email></member>
    <member>Grant Gainey <email>ggainey@redhat.com</email></member>
</simplelist>
</RefSect1>
</refentry>
07070100000015000081B400000000000000000000000162C3F37D000011BF000000000000000000000000000000000000002F00000000spacewalk-utils/spacewalk-hostname-rename.sgml<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN" [
<!ENTITY SCRIPTCOMMAND "spacewalk-hostname-rename">
<!ENTITY PRODUCT "Red Hat Satellite or Spacewalk server">

]>
<RefEntry>

<RefMeta>
    <RefEntryTitle>&SCRIPTCOMMAND;</RefEntryTitle>
    <ManVolNum>8</ManVolNum>
    <RefMiscInfo>Version 0.1</RefMiscInfo>
</RefMeta>

<RefNameDiv>
    <RefName><command>&SCRIPTCOMMAND;</command></RefName>
    <RefPurpose>
    Reconfigures &PRODUCT; to use a different hostname/ip address.
    </RefPurpose>
</RefNameDiv>

<RefSynopsisDiv>
    <Synopsis>
        <cmdsynopsis>
            <command>&SCRIPTCOMMAND;</command>
            <arg choice='plain'><replaceable>IP_ADDRESS</replaceable></arg>
            <arg>--ssl-country=<replaceable>SSL_COUNTRY</replaceable></arg>
            <arg>--ssl-state=<replaceable>SSL_STATE</replaceable></arg>
            <arg>--ssl-org=<replaceable>SSL_ORG</replaceable></arg>
            <arg>--ssl-orgunit=<replaceable>SSL_ORGUNIT</replaceable></arg>
            <arg>--ssl-email=<replaceable>SSL_EMAIL</replaceable></arg>
            <arg>--ssl-ca-password=<replaceable>SSL_CA_PASSWORD</replaceable></arg>
        </cmdsynopsis>
        <cmdsynopsis>
            <arg>-h</arg> <arg>--help</arg>
        </cmdsynopsis>
    </Synopsis>
</RefSynopsisDiv>

<RefSect1>
    <Title>Description</Title>
    <para> After a system hostname or default IP gets changed on a &PRODUCT;, it is necessary to run &SCRIPTCOMMAND; to reconfigure to the new settings. Afterwards it may be necessary to reconfigure RHN Proxies and clients registered to the server.</para>
    <para>&SCRIPTCOMMAND; takes one mandatory argument - <replaceable>IP_ADDRESS</replaceable> regardless of whether the IP address has changed or not. If there is a need to generate a new SSL certificate, all necessary information will be asked interactively, unless it is specified by the options.
When the system hostname has not changed, the re-generation of a new SSL server certificate is not necessary. However, if at least one <option>--ssl-*</option> option is specified, certificate generation is forced.</para>
</RefSect1>

<RefSect1>
    <Title>Options</Title>
    <variablelist>
        <varlistentry>
            <term><replaceable>IP_ADDRESS</replaceable></term>
            <listitem>
                <para>Default IP address used for the &PRODUCT;.  Mandatory even if the IP address has not changed</para>
            </listitem>
        </varlistentry>
        <varlistentry>
            <term>-h, --help</term>
            <listitem>
                <para>Display the help screen with a list of options.</para>
            </listitem>
        </varlistentry>
    </variablelist>
    <para>Setting one of the following options will force re-generation of the SSL certificate:</para>
    <variablelist>
        <varlistentry>
            <term>--ssl-country=<replaceable>SSL_COUNTRY</replaceable></term>
            <listitem>
                <para>Two letter country code to be used in the new SSL certificate.</para>
            </listitem>
        </varlistentry>
        <varlistentry>
            <term>--ssl-state=<replaceable>SSL_STATE</replaceable></term>
            <listitem>
                <para>State to be used in the new SSL certificate.</para>
            </listitem>
        </varlistentry>
        <varlistentry>
            <term>--ssl-org=<replaceable>SSL_ORG</replaceable></term>
            <listitem>
                <para>Organization name to be used in the new SSL certificate.</para>
            </listitem>
        </varlistentry>
        <varlistentry>
            <term>--ssl-orgunit=<replaceable>SSL_ORGUNIT</replaceable></term>
            <listitem>
                <para>Organization unit name to be used in the new SSL certificate.</para>
            </listitem>
        </varlistentry>
        <varlistentry>
            <term>--ssl-email=<replaceable>SSL_EMAIL</replaceable></term>
            <listitem>
                <para>Email to be used in the new SSL certificate.</para>
            </listitem>
        </varlistentry>
        <varlistentry>
            <term>--ssl-ca-password=<replaceable>SSL_CA_PASSWORD</replaceable></term>
            <listitem>
                <para>Password of the SSL CA to sign the new SSL certificate.</para>
            </listitem>
        </varlistentry>
    </variablelist>
</RefSect1>

<RefSect1>
    <Title>Authors</Title>
    <simplelist>
        <member>Tomáš Leštách<email>tlestach@redhat.com</email></member>
    </simplelist>
</RefSect1>
</RefEntry>
07070100000016000081FD00000000000000000000000162C3F37D00002FA9000000000000000000000000000000000000002A00000000spacewalk-utils/spacewalk-export-channels#!/usr/bin/python3
# -*- coding: UTF-8 -*-
# Utility for exporting Sat5-channel-data
#
# Copyright (c) 2014--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#

from optparse import Option, OptionParser
import os
import shutil
import stat
import subprocess
import sys

try:
    from spacewalk.common.rhnLog import initLOG, log_debug
    from spacewalk.common.rhnConfig import CFG, initCFG
    from spacewalk.server import rhnSQL
except:
    _LIBPATH = "/usr/share/rhn"
    # add to the path if need be
    if _LIBPATH not in sys.path:
        sys.path.append(_LIBPATH)
    from common import CFG, initCFG, initLOG, log_debug
    from server import rhnSQL

# Destination for this tool's log output (written via initLOG/log_debug).
LOG_FILE = '/var/log/rhn/spacewalk-channel-export.log'
# try to do hard links
# Module-wide fallback flag: False means "attempt hard links first"; it is
# flipped to True by cp_file() the first time os.link() fails (e.g. export
# dir on another filesystem), after which real copies are used for the run.
real_copy = False


def db_init():
    # Read /etc/rhn/rhn.conf and open the connection to the backend database.
    initCFG()
    rhnSQL.initDB()

# All packages of one channel with name/EVR/arch, checksum and on-disk path.
# The two LEFT JOINs detect clone relationships: when the channel is a clone
# and the same package also exists in the clone's original channel,
# ocp.package_id and cc.original_id come back non-NULL (used by
# export_packages() to skip packages available from the parent).
_query_packages = rhnSQL.Statement("""
select p.id, p.org_id, p.package_size, p.path, c.checksum, c.checksum_type, n.name, evr.epoch, evr.version, evr.release, a.label as arch, ocp.package_id, cc.original_id
from rhnPackage p join rhnChecksumView c on p.checksum_id = c.id
join rhnPackageName n on p.name_id = n.id
join rhnPackageEVR evr on p.evr_id = evr.id
join rhnPackageArch a on p.package_arch_id = a.id
join rhnChannelPackage cp on cp.package_id = p.id
left join rhnChannelCloned cc on cc.id = cp.channel_id
left join rhnChannelPackage ocp on ocp.channel_id = cc.original_id
     and ocp.package_id = cp.package_id
where cp.channel_id = :channel_id
order by n.name
""")

# %s is interpolated with a comma-separated list of org ids; the values come
# from optparse options typed as int (-o/--org_id), not raw user strings.
_query_organizations = """
select id, name from web_customer where id in (%s)
"""

_query_all_organizations = rhnSQL.Statement("""
select id, name from web_customer
""")

# change to left join in case we're interested into empty custom channels
_query_channels = rhnSQL.Statement("""
select c.id, c.label, c.name, count(cp.package_id) package_count from rhnChannel c join rhnChannelPackage cp on cp.channel_id = c.id where org_id = :org_id group by c.id, c.label, c.name order by label
""")

# External yum repositories attached to a channel.
_query_repos = rhnSQL.Statement("""
select cs.id, cs.label, cs.source_url from rhnContentSource cs join rhnChannelContentSource ccs on ccs.source_id = cs.id where ccs.channel_id = :channel_id
order by cs.label
""")


def export_packages(options):
    """Export the minimal set of RPMs plus CSV metadata for all channels.

    For every selected organization and channel, each package is classified:
      * present in an attached external (non-file://) repo -> not exported,
      * present in the channel's clone original            -> not exported,
      * otherwise -> the RPM is copied/hard-linked into the dump directory.

    A top-level export.csv (org/channel inventory) and one <channel_id>.csv
    per channel are written; unless --skip-repogeneration was given, yum
    repodata is generated for each channel directory.
    """

    if not os.path.exists(options.directory):
        os.makedirs(options.directory)
    # NOTE(review): CSV handles are closed explicitly rather than with a
    # 'with' block; an exception mid-export leaves them open.
    top_level_csv = open(os.path.join(options.directory, 'export.csv'), 'w')
    top_level_csv.write("org_id,channel_id,channel_label,channel_name\n")

    # Restrict to orgs named via -o/--org_id, otherwise take every org.
    if options.org_ids:
        h = rhnSQL.prepare(_query_organizations % ','.join(map(str, options.org_ids)))
        h.execute()
        orgs = h.fetchall_dict() or []
    else:
        h = rhnSQL.prepare(_query_all_organizations)
        h.execute()
        orgs = h.fetchall_dict() or []

    for org in orgs:
        log(1, "Processing organization: %s" % org["name"])
        h = rhnSQL.prepare(_query_channels)
        h.execute(org_id=org["id"])
        channels = h.fetchall_dict() or []

        for channel in channels:
            log(1, " * channel: %s with: %d packages" % (channel["label"], channel["package_count"]))
            h = rhnSQL.prepare(_query_repos)
            h.execute(channel_id=channel["id"])
            repos = h.fetchall_dict() or []
            if not repos:
                log(2, "  - no repos associated")
            # repo id -> set of NEVRA strings available from that repo.
            repo_packages = {}
            # Per-channel classification counters (reported at the end).
            package_count = {}
            package_count["repo"] = 0
            package_count["export"] = 0
            package_count["parent"] = 0
            package_count["missing"] = 0
            for repo in repos:
                if repo['source_url'].startswith('file://'):
                    # Local repos would not be reachable from the import side.
                    log(2, "  - local repo: %s. Skipping." % repo['label'])
                    continue
                repo_packages[repo['id']] = list_repo_packages(repo['label'], repo['source_url'])
                log(2, "  - repo %s with: %s packages." % (repo['label'], str(len(repo_packages[repo['id']]))))

            # Dump layout: <export-dir>/<org_id>/<channel_id>/ plus a
            # sibling <channel_id>.csv inventory file.
            channel_dir = os.path.join(options.directory, str(org["id"]), str(channel["id"]))
            if not os.path.exists(channel_dir):
                os.makedirs(channel_dir)
            top_level_csv.write("%d,%d,%s,%s\n" % (org['id'], channel['id'], channel['label'], channel['name']))
            channel_csv = open(channel_dir + ".csv", 'w')
            channel_csv.write("org_id,channel_id,channel_label,package_nevra,package_rpm_name,in_repo,in_parent_channel\n")

            h = rhnSQL.prepare(_query_packages)
            h.execute(channel_id=channel["id"])

            # Stream package rows one at a time (channels can be very large).
            while 1:
                pkg = h.fetchone_dict()
                if not pkg:
                    break
                if pkg['path']:
                    abs_path = os.path.join(CFG.MOUNT_POINT, pkg['path'])
                    log(4, abs_path)
                    pkg['nevra'] = pkg_nevra(pkg)
                    if pkg['package_id']:
                        # Package also lives in the clone original channel.
                        package_count["parent"] += 1
                        if not options.exportedonly:
                            channel_csv.write("%d,%d,%s,%s,%s,%s,%s\n" % (org['id'], channel['id'], channel['label'], pkg['nevra'], os.path.basename(pkg['path']), '', pkg['original_id']))

                    else:
                        repo_id = pkgs_available_in_repos(pkg, repo_packages)
                        if repo_id is not None:
                            # Package is downloadable from an external repo.
                            package_count["repo"] += 1
                            if not options.exportedonly:
                                channel_csv.write("%d,%d,%s,%s,%s,%s,%s\n" % (org['id'], channel['id'], channel['label'], pkg['nevra'], os.path.basename(pkg['path']), repo_id, ''))

                        else:
                            # Must be exported from the local file system.
                            if not os.path.isfile(abs_path):
                                log(0, "   File missing on the file system: %s" % abs_path)
                                package_count["missing"] += 1
                                continue
                            package_count["export"] += 1
                            if options.size:
                                check_disk_size(abs_path, pkg['package_size'])
                            cp_to_export_dir(abs_path, channel_dir, options)
                            channel_csv.write("%d,%d,%s,%s,%s,%s,%s\n" % (org['id'], channel['id'], channel['label'], pkg['nevra'], os.path.basename(pkg['path']), '', ''))
            channel_csv.close()
            log(2, "  - pkgs available in external repos: %d" % package_count["repo"])
            log(2, "  - pkgs available in clone originals: %d" % package_count["parent"])
            log(2, "  - pkgs exported: %d" % package_count["export"])
            log(2, "  - pkgs to export missing on file system: %d" % package_count["missing"])
            if options.skiprepogeneration:
                log(2, "  - skipping repo generation")
            else:
                log(2, "  - generating repository metadata")
                create_repository(channel_dir, options)
    top_level_csv.close()


def cp_file(source, target):
    """Place *source* at *target*, preferring a hard link over a copy.

    As soon as one os.link() call fails (typical when the export directory
    lives on a different filesystem), the module-wide ``real_copy`` flag is
    set and every subsequent call copies the file instead.
    """
    global real_copy
    if not real_copy:
        try:
            os.link(source, target)
            return
        except OSError:
            # Hard links do not work here; fall back to copying for good.
            real_copy = True
    shutil.copy(source, target)


def pkg_nevra(pkg):
    """Return the canonical NEVRA string for a package row.

    The format must stay identical to
    satellite_tools.reposync.ContentPackage.getNEVRA, because these strings
    are compared against repo metadata; a NULL epoch is rendered as '0'.
    """
    epoch = pkg['epoch'] if pkg['epoch'] is not None else '0'
    return '%s-%s:%s-%s.%s' % (
        pkg['name'], epoch, pkg['version'], pkg['release'], pkg['arch'])


def cp_to_export_dir(pkg_path, dir, options):
    """Copy (or hard-link) *pkg_path* into export directory *dir*.

    A target that already exists is left untouched unless --force was
    given, in which case it is removed and written again.
    """
    if not os.path.isfile(pkg_path):
        return
    target = os.path.join(dir, os.path.basename(pkg_path))
    if not os.path.isfile(target):
        cp_file(pkg_path, target)
    elif options.force:
        os.remove(target)
        cp_file(pkg_path, target)


def create_repository(repo_dir, options):
    """Generate yum repodata for *repo_dir* by running createrepo.

    Older createrepo releases do not understand --no-database, so the flag
    is only passed when it appears in ``createrepo --help`` output.
    createrepo's stdout is logged at level 3 and stderr at level 2, both
    indented by 4 spaces.
    """
    # Probe the help text directly instead of spawning a shell pipeline
    # ("createrepo --help | grep 'no-database' | wc -l") — no shell, and no
    # dependency on grep/wc being present.
    probe = subprocess.Popen(["createrepo", "--help"],
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    help_text = probe.communicate()[0].decode()

    cmd = ["createrepo", repo_dir]
    if 'no-database' in help_text:  # 'Our' createrepo understands --no-database
        cmd.insert(1, "--no-database")
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    split_and_log_to_level(3, 4, out.decode())
    split_and_log_to_level(2, 4, err.decode())


def pkgs_available_in_repos(pkg, repo_packages):
    """Return the id of the first repo whose package set contains this
    package's NEVRA, or None when no attached repo provides it.

    :param pkg: package row dict; pkg['nevra'] must already be filled in
    :param repo_packages: mapping of repo id -> set of NEVRA strings
    """
    # Iterate the mapping directly (no throwaway list() copy) and avoid
    # shadowing the builtin id().
    for repo_id, packages in repo_packages.items():
        if pkg['nevra'] in packages:
            return repo_id
    return None


def list_repo_packages(label, source_url):
    """Return the set of NEVRA strings available from the yum repo at
    *source_url*, via the spacewalk yum_src ContentSource plugin.

    Failures are non-fatal by design: an invalid URL or unreachable repo
    metadata is logged and an empty set returned, so the caller exports the
    affected packages instead of skipping them.
    """
    name = "yum_src"
    mod = __import__('spacewalk.satellite_tools.repo_plugins', globals(), locals(), [name])
    submod = getattr(mod, name)
    plugin = getattr(submod, "ContentSource")
    try:
        repo_plugin = plugin(source_url, label)
        packages = [p.getNEVRA() for p in plugin.list_packages(repo_plugin, [])]
        return set(packages)
    except ValueError:
        log(2, "   Invalid repo source_url ... %s" % source_url)
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit are
        # no longer swallowed; any repo-level failure still yields an empty
        # set (best-effort behavior preserved).
        log(2, "   Inaccessible repo metadata ... %s" % source_url)
    return set([])


def check_disk_size(abs_path, size):
    """Compare the on-disk size of *abs_path* with the expected *size*.

    Returns None when the file does not exist, 0 when the sizes match, and
    1 (after logging the mismatch) when they differ.
    """
    if not os.path.isfile(abs_path):
        return None
    actual = os.stat(abs_path)[stat.ST_SIZE]
    if actual == size:
        return 0
    log(0, "File size mismatch: %s (%s vs. %s)" % (abs_path, size, actual))
    return 1


def log(level, *args):
    """Record *args* in the rhn log file and, when the global command-line
    verbosity is at least *level*, also echo them comma-joined to stdout."""
    log_debug(level, *args)
    verbosity = options.verbose or 0
    if verbosity >= level:
        print(', '.join(str(arg) for arg in args))


def split_and_log_to_level(level, spaces, string):
    """Log each non-empty line of *string* at *level*, prefixed with
    *spaces* leading blanks."""
    indent = " " * spaces
    for line in string.split('\n'):
        if line:
            log(level, indent + line)


if __name__ == '__main__':

    # Command-line definition. -f/--force is a boolean flag: the previous
    # action="store" demanded an argument, contradicting both the man-page
    # synopsis and the way options.force is tested in cp_to_export_dir().
    options_table = [
        Option("-d", "--dir", action="store", dest="directory",
               help="Export directory, required"),
        Option("-e", "--exported-only", action="store_true", dest="exportedonly",
               help="CSV output will contain only exported packages (by default, CSV output contains all packages, even those available in external repositories and in clone original channels)"),
        Option("-f", "--force", action="store_true", dest="force",
               help="Overwrite exported package rpms, even if already present in the dump"),
        Option("-o", "--org_id", action="append", type="int", dest="org_ids",
               help="Export only organization related channels specified by its id"),
        Option("-q", "--quiet", action="store_const", const=0, dest="verbose",
               help="Run quietly"),
        Option("-s", "--skip-repogeneration", action="store_true", dest="skiprepogeneration",
               help="Repodata generation will be omitted for exported channels"),
        Option("-S", "--no-size", action="store_false", dest="size", default=True,
               help="Don't check package size"),
        Option("-v", "--verbose", action="count", default=1,
               help="Increase verbosity")]
    parser = OptionParser(option_list=options_table)
    (options, args) = parser.parse_args()
    if not options.directory:
        # The export directory is the only mandatory argument.
        print("Export directory has to be specified. Try --help:\n")
        parser.print_help()
        sys.exit(1)

    initLOG(LOG_FILE, options.verbose or 0)

    db_init()

    export_packages(options)

    sys.exit(0)
07070100000017000081B400000000000000000000000162C3F37D00001FC2000000000000000000000000000000000000003000000000spacewalk-utils/spacewalk-manage-snapshots.sgml<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN" [
<!ENTITY PRODUCT "Red Hat Satellite or Spacewalk server">
]>
<refentry>

<RefMeta>
<RefEntryTitle>spacewalk-manage-snapshots</RefEntryTitle><manvolnum>8</manvolnum>
<RefMiscInfo>Version 1.0</RefMiscInfo>
</RefMeta>

<RefNameDiv>
<RefName><command>spacewalk-manage-snapshots</command></RefName>
<RefPurpose>
Report on and purge snapshot-entries by age
</RefPurpose>
</RefNameDiv>

<RefSynopsisDiv>
<Synopsis>
    <cmdsynopsis>
        <command>spacewalk-manage-snapshots</command>
        <arg>-r</arg>
        <arg>-i <replaceable>INTERVAL-IN-DAYS</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <command>spacewalk-manage-snapshots</command>
        <arg>--interval-older-than=<replaceable>INTERVAL-IN-DAYS</replaceable></arg>
        <arg>--reports</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <command>spacewalk-manage-snapshots</command>
        <arg>-d <replaceable>DAYS</replaceable></arg>
        <arg>-b <replaceable>ROWS-PER-COMMIT</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <command>spacewalk-manage-snapshots</command>
        <arg>--delete-older-than=<replaceable>DAYS</replaceable></arg>
        <arg>--batch-size=<replaceable>ROWS-PER-COMMIT</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <command>spacewalk-manage-snapshots</command>
        <arg>-h</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <command>spacewalk-manage-snapshots</command>
        <arg>--help</arg>
    </cmdsynopsis>
</Synopsis>
</RefSynopsisDiv>

<RefSect1><Title>Description</Title>
    <para>
        <emphasis>spacewalk-manage-snapshots</emphasis> reports on and purges snapshots stored in the &PRODUCT; instance on which the command is executed.
    </para>
    <para>
        Snapshots are used to rollback system state when changes go awry. The snapshot-related tables grow with every change to a system; over time, they can grow quite large. This command is provided to give the &PRODUCT; administrator a tool to cull old entries, since old snapshots are rarely of value.
    </para>
    <para>
        When <option>--reports</option> is specified, the tool reports on the current size of all snapshot-related tables, as well as snapshot-age. Snapshots are grouped into 'buckets' based on an interval expressed as days. The default interval is 90 days; this can be overridden using the <option>--interval-older-than</option> switch.
    </para>
    <para>
        When <option>--delete-older-than</option> is specified, the tool will arrange to delete all snapshots older than the specified number of days. The delete happens in batches; the default batch-size is 1000 snapshots. If a different batchsize is desired, this default can be overridden using the <option>--batch-size</option> switch.
    </para>
    <para>
        There are three reasons the delete happens in batches. First, in the presence of hundreds of thousands or more snapshots to be deleted, the resulting cascading deletes can take tens of minutes to hours. Committing in batches means that even if the tool is interrupted for some reason, it will have made progress. Second, deleting large numbers of rows in one transaction can have painful implications for the size of undo/redo logs and general application performance. And third, since the delete-process impacts the snapshot-table, it can interfere with system registrations and deletes. Committing in batches provides opportunities for other processing involving snapshots to continue in parallel with the cleanup process.
    </para>
    <para>
        When <option>--delete-older-than</option> is chosen, an entry is made to <emphasis>/var/log/rhn/spacewalk-manage-snapshots.log</emphasis> with the timestamp, executing-login, action, and results.
    </para>
    <para>
        <command>spacewalk-manage-snapshots</command> is expected to be run as <emphasis>root</emphasis>, on the &PRODUCT; instance. It relies on the content of <command>/etc/rhn/rhn.conf</command> and direct access to the &PRODUCT; database to accomplish its tasks.
    </para>
</RefSect1>

<RefSect1><Title>Options</Title>
<variablelist>
    <varlistentry>
        <term>-h, --help</term>
        <listitem>
            <para>Display the help screen with a list of options.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-b <replaceable>ROWS-PER-COMMIT</replaceable>, --batch-size=<replaceable>ROWS-PER-COMMIT</replaceable></term>
        <listitem>
            <para>Number of rows to delete per commit (default is 1000)</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-d <replaceable>DAYS</replaceable>, --delete-older-than=<replaceable>DAYS</replaceable></term>
        <listitem>
            <para>Snapshots DAYS old or older will be purged from the database</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-i <replaceable>INTERVAL-IN-DAYS</replaceable>, --interval-older-than=<replaceable>INTERVAL-IN-DAYS</replaceable></term>
        <listitem>
            <para>INTERVAL-in-days period to use for --reports (default is 90 days)</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-r, --reports</term>
        <listitem>
            <para>Report current table-sizes (in rows) for all snapshot-related tables and report on the last four --interval periods for snapshots</para>
        </listitem>
    </varlistentry>
</variablelist>
</RefSect1>

<RefSect1><Title>Examples</Title>
    <example>
        <title>Show the current state of snapshots on my system, including snapshots by 200-day buckets</title>
        <para># spacewalk-manage-snapshots --reports --interval-older-than 200</para>
        <screen>
                Table name :         rows
               RHNSNAPSHOT :       101872
        RHNSNAPSHOTCHANNEL :       305616
  RHNSNAPSHOTCONFIGCHANNEL :            0
 RHNSNAPSHOTCONFIGREVISION :            0
  RHNSNAPSHOTINVALIDREASON :            6
        RHNSNAPSHOTPACKAGE :     20886008
    RHNSNAPSHOTSERVERGROUP :       203744
            RHNSNAPSHOTTAG :            0

:   Snapshot info, 200-day interval    :
: age(days) :   systems :    snapshots :
:    1-200  :         3 :        71872 :
:  201-400  :         3 :        20000 :
:  401-600  :         3 :        10000 :
        </screen>
    </example>
    <example>
        <title>Delete all snapshots older than 400 days old, in batches of 1000 snapshots</title>
        <para># spacewalk-manage-snapshots --delete-older-than 401 --batch-size 1000</para>
        <screen>
time ./spacewalk-manage-snapshots -d 401 -b 1000
Deleting snapshots older than 401 days
      101872 snapshots currently
       10000 snapshots to be deleted, 1000 per commit
...       10000 snapshots left to purge
...        9000 snapshots left to purge
...        8000 snapshots left to purge
...        7000 snapshots left to purge
...        6000 snapshots left to purge
...        5000 snapshots left to purge
...        4000 snapshots left to purge
...        3000 snapshots left to purge
...        2000 snapshots left to purge
...        1000 snapshots left to purge
       91872 snapshots remain

real	0m26.544s
user	0m0.080s
sys	0m0.020s
        </screen>
    </example>
</RefSect1>

<RefSect1><Title>Log Output</Title>
    <para>
        When --delete-older-than is chosen, spacewalk-manage-snapshots writes its processing to a log file. This file can be found at <emphasis>/var/log/rhn/spacewalk-manage-snapshots.log</emphasis>
    </para>
    <para>
        Log output includes:
    </para>
    <itemizedlist>
        <listitem><para>Number of snapshots at start</para></listitem>
        <listitem><para>Number of snapshots to be deleted, and batch-size</para></listitem>
        <listitem><para>Each batch as it is committed</para></listitem>
        <listitem><para>Number of snapshots after completion</para></listitem>
    </itemizedlist>
</RefSect1>

<RefSect1><Title>Authors</Title>
<simplelist>
    <member>Grant Gainey <email>ggainey@redhat.com</email></member>
</simplelist>
</RefSect1>
</RefEntry>
07070100000018000081B400000000000000000000000162C3F37D000031BE000000000000000000000000000000000000002200000000spacewalk-utils/systemSnapshot.py"""
Delete Snapshots: Script to delete system snapshots.

This script using the XMLRPC APIs will connect to the Satellite and
list or delete system snapshots based on the parameters given by the user.

Copyright (c) 2009--2017 Red Hat, Inc.  Distributed under GPL.
Author: Brad Buckingham <bbuckingham@redhat.com>

"""

import os
import sys
from time import strptime
from datetime import datetime

from optparse import OptionParser, Option  # pylint: disable=deprecated-module
from uyuni.common.cli import getUsernamePassword, xmlrpc_login, xmlrpc_logout

try:
    import xmlrpclib
except ImportError:
    import xmlrpc.client as xmlrpclib  # pylint: disable=F0401

_topdir = '/usr/share/rhn'
if _topdir not in sys.path:
    sys.path.append(_topdir)

# XMLRPC server proxy; created in main() once the satellite URL is known.
client = None

# Command-line definition (optparse); date values use 'YYYYMMDDHH24MISS'.
options_table = [
    Option("-v", "--verbose",        action="count",
           help="Increase verbosity"),
    Option("-u", "--username",       action="store",
           help="Username"),
    Option("-p", "--password",       action="store",
           help="Password"),
    Option("-d", "--delete",         action="count",
           help="Delete snapshots."),
    Option("-l", "--list",           action="count",
           help="List snapshot summary."),
    Option("-L", "--long-list",           action="count",
           help="Display comprehensive snapshot list."),
    Option("-a", "--all",            action="count",
           help="Include all snapshots based on criteria provided."),
    Option("--start-date",           action="store",
           help="Include only snapshots taken on or after this date.  Must be in the format 'YYYYMMDDHH24MISS'."),
    Option("--end-date",             action="store",
           help="Include only snapshots taken on or before this date. Must be in the format 'YYYYMMDDHH24MISS'."),
    Option("--satellite",            action="store",
           help="Server."),
    Option("--system-id",            action="append",
           help="System Id."),
    Option("--snapshot-id",          action="append",
           help="Snapshot Id."),
]

# Parsed option values; populated in main() and read by the helpers below.
options = None


def main():
    """Entry point: parse the command line, log in to the satellite's XMLRPC
    API, dispatch to the matching list/delete routine, then log out."""

    global client, options

    parser = OptionParser(option_list=options_table)
    (options, _args) = parser.parse_args()
    processCommandLine()

    # XMLRPC API endpoint of the target server (defaults to the local
    # hostname, see processCommandLine()).
    satellite_url = "http://%s/rpc/api" % options.satellite

    if options.verbose:
        print(("start date=", options.start_date))
        print(("end date=", options.end_date))
        print(("connecting to %s" % satellite_url))

    client = xmlrpclib.Server(satellite_url, verbose=0)

    # Prompt interactively for any credential missing from the command line.
    username, password = getUsernamePassword(options.username,
                                             options.password)

    sessionKey = xmlrpc_login(client, username, password, options.verbose)

    # Dispatch precedence: --all, then --system-id, then --snapshot-id.
    if options.all:

        if options.start_date and options.end_date:
            deleteAllBetweenDates(sessionKey, options.start_date,
                                  options.end_date)

        elif options.start_date:
            deleteAllAfterDate(sessionKey, options.start_date)

        else:
            deleteAll(sessionKey)

    elif options.system_id:

        if options.start_date and options.end_date:
            deleteBySystemBetweenDates(sessionKey, options.system_id,
                                       options.start_date, options.end_date)

        elif options.start_date:
            deleteBySystemAfterDate(sessionKey, options.system_id,
                                    options.start_date)

        else:
            deleteBySystem(sessionKey, options.system_id)

    elif options.snapshot_id:

        deleteBySnapshotId(sessionKey, options.snapshot_id)

    if options.verbose:
        print("Delete Snapshots Completed successfully")

    xmlrpc_logout(client, sessionKey, options.verbose)


def deleteAllBetweenDates(sessionKey, startDate, endDate):
    """
     Delete all snapshots where the snapshot was created either on or between
     the dates provided.
    """
    if options.verbose:
        print("...executing deleteAllBetweenDates...")

    systems = client.system.listSystems(sessionKey)

    for system in systems:

        # Snapshots of this system inside the date window; only consumed by
        # the --list/--long-list output below.
        snapshots = client.system.provisioning.snapshot.listSnapshots(
            sessionKey, system.get('id'), {"startDate": startDate,
                                           "endDate": endDate})

        if options.list:
            listSnapshots(system.get('id'), snapshots)
        elif options.long_list:
            listSnapshotsLong(system.get('id'), snapshots)
        else:
            # NOTE(review): this deleteSnapshots call carries no system id,
            # so each loop iteration appears to repeat the same account-wide
            # delete for the date range (compare deleteBySystemBetweenDates,
            # which passes systemId) — confirm intent against the API docs.
            client.system.provisioning.snapshot.deleteSnapshots(sessionKey,
                                                                {"startDate": startDate, "endDate": endDate})


def deleteAllAfterDate(sessionKey, startDate):
    """
     Delete all snapshots where the snapshot was created either on or after
     the date provided.
    """
    if options.verbose:
        print("...executing deleteAllAfterDate...")

    date_filter = {"startDate": startDate}
    for system in client.system.listSystems(sessionKey):
        system_id = system.get('id')
        snapshots = client.system.provisioning.snapshot.listSnapshots(
            sessionKey, system_id, date_filter)

        if options.list:
            listSnapshots(system_id, snapshots)
        elif options.long_list:
            listSnapshotsLong(system_id, snapshots)
        else:
            client.system.provisioning.snapshot.deleteSnapshots(
                sessionKey, date_filter)


def deleteAll(sessionKey):
    """
     Delete all snapshots across all systems that the user has access to.
    """
    if options.verbose:
        print("...executing deleteAll...")

    for system in client.system.listSystems(sessionKey):
        system_id = system.get('id')
        snapshots = client.system.provisioning.snapshot.listSnapshots(
            sessionKey, system_id, {})

        if options.list:
            listSnapshots(system_id, snapshots)
        elif options.long_list:
            listSnapshotsLong(system_id, snapshots)
        else:
            client.system.provisioning.snapshot.deleteSnapshots(
                sessionKey, {})


def deleteBySystemBetweenDates(sessionKey, systemIds, startDate, endDate):
    """
     Delete the snapshots for the systems provided where the snapshot was
     created either on or between the dates provided.
    """
    if options.verbose:
        print("...executing deleteBySystemBetweenDates...")

    date_filter = {"startDate": startDate, "endDate": endDate}
    for raw_id in systemIds:
        system_id = int(raw_id)

        try:
            snapshots = client.system.provisioning.snapshot.listSnapshots(
                sessionKey, system_id, date_filter)

            if options.list:
                listSnapshots(system_id, snapshots)
            elif options.long_list:
                listSnapshotsLong(system_id, snapshots)
            else:
                client.system.provisioning.snapshot.deleteSnapshots(
                    sessionKey, system_id, date_filter)

        except xmlrpclib.Fault as e:
            # Report the failure and continue with the remaining systems.
            sys.stderr.write("Error: %s\n" % e.faultString)


def deleteBySystemAfterDate(sessionKey, systemIds, startDate):
    """
     Delete the snapshots for the systems provided where the snapshot was
     created either on or after the date provided.
    """
    if options.verbose:
        print("...executing deleteBySystemAfterDate...")

    date_filter = {"startDate": startDate}
    for raw_id in systemIds:
        system_id = int(raw_id)

        try:
            snapshots = client.system.provisioning.snapshot.listSnapshots(
                sessionKey, system_id, date_filter)

            if options.list:
                listSnapshots(system_id, snapshots)
            elif options.long_list:
                listSnapshotsLong(system_id, snapshots)
            else:
                client.system.provisioning.snapshot.deleteSnapshots(
                    sessionKey, system_id, date_filter)

        except xmlrpclib.Fault as e:
            # Report the failure and continue with the remaining systems.
            sys.stderr.write("Error: %s\n" % e.faultString)


def deleteBySystem(sessionKey, systemIds):
    """
     Delete all snapshots for the systems provided.
    """
    if options.verbose:
        print("...executing deleteBySystem...")

    for raw_id in systemIds:
        system_id = int(raw_id)

        try:
            snapshots = client.system.provisioning.snapshot.listSnapshots(
                sessionKey, system_id, {})

            if options.list:
                listSnapshots(system_id, snapshots)
            elif options.long_list:
                listSnapshotsLong(system_id, snapshots)
            else:
                client.system.provisioning.snapshot.deleteSnapshots(
                    sessionKey, system_id, {})

        except xmlrpclib.Fault as e:
            # Report the failure and continue with the remaining systems.
            sys.stderr.write("Error: %s\n" % e.faultString)


def deleteBySnapshotId(sessionKey, snapshotIds):
    """
     Delete the list of snapshots provided.  If the user does not have
     access to one or more of those snapshots, they will be ignored.
    """
    if options.verbose:
        print("...executing deleteBySnapshotId...")

    for snapshotId in snapshotIds:
        try:
            if options.list:
                print(("snapshotId: ", snapshotId))
            else:
                client.system.provisioning.snapshot.deleteSnapshot(
                    sessionKey, int(snapshotId))
        except xmlrpclib.Fault as e:
            # Report the failure and move on to the next snapshot id.
            sys.stderr.write("Error: %s\n" % e.faultString)


def listSnapshots(systemId, snapshots):
    """
      List to stdout the snapshot summaries for the system provided.
      This will include:
        system id, # snapshots, date of oldest snapshot, date of newest snapshot
    """
    if not snapshots:
        return

    # 'created' values arrive in dateTime.iso8601 format
    # (e.g. 20090325T13:18:11); reformat via datetime for friendlier output
    # (e.g. 2009-03-25 13:18:11). The first entry is treated as the newest
    # snapshot, the last one as the oldest.
    def _to_datetime(created):
        return datetime(*(strptime(created.value, "%Y%m%dT%H:%M:%S")[0:6]))

    newest = _to_datetime(snapshots[0].get('created'))
    oldest = _to_datetime(snapshots[-1].get('created'))

    print(("systemId: %d, snapshots: %d, oldest: %s, newest: %s"  \
        % (systemId, len(snapshots), oldest, newest)))


def listSnapshotsLong(systemId, snapshots):
    """
      List to stdout the comprehensive summaries of snapshots for the system provided.
    """
    for snap in snapshots:
        # 'created' is a dateTime.iso8601 value; reformat it for output.
        created = datetime(*(strptime(snap['created'].value, "%Y%m%dT%H:%M:%S")[0:6]))
        print(("systemId: %d, snapshotId: %d, created: %s, reason: %s" % \
            (systemId, snap['id'], created, snap['reason'])))


def processCommandLine():
    """
    Validate the parsed command-line options (module-level ``options``)
    and normalize the date arguments into xmlrpclib.DateTime objects.

    Exits with status 1 on any invalid combination of options.
    """
    def fail(message):
        sys.stderr.write(message)
        sys.exit(1)

    # default the satellite host to this machine's hostname
    if not options.satellite:
        options.satellite = os.uname()[1]

    if not (options.delete or options.list or options.long_list):
        fail("Must include a command options (--list, --long-list, --delete)\n")

    if not (options.all or options.system_id or options.snapshot_id):
        fail("Must include one of the required parameters (--all, --system-id or --snapshot-id\n")

    if options.snapshot_id and (options.start_date or options.end_date):
        fail("--start-date and --end-date options do not apply when specifying --snapshot-id\n")

    if options.end_date and not options.start_date:
        fail("--end-date must be used with --start-date.\n")

    if options.list and options.long_list:
        fail("-l (--list) and -L (--long-list) are mutually exclusive.\n")

    # convert the start / end dates to a format that usable by the xmlrpc api
    for attr in ("start_date", "end_date"):
        raw = getattr(options, attr)
        if raw:
            parsed = datetime(*(strptime(raw, "%Y%m%d%H%M%S")[0:6]))
            setattr(options, attr, xmlrpclib.DateTime(parsed.timetuple()))

# Script entry point: propagate main()'s return value as the process exit
# status, defaulting to 0 when main() returns None.
if __name__ == '__main__':
    sys.exit(main() or 0)
07070100000019000081FD00000000000000000000000162C3F37D000027B1000000000000000000000000000000000000001D00000000spacewalk-utils/apply_errata#!/usr/bin/python3

"""

 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
 the Free Software Foundation; version 2 of the License.

 This program is distributed in the hope that it will be useful,
 but WITHOUT ANY WARRANTY; without even the implied warranty of
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 GNU General Public License for more details.

 Copyright 2009  Colin Coe <colin.coe@gmail.com>

 Portions of code reused from the Satellite/Spacewalk documentation and
 'Mastering Regular Expressions (2nd Edition)' and various sources on
 the Internet

"""

import getopt
import sys
import time
import os
import re
from time import localtime
from sys import argv
import getpass
from subprocess import call

try:
    import xmlrpclib
except ImportError:
    import xmlrpc.client as xmlrpclib  # pylint: disable=F0401

try:
    import httplib
except ImportError:
    import http.client as httplib  # pylint: disable=F0401

# Name this script was invoked as; used in the usage/help examples.
prog = sys.argv[0]
# Today's date in day-month-year form (only used in the help text).
today = "%d-%d-%d" % (localtime()[2], localtime()[1], localtime()[0])
# Errata ids selected for scheduling.
erratum = []
# Global option settings; seeded from the environment and switches in main().
opt = dict()
opt['test'] = True  # dry-run unless -r is supplied
opt['user'] = ""
opt['passwd'] = ""
opt['sat_host'] = ""
# NOTE(review): the default is a struct_time while -d supplies a string;
# main() handles both via its try/except when splitting the date.
opt['date'] = time.localtime(time.time())
opt['proxy'] = ""


def usage(code):
    """
    Print the command help text and exit.

    code -- desired exit status; None or anything that cannot be
            converted to an int is treated as 0.
    """
    # Example date shown in the help text: 45 days ago, in d-m-yyyy form.
    offset = time.time() - (45 * 86400)
    today = "%d-%d-%d" % (localtime(offset)[2], localtime(offset)[1], localtime(offset)[0])
    if code is not None:
        try:
            code = int(code)
        except (TypeError, ValueError):
            # non-numeric exit codes fall back to success
            code = 0
    else:
        code = 0
    print("-a            - all errata - implies -b -e -s")
    print("-b            - bug fix errata")
    print("-d <date>     - date (in m-d-yy format) - defaults to today's date")
    print("-e            - enhancement errata")
    print("-s            - security errata")
    print("-r            - do it for real - required to make the script actually do")
    print("                updates")
    print("-v            - be verbose, implied if -r is not supplied")
    print("-n            - call /usr/sbin/rhn_check rather than waiting for the next")
    print("                scheduled checkin")
    print("-u <username> - username for connecting to RHN hosted or the Satellite server")
    print("-p <password> - password for connecting to RHN hosted or the Satellite server")
    print("-P <proxy>    - the proxy server to use")
    print("-z <search>   - only apply errata matching this regular expression")
    print("-h            - this help message")
    print("Notes:")
    print("1) Errata are scheduled only if they have a date of <date> or older")
    # fixed typo: "ommitted" -> "omitted"
    print("2) -u and -p can be omitted if the environment variables RHN_USER and RHN_PASS")
    print("   are defined. If -u/-p are provided and RHN_USER and RHN_PASS are also")
    print("   defined, the -u/-p switches have preference.")
    print("   If the password is '-' then the user is asked to enter the password via stdin")
    print("3) the search is case insensitive and looks in the errata synopsis.  An")
    print("   example use is:")
    print(" '%s -u admin -p - -P squid:3128 -s -d %s -z \"critical|important\"'" % (prog, today))
    print("-P <proxy> is needed to define the proxy server (and port) to use and overrides")
    print("   the environment variable RHN_PROXY")
    sys.exit(code)


def testEnvVar(ev):
    try:
        var = os.environ[ev]
    except:
        return False
    return True


def getSysId():
    """
    Return this host's numeric RHN system id.

    Reads /etc/sysconfig/rhn/systemid and extracts the 10-digit id from
    the ">ID-<digits><" marker; exits with status 3 when no id is found.
    """
    # context manager guarantees the file is closed even on error
    with open("/etc/sysconfig/rhn/systemid", "r") as infile:
        text = infile.read()
    result = re.search(">ID-([0-9]{10})<", text)
    if result is None:
        print("No system ID found, please register the node with RHN or your local Satellite/")
        print("Spacewalk server using rhn_register or another appropriate tool")
        sys.exit(3)
    return int(result.group(1))


def getServer():
    """
    Return the host[:port] part of the serverURL configured in
    /etc/sysconfig/rhn/up2date.

    Exits with status 3 when no serverURL line is present.
    """
    # context manager guarantees the file is closed even on error
    with open("/etc/sysconfig/rhn/up2date", "r") as infile:
        text = infile.read()
    result = re.search("(serverURL=).*", text)
    if result is None:
        print("No server details found.  This should not happen as RHN hosted is")
        print("present by default.  Resolve and re-run.")
        sys.exit(3)
    # serverURL=http://host[:port]/XMLRPC -> index 2 is the host part
    return result.group(0).split("/")[2]


class ProxiedTransport(xmlrpclib.Transport):
    """
    XML-RPC transport that tunnels requests through an HTTP proxy by
    connecting to the proxy and requesting the absolute URL of the real
    host.

    NOTE(review): make_connection() uses httplib.HTTP, which exists only
    in Python 2; under Python 3's http.client this call raises
    AttributeError.  The class follows the legacy (pre-3.x) Transport
    hook signatures -- confirm before using with Python 3.
    """

    def set_proxy(self, proxy):
        # Fix: honour the argument instead of reading the global opt dict,
        # which made the parameter dead code.
        self.proxy = proxy

    def make_connection(self, host):
        # remember the real target; the socket goes to the proxy instead
        self.realhost = host
        h = httplib.HTTP(self.proxy)
        return h

    def send_request(self, connection, handler, request_body):
        # ask the proxy for the absolute URL of the real server
        connection.putrequest("POST", 'http://%s%s' % (self.realhost, handler))

    def send_host(self, connection, host):
        connection.putheader('Host', self.realhost)


def main():
    """
    Parse the command line, select this host's unscheduled errata that
    match the requested types/date/search pattern, and (with -r)
    schedule them via the Satellite/Spacewalk XML-RPC API.
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], "abd:esrvhu:p:P:z:n")
    except getopt.GetoptError:
        # print help information and exit:
        usage(2)

    # type: which advisory categories to apply (b=bugfix, e=enhancement,
    # s=security).  EI/EN/ED map errata id -> synopsis / name / date.
    type = dict()
    EI = dict()
    EN = dict()
    ED = dict()

    type['b'] = False
    type['e'] = False
    type['s'] = False

    if len(argv) == 1:
        usage(1)

    # environment variables seed the credentials; explicit -u/-p/-P below
    # only apply when the corresponding value is still empty
    if testEnvVar('RHN_USER'):
        opt['user'] = os.environ['RHN_USER']

    if testEnvVar('RHN_PASS'):
        opt['passwd'] = os.environ['RHN_PASS']

    if testEnvVar('RHN_PROXY'):
        opt['proxy'] = os.environ['RHN_PROXY']

    opt['verbose'] = False
    opt['rhn_check'] = False
    for o, a in opts:
        if o == "-v":
            opt['verbose'] = True
        if o in ("-h"):
            usage(0)
        if o in ("-p"):
            if opt['passwd'] == "":
                opt['passwd'] = a
        if o in ("-u"):
            if opt['user'] == "":
                opt['user'] = a
        if o in ("-P"):
            if opt['proxy'] == "":
                opt['proxy'] = a
        if o in ("-d"):
            opt['date'] = a
        if o in ("-b") or o in ("-a"):
            type['b'] = True
        if o in ("-e") or o in ("-a"):
            type['e'] = True
        if o in ("-s") or o in ("-a"):
            type['s'] = True
        if o in ("-r"):
            opt['test'] = False
        if o in ("-z"):
            opt['search'] = a
        if o in ("-n"):
            opt['rhn_check'] = True

        # NOTE(review): this check sits inside the option loop, so the
        # interactive password prompt only fires when at least one switch
        # was given (e.g. RHN_PASS='-' alone never prompts) -- confirm.
        if opt['passwd'] == '-':
            opt['passwd'] = getpass.getpass()

    # default the synopsis filter to match-everything when -z was absent
    try:
        if opt['search'] == "":
            opt['search'] = ".*"
    except:
        opt['search'] = ".*"

    if not type['b'] and not type['e'] and not type['s']:
        # Nothing selected, assuming all
        type['b'] = True
        type['e'] = True
        type['s'] = True

    # dry-run mode implies verbose output
    if opt['test'] == True:
        opt['verbose'] = True

    if opt['user'] == "":
        print("-u <username> not supplied and environment variable RHN_USER not set")
        sys.exit(4)

    if opt['passwd'] == "":
        print("-p <password> not supplied and environment variable RHN_PASS not set")
        sys.exit(5)

    if opt['sat_host'] == "":
        opt['sat_host'] = getServer()
    sid = getSysId()

    SATELLITE_URL = "http://%s/rpc/api" % opt['sat_host']

    # If no proxy is defined, assume no proxy needed
    if opt['proxy'] != "":
        p = ProxiedTransport()
        p.set_proxy(opt['proxy'])
        client = xmlrpclib.Server(SATELLITE_URL, verbose=0, transport=p)
    else:
        client = xmlrpclib.Server(SATELLITE_URL, verbose=0)
    session = client.auth.login(opt['user'], opt['passwd'])

    ue = client.system.getUnscheduledErrata(session, sid)

    for e in ue:
        # errata dates arrive as m/d/yy; NOTE(review): '+ 2000' assumes
        # a 20xx century -- confirm the API's date format
        year = int(e['date'].split("/")[2]) + 2000
        month = int(e['date'].split("/")[0])
        day = int(e['date'].split("/")[1])
        e_epoch = int(time.mktime(time.strptime('%d-%d-%d 00:00:00' % (year, month, day), '%Y-%m-%d %H:%M:%S')))

        # -d gives a m-d-yyyy string; the default is a struct_time whose
        # leading fields are indexed in the except branch
        try:
            year = int(opt['date'].split('-')[2])
            month = int(opt['date'].split('-')[0])
            day = int(opt['date'].split('-')[1])
        except:
            year = int(opt['date'][0])
            month = int(opt['date'][1])
            day = int(opt['date'][2])

        d_epoch = int(time.mktime(time.strptime('%d-%d-%d 00:00:00' % (year, month, day), '%Y-%m-%d %H:%M:%S')))
        ED[e['id']] = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.localtime(e_epoch))

        # only errata released before the cutoff date are candidates
        if e_epoch < d_epoch:
            EI[e['id']] = e['advisory_synopsis']
            EN[e['id']] = e['advisory_name']

            pattern = re.compile(opt['search'], re.I)
            result = pattern.search(e['advisory_synopsis'])

            if result is not None:
                if type['b'] and (e['advisory_name'].find("RHBA") == 0 or e['advisory_type'].find("Bug Fix Advisory") == 0):
                    erratum.append(e['id'])

                if type['e'] and (e['advisory_name'].find("RHEA") == 0 or e['advisory_type'].find("Product Enhancement Advisory") == 0):
                    erratum.append(e['id'])

                if type['s'] and (e['advisory_name'].find("RHSA") == 0 or e['advisory_type'].find("Security Advisory") == 0):
                    erratum.append(e['id'])

    if opt['verbose']:
        print("Notes:")
        print("\tUsing: %s" % opt['sat_host'])
        if opt['test']:
            print("\tRunning in test mode, updates will not be scheduled")
        else:
            print("\tUpdates *will* be scheduled")
        if type['b']:
            print("\tBug fix errata selected")
        if type['e']:
            print("\tEnhancement errata selected")
        if type['s']:
            print("\tSecurity errata selected")
        print("\t%d errata selected" % len(erratum))
        print("")

    if opt['verbose']:
        for errata in erratum:
            print("(Errata ID: %04d, Errata Name: %s, Released: %s) %s" % (errata, EN[errata], ED[errata], EI[errata]))

    # only touch the system when -r was given
    if not opt['test']:
        client.system.scheduleApplyErrata(session, sid, erratum)
        command = ["/usr/sbin/rhn_check"]
        if opt['verbose']:
            print("\tDeploying updates now")
            command += ["-v"]
        call(command, shell=False)

    client.auth.logout(session)


if __name__ == "__main__":
    main()
0707010000001A000081FD00000000000000000000000162C3F37D0000547D000000000000000000000000000000000000002A00000000spacewalk-utils/spacewalk-hostname-rename#!/bin/bash
#
# Copyright (c) 2010--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#

# The rename touches system services and configuration files: require root.
if [ 0$UID -gt 0 ]; then
    echo "$0 has to be run as root."
    exit 1
fi

# Files and directories consulted, updated or backed up during the rename.
LOG=/var/log/rhn/rhn_hostname_rename.log
RHN_CONF_FILE=/etc/rhn/rhn.conf
SSL_BUILD_DIR=/root/ssl-build
ETC_JABBERD_DIR=/etc/jabberd
HTTP_PUB_DIR=$(spacewalk-cfg-get documentroot)/pub/
BOOTSTRAP_SH=$(spacewalk-cfg-get documentroot)/pub/bootstrap/bootstrap.sh
BOOTSTRAP_CCO=$(spacewalk-cfg-get documentroot)/pub/bootstrap/client-config-overrides.txt
SAT_LOCAL_RULES_CONF=/var/lib/rhn/rhn-satellite-prep/satellite-local-rules.conf
MGR_SYNC_CONF=/root/.mgr-sync
BACKUP_EXT=.rnmbck
CA_CERT_TRUST_DIR=/etc/pki/ca-trust/source/anchors
if [ -d /etc/pki/trust/anchors/ ]; then
    CA_CERT_TRUST_DIR=/etc/pki/trust/anchors/
fi

# Database-shell quirks and the service name for the configured backend.
DB_BACKEND="$(spacewalk-cfg-get db_backend)"
if [ "$DB_BACKEND" = "oracle" ]; then
    DBSHELL_QUIT="QUIT"
    DBSHELL_QUIET="
set feed off;
set pages 0;"

    if [ -x /etc/init.d/oracle ]; then
        DB_SERVICE="oracle"
    fi

elif [ "$DB_BACKEND" = "postgresql" ]; then
    DBSHELL_QUIT="\q"
    DBSHELL_QUIET="
\set QUIET on
\t"
    if [ -x /etc/init.d/postgresql -o -f /usr/lib/systemd/system/postgresql.service ]; then
        DB_SERVICE="postgresql"
    fi
    if [ -x /etc/init.d/postgresql92-postgresql ]; then
        DB_SERVICE="postgresql92-postgresql"
    fi
fi

# Oracle XE / full Oracle network configuration files (spacewalk installs).
ORACLE_XE_LISTENER_ORA_FILE=/usr/lib/oracle/xe/app/oracle/product/10.2.0/server/network/admin/listener.ora
ORACLE_XE_TNSNAMES_ORA_FILE=/usr/lib/oracle/xe/app/oracle/product/10.2.0/server/network/admin/tnsnames.ora
ORACLE_LISTENER_ORA_FILE=/opt/apps/oracle/product/11gR2/dbhome_1/network/admin/listener.ora
ORACLE_TNSNAMES_ORA_FILE=/opt/apps/oracle/product/11gR2/dbhome_1/network/admin/tnsnames.ora

SPW_SETUP_JABBERD=/usr/bin/spacewalk-setup-jabberd

# Regular expressions used to recognize the <IP_ADDRESS> positional argument.
IPV4ADDR_REGEX="^[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}$"
IPV6ADDR_REGEX="^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*$"
IPADDR_REGEX="($IPV4ADDR_REGEX)|($IPV6ADDR_REGEX)"

###############################################################################

function echo_usage {
    # Print the command usage summary and exit unsuccessfully.
    local self=$(basename $0)
    echo "Usage:"
    echo "   $self <IP_ADDRESS> [ --ssl-country=<SSL_COUNTRY> --ssl-state=<SSL_STATE> --ssl-city=<SSL_CITY> --ssl-org=<SSL_ORG> --ssl-orgunit=<SSL_ORGUNIT> --ssl-email=<SSL_EMAIL> --ssl-ca-password=<SSL_CA_PASSWORD> --ssl-ca-cert=<PATH> --ssl-server-key=<PATH> --ssl-server-cert=<PATH>]"
    echo "   $self { -h | --help }"
    exit 1
}

function echo_err {
    # Write the message to stderr and append it to the log file.
    printf '%s\n' "$*" >&2
    printf '%s\n' "$*" >> $LOG
}

function bye {
    # Ask the user to fix the previously reported problem, then abort.
    echo_err "Fix the problem and run $0 again"
    exit 1
}

function print_status {
    # Report OK/FAILED for the current step.  $1 is the step's exit
    # status; any remaining arguments form the failure message.  A
    # non-zero status prints the message and aborts via bye.
    # strip whitespace
    STATUS="${1#"${1%%[![:space:]]*}"}"
    if [ "$STATUS" == "0" ]
    then
        echo "OK" | tee -a $LOG
    else
        echo_err "FAILED"
        shift
        # Fix: quote the expansion so the message is not word-split or
        # glob-expanded before echo_err sees it.
        echo_err "$*"
        bye
    fi
}

function check_input_ip {
   # Succeed (return 0) when $1 is one of this host's configured
   # non-loopback IPv4/IPv6 addresses, fail (return 1) otherwise.
   IPS=`echo $(/sbin/ip a | awk  'BEGIN {FS=" ";RS="/"}; /inet|inet6 / && ! /127.0.|::1/ {print $NF}')`
   for ip in $IPS; do
       [ "$1" == "$ip" ] && return 0
   done
   return 1
}

function initial_system_hostname_check {
    # Sanity-check that the kernel, the hostname command and the distro's
    # hostname file all agree on the (lowercase) new hostname.  On success
    # HOSTNAME holds the FQDN and SHORT_HN the short name; returns 1 on
    # any mismatch.
    # only reliable hostname is in /proc/sys/kernel/hostname
    SYSCTL_HOSTNAME=`cat /proc/sys/kernel/hostname`
    if [ "$HOSTNAME" != "$SYSCTL_HOSTNAME" ]
    then
        echo_err "Wrong HOSTNAME variable: \"$HOSTNAME\""
        return 1
    fi

    # check hostname command
    if [ "$HOSTNAME" != "$(hostname)" ]
    then
        echo_err "Wrong hostname command output: \"$(hostname)\""
        return 1
    fi

    if [ "$(hostname).$(hostname -d)" != "$(hostname -f)" ]
    then
        echo_err "Wrong hostname command output: \"$(hostname -f)\""
        return 1
    fi

    # set HOSTNAME to long hostname
    HOSTNAME=`hostname -f`
    SHORT_HN=`hostname -s`

    # check for uppercase chars in hostname
    if [ "$HOSTNAME" != "$(echo $HOSTNAME | tr '[:upper:]' '[:lower:]')" ]
    then
        echo_err "Uppercase characters are not allowed for satellite hostname."
        return 1
    fi

    # check /etc/sysconfig/network or /etc/hostname
    HN_ETC_FILE="/etc/hostname"
    if [ -f "$HN_ETC_FILE" ]
    then
        HN_ETC=`cat $HN_ETC_FILE`
    elif [ -f /etc/HOSTNAME ]
    then
        HN_ETC_FILE="/etc/HOSTNAME"
        HN_ETC=`cat $HN_ETC_FILE`
    elif [ -f /etc/sysconfig/network ]
    then
        HN_ETC_FILE="/etc/sysconfig/network"
        HN_ETC=`awk -F= '/HOSTNAME/ {print $2}' $HN_ETC_FILE`
    fi

    # either short or long hostname would be ok
    if [ "$SHORT_HN" != "$HN_ETC" -a "$HOSTNAME" != "$HN_ETC" ]
    then
        echo_err "Wrong hostname in $HN_ETC_FILE: \"$HN_ETC\""
        return 1
    fi

    return 0
}

function backup_file {
    # Keep a copy of $1 with the $BACKUP_EXT suffix before it is changed;
    # a missing file is only noted in the log.
    if [ -e ${1} ]; then
        cp ${1} ${1}${BACKUP_EXT}
    else
        echo "Backup of ${1} failed. File not found." >> $LOG
    fi
}

function update_rhn_conf {
    # Point the jabber/cobbler related settings at the new hostname, both
    # in the satellite-prep local rules and in the live rhn.conf.
    backup_file ${SAT_LOCAL_RULES_CONF}
    # store config to satellite-local-rules.conf
    /usr/bin/rhn-config-satellite.pl \
       --target=${SAT_LOCAL_RULES_CONF} \
       --option=jabberDOThostname=$HOSTNAME \
       --option=cobblerDOThost=$HOSTNAME \
       >> $LOG 2>&1
    # but do not deploy (we'd lose actual configuration)
    # /usr/bin/satcon-deploy-tree.pl \
    #    --source=/var/lib/rhn/rhn-satellite-prep/etc
    #    --dest=/etc
    #    --conf=$SAT_LOCAL_RULES_CONF
    #    >> $LOG 2>&1
    backup_file ${RHN_CONF_FILE}
    /usr/bin/rhn-config-satellite.pl \
        --target=${RHN_CONF_FILE} \
        --option=server.jabber_server=$HOSTNAME \
        --option=osa-dispatcher.jabber_server=$HOSTNAME \
        --option=cobbler.host=$HOSTNAME >> $LOG 2>&1
}

function re-generate_server_ssl_certificate {
    # Generate new SSL certificates for the renamed host, or install a
    # user-supplied third-party certificate set.  Runs only when SSL
    # options were given on the command line (CML_NEW_SSL_CERT_REQUEST /
    # CML_THIRD_PARTY_CERT); missing values are prompted for interactively.
    # default is to generate new SSL certificate

    if [ -n "$CML_NEW_SSL_CERT_REQUEST" -o -n "$CML_THIRD_PARTY_CERT" ]
    then

        # is there a need to re-generate SSL certificate?
        if [ -z "$CML_THIRD_PARTY_CERT" ]
        then
            if [ -n "$SUBJECT" ]
            then
                # NOTE(review): the pattern captures six groups (C, ST, O,
                # OU, CN, email) and has no city/locality group, so
                # SSL_CITY_OLD below reuses group \2 (the state) -- confirm.
                SUBJECT_PATTERN='[[:space:]]*Subject: C=\(..*\), ST=\(..*\), O=\(..*\), OU=\(..*\), CN=\(..*\)\/emailAddress=\(..*\)'
                SSL_COUNTRY_OLD=`echo $SUBJECT | sed "s/$SUBJECT_PATTERN/\1/"`
                SSL_STATE_OLD=`echo $SUBJECT | sed "s/$SUBJECT_PATTERN/\2/"`
                SSL_CITY_OLD=`echo $SUBJECT | sed "s/$SUBJECT_PATTERN/\2/"`
                SSL_ORG_OLD=`echo $SUBJECT | sed "s/$SUBJECT_PATTERN/\3/"`
                SSL_ORGUNIT_OLD=`echo $SUBJECT | sed "s/$SUBJECT_PATTERN/\4/"`
                SSL_EMAIL_OLD=`echo $SUBJECT | sed "s/$SUBJECT_PATTERN/\6/"`
            fi

            echo "Starting generation of new SSL certificate:"
            # COUNTRY
            if [ -n "${CML_SSL_COUNTRY+x}" ]
            then
                SSL_COUNTRY=${CML_SSL_COUNTRY}
            else
                read -e -p " Enter Country [$SSL_COUNTRY_OLD] : "
                SSL_COUNTRY=${REPLY:-$SSL_COUNTRY_OLD}
            fi
            # STATE
            if [ -n "${CML_SSL_STATE+x}" ]
            then
                SSL_STATE=${CML_SSL_STATE}
            else
                read -e -p " Enter State [$SSL_STATE_OLD] : "
                SSL_STATE=${REPLY:-$SSL_STATE_OLD}
            fi
            # CITY
            if [ -n "${CML_SSL_CITY+x}" ]
            then
                SSL_CITY=${CML_SSL_CITY}
            else
                read -e -p " Enter City [$SSL_CITY_OLD] : "
                SSL_CITY=${REPLY:-$SSL_CITY_OLD}
            fi
            # ORGANIZATION
            if [ -n "${CML_SSL_ORG+x}" ]
            then
                SSL_ORG=${CML_SSL_ORG}
            else
                read -e -p " Enter Organization [$SSL_ORG_OLD] : "
                SSL_ORG=${REPLY:-$SSL_ORG_OLD}
            fi
            # ORGANIZATION UNIT
            if [ -n "${CML_SSL_ORGUNIT+x}" ]
            then
                SSL_ORGUNIT=${CML_SSL_ORGUNIT}
            else
                # offer hostname as ORG UNIT every time
                read -e -p " Enter Organization Unit [$HOSTNAME] : "
                SSL_ORGUNIT=${REPLY:-$HOSTNAME}
            fi
            # EMAIL ADDRESS
            if [ -n "${CML_SSL_EMAIL+x}" ]
            then
                SSL_EMAIL=${CML_SSL_EMAIL}
            else
                read -e -p " Enter Email Address [$SSL_EMAIL_OLD] : "
                SSL_EMAIL=${REPLY:-$SSL_EMAIL_OLD}
            fi
            # CA PASSWORD
            # ask explicitly (different behaviour on sat and spw)
            if [ -n "${CML_SSL_CA_PASSWORD+x}" ]
            then
                SSL_CA_PASSWORD=${CML_SSL_CA_PASSWORD}
            else
                read -e -p " Enter CA password : " -s
                echo
                SSL_CA_PASSWORD=${REPLY}
            fi

            echo " Generating SSL certificates:" | tee -a $LOG
            GEN_NEW_CA="n"
            # NOTE(review): the comment below and the test disagree -- the
            # CA is regenerated when RHN-ORG-TRUSTED-SSL-CERT *exists*;
            # confirm whether '! -f' was intended.
            if [ -f $SSL_BUILD_DIR/RHN-ORG-TRUSTED-SSL-CERT ]; then
                GEN_NEW_CA="y"
                # We don't have the CA in SSL build dir: generate a new one
                echo " Generating SSL CA Certificate:" | tee -a $LOG
                # just log the SSL info ...
                echo "rhn-ssl-tool --gen-ca --no-rpm --force \
                    --dir="$SSL_BUILD_DIR" \
                    --set-country="$SSL_COUNTRY" \
                    --set-state="$SSL_STATE" \
                    --set-city="$SSL_CITY" \
                    --set-org="$SSL_ORG" \
                    --set-org-unit="$SSL_ORGUNIT" \
                    --set-common-name="${HOSTNAME}" \
                " >> $LOG
                rhn-ssl-tool --gen-ca --no-rpm --force \
                    --dir="$SSL_BUILD_DIR" \
                    --set-country="$SSL_COUNTRY" \
                    --set-state="$SSL_STATE" \
                    --set-city="$SSL_CITY" \
                    --set-org="$SSL_ORG" \
                    --set-org-unit="$SSL_ORGUNIT" \
                    --set-common-name="${HOSTNAME}" \
                    --password="$SSL_CA_PASSWORD" \
                    2>>$LOG
		CML_SSL_CA_CERT=$SSL_BUILD_DIR/RHN-ORG-TRUSTED-SSL-CERT
            else
                echo " No need to generate a new SSL CA Certificate" | tee -a $LOG
            fi
            echo "rhn-ssl-tool --gen-server --no-rpm \
                --dir="$SSL_BUILD_DIR" \
                --set-country="$SSL_COUNTRY" \
                --set-state="$SSL_STATE" \
                --set-city="$SSL_CITY" \
                --set-org="$SSL_ORG" \
                --set-org-unit="$SSL_ORGUNIT" \
                --set-email="$SSL_EMAIL" \
                --set-hostname="${HOSTNAME}" \
            " >> $LOG
            rhn-ssl-tool --gen-server --no-rpm \
                --dir="$SSL_BUILD_DIR" \
                --set-country="$SSL_COUNTRY" \
                --set-state="$SSL_STATE" \
                --set-city="$SSL_CITY" \
                --set-org="$SSL_ORG" \
                --set-org-unit="$SSL_ORGUNIT" \
                --set-email="$SSL_EMAIL" \
                --set-hostname="${HOSTNAME}" \
                --password="$SSL_CA_PASSWORD" \
                2>>$LOG
            # rhn-ssl-tool places server.key/server.crt in a directory named
            # after the machine part of the FQDN; derive that name here
            SERVER_NAME=$(echo "${HOSTNAME}" | perl -e '
	    my @hostname_parts = split(/\./, <STDIN>);
            my $system_name;
            if (scalar @hostname_parts > 2) {
              $system_name = join(".", splice(@hostname_parts, 0, -2));
            }
            else {
              $system_name = join(".", @hostname_parts);
            };
	    printf($system_name."\n");')
	    CML_SSL_SERVER_KEY=$SSL_BUILD_DIR/$SERVER_NAME/server.key
	    CML_SSL_SERVER_CERT=$SSL_BUILD_DIR/$SERVER_NAME/server.crt
        fi

        if [ ! -f $CML_SSL_SERVER_KEY -o ! -f $CML_SSL_SERVER_CERT ];
        then
            echo_err "Wrong SSL information provided. Check $LOG for more information." | tee -a $LOG
            bye
        fi
        if [ ! -f $CML_SSL_CA_CERT ];
	then
            echo_err "CA Certificate file not found. Check $LOG for more information." | tee -a $LOG
            bye
        fi
        echo -n "Making new SSL certificate publicly available ... " | tee -a $LOG
	/usr/bin/mgr-ssl-cert-setup \
                --root-ca-file=$CML_SSL_CA_CERT \
                --server-cert-file=$CML_SSL_SERVER_CERT \
                --server-key-file=$CML_SSL_SERVER_KEY
        print_status $?
    fi
}

function refresh_pillar {
    # Refresh the Salt pillar data of every salt-entitled minion so the
    # repository URLs pick up the new hostname.  Aborts via print_status
    # when any minion reports a skipped refresh.
    echo -n "Refreshing Salt minion pillar data, may take a while ... " | tee -a $LOG
    for ID in `spacecmd -q api system.listSystemsWithEntitlement -- -A "salt_entitled" -F "%(id)s"`
    do
        if [ -n "$SIDS" ]
        then
            SIDS="$SIDS,$ID"
        else
            # Fix: seed the list with the current id; the previous code
            # referenced the undefined $SID and dropped the first system.
            SIDS="$ID"
        fi
    done
    SKIPPED=`spacecmd -q api system.refreshPillar -- -A "[\\\\\"general\\\\\",[$SIDS]]"`
    if [ "$SKIPPED" != "[]" ]
    then
        echo "Some minions pillar have not been refreshed: $SKIPPED" | tee -a $LOG
        print_status 1
    fi
    print_status 0
}
###############################################################################

echo "[$(date)]: $0 $*" >> $LOG

while [ $# -ge 1 ]; do
    if [[ "$1" =~ $IPADDR_REGEX ]]; then
        IP=$1
        shift
        continue
    fi

    case $1 in
            --help | -h)  echo_usage;;

            --ssl-country=*) CML_SSL_COUNTRY=$(echo $1 | cut -d= -f2-);;
            --ssl-state=*) CML_SSL_STATE=$(echo $1 | cut -d= -f2-);;
            --ssl-city=*) CML_SSL_CITY=$(echo $1 | cut -d= -f2-);;
            --ssl-org=*) CML_SSL_ORG=$(echo $1 | cut -d= -f2-);;
            --ssl-orgunit=*) CML_SSL_ORGUNIT=$(echo $1 | cut -d= -f2-);;
            --ssl-email=*) CML_SSL_EMAIL=$(echo $1 | cut -d= -f2-);;

            --ssl-ca-password=*) CML_SSL_CA_PASSWORD=$(echo $1 | cut -d= -f2-);;

            --ssl-ca-cert=*) CML_SSL_CA_CERT=$(echo $1 | cut -d= -f2-);;
            --ssl-server-cert=*) CML_SSL_SERVER_CERT=$(echo $1 | cut -d= -f2-);;
            --ssl-server-key=*) CML_SSL_SERVER_KEY=$(echo $1 | cut -d= -f2-);;
            *) echo_err "Error: Invalid option $1"
               echo_usage;;
    esac
    shift
done

if [ -n "${IP}" ]
then
    echo -n "Validating IP ... " | tee -a $LOG
    check_input_ip $IP
    print_status $? "IP $IP is not your valid IP address."
else
    echo_err "Missing <ip_address> argument."
    echo_usage
fi

# if the user has set one of these params,
# he wants to re-generate SSL certificate
for ssl_var in ${CML_SSL_COUNTRY} ${CML_SSL_STATE} ${CML_SSL_CITY} ${CML_SSL_ORG} ${CML_SSL_ORGUNIT} ${CML_SSL_EMAIL} ${CML_SSL_CA_PASSWORD}
do
    [ -n "${ssl_var}" ] && CML_NEW_SSL_CERT_REQUEST=1
done

if [ -n "${CML_SSL_CA_CERT}" -a -n "${CML_SSL_SERVER_KEY}" -a -n "${CML_SSL_SERVER_CERT}" ]
then
    CML_THIRD_PARTY_CERT=1
else
    if [ -n "${CML_SSL_CA_CERT}" -o -n "${CML_SSL_SERVER_CERT}" -o -n "${CML_SSL_SERVER_KEY}" ]
    then
        echo_err "Either all or none of --ssl-ca-cert, --ssl-server-key and --ssl-server-cert must be provided"
        echo_usage
    fi
fi

# The previous hostname is whatever cobbler still points at.
OLD_HOSTNAME=$(grep ^redhat_management_server /etc/cobbler/settings | awk '{print $2}')

echo "=============================================" | tee -a $LOG
echo "hostname: $HOSTNAME" | tee -a $LOG
echo "old hostname: $OLD_HOSTNAME"  | tee -a $LOG
echo "ip: $IP" | tee -a $LOG
echo "=============================================" | tee -a $LOG

# Abort early unless the system already reports the new hostname consistently.
initial_system_hostname_check || bye

echo "=============================================" | tee -a $LOG
echo "hostname: $HOSTNAME" | tee -a $LOG
echo "=============================================" | tee -a $LOG

# stop services (the database stays up for the rename work below)
echo -n "Stopping spacewalk services ... " | tee -a $LOG
/usr/sbin/spacewalk-service stop >> $LOG 2>&1
if [ "$DB_SERVICE" != "" ]
then
    /sbin/service $DB_SERVICE start >> $LOG 2>&1
fi
print_status 0  # just simulate end

echo -n "Testing DB connection ... " | tee -a $LOG
# for spacewalk only:
if [ -e "$ORACLE_XE_LISTENER_ORA_FILE" ]
then
    sed -i$BACKUP_EXT "s/\(.*(HOST[[:space:]]*=[[:space:]]*\)[^)]*\().*$\)/\1$HOSTNAME\2/" $ORACLE_XE_LISTENER_ORA_FILE
fi
if [ -e $ORACLE_XE_TNSNAMES_ORA_FILE ]
then
    sed -i$BACKUP_EXT 's/\(.*(HOST[[:space:]]*=[[:space:]]*\)[^)]*\().*$\)/\1$HOSTNAME\2/' $ORACLE_XE_TNSNAMES_ORA_FILE
    if [ -e /etc/tnsnames.ora ]; then
        cp $ORACLE_TNSNAMES_ORA_FILE /etc/tnsnames.ora
    fi
    if [ -x /etc/init.d/oracle-xe ]; then
        /sbin/service oracle-xe restart >> $LOG 2>&1
    fi
fi
if [ -e "$ORACLE_LISTENER_ORA_FILE" ]
then
    sed -i$BACKUP_EXT "s/\(.*(HOST[[:space:]]*=[[:space:]]*\)[^)]*\().*$\)/\1$HOSTNAME\2/" $ORACLE_LISTENER_ORA_FILE
fi
# --- Oracle client configuration -------------------------------------------
# Rewrite the HOST= entry in tnsnames.ora to the new hostname, propagate the
# file to /etc/tnsnames.ora when that copy exists, and restart Oracle if an
# init script is installed.
if [ -e $ORACLE_TNSNAMES_ORA_FILE ]
then
    sed -i$BACKUP_EXT "s/\(.*(HOST[[:space:]]*=[[:space:]]*\)[^)]*\().*$\)/\1$HOSTNAME\2/" $ORACLE_TNSNAMES_ORA_FILE
    if [ -e /etc/tnsnames.ora ]; then
        cp $ORACLE_TNSNAMES_ORA_FILE /etc/tnsnames.ora
    fi
    if [ -x /etc/init.d/oracle ]; then
        /sbin/service oracle restart >> $LOG 2>&1
    fi
fi

# Wait until the database answers; print_status reports the given message
# when the helper exits non-zero.
/usr/sbin/spacewalk-startup-helper wait-for-database
print_status "${?}" "Your database isn't running."

echo -n "Updating /etc/rhn/rhn.conf ... " | tee -a $LOG
update_rhn_conf
print_status 0  # just simulate end

re-generate_server_ssl_certificate

echo -n "Regenerating new bootstrap client-config-overrides.txt ... " | tee -a $LOG
# it's easier to subst HOSTNAME with sed
# than to re-generate and keep current configuration
# rhn-bootstrap >> /dev/null 2>&1
if [ -e "$BOOTSTRAP_SH" ]
then
    backup_file ${BOOTSTRAP_SH}
    sed -i "s/\(HOSTNAME=\).*/\1$HOSTNAME/" ${BOOTSTRAP_SH}
fi
if [ -e "$BOOTSTRAP_CCO" ]
then
    backup_file ${BOOTSTRAP_CCO}
    # serverURL=http(s)://<host>/XMLRPC -> keep scheme and path, swap the host
    sed -i "s/\(serverURL=https\?:\/\/\).*\(\/XMLRPC\)/\1$HOSTNAME\2/" ${BOOTSTRAP_CCO}
fi
print_status 0  # just simulate end

# Update the hostname template string stored in the database.
echo -n "Updating other DB entries ... " | tee -a $LOG
spacewalk-sql --select-mode - >>$LOG <<EOS
UPDATE rhntemplatestring SET value='$HOSTNAME' WHERE label='hostname';
COMMIT;
$DBSHELL_QUIT
EOS
print_status 0  # just simulate end

echo -n "Changing cobbler settings ... " | tee -a $LOG
/usr/bin/spacewalk-setup-cobbler >> $LOG 2>&1
print_status $?

# Substitute the old hostname in kickstart kernel options in the DB, then in
# every cobbler collection file on disk (each file is backed up first).
echo -n "Changing kernel_options ... " | tee -a $LOG
spacewalk-sql --select-mode - >>$LOG <<EOS
UPDATE rhnKickstartableTree
SET kernel_options = REPLACE(kernel_options, '$OLD_HOSTNAME', '$HOSTNAME'),
    kernel_options_post = REPLACE(kernel_options_post, '$OLD_HOSTNAME', '$HOSTNAME');
COMMIT;
$DBSHELL_QUIT
EOS
for COBBLERDIR in /var/lib/cobbler/collections/*
do
    if [ -d $COBBLERDIR ] && [ ! -z "$(ls $COBBLERDIR)" ]; then
        for FILE in $COBBLERDIR/*
        do
            backup_file $FILE
            sed -i "s/$OLD_HOSTNAME/$HOSTNAME/g" $FILE
        done
    fi
done
print_status 0  # just simulate end

echo -n "Changing jabberd settings ... " | tee -a $LOG
# delete old dispatcher(s)
spacewalk-sql --select-mode - >>$LOG <<EOS
DELETE FROM rhnPushDispatcher WHERE hostname != '$HOSTNAME';
COMMIT;
$DBSHELL_QUIT
EOS

# Keep backups of the jabberd configs before they are regenerated below.
for jabber_config_file in c2s.xml s2s.xml sm.xml
do
    backup_file ${ETC_JABBERD_DIR}/${jabber_config_file}
done

# clean jabberd database
rm -f /var/lib/jabberd/db/*

# Regenerate jabberd configuration: prefer the dedicated setup script when
# present, otherwise deploy the satellite prep tree.
if [ -e $SPW_SETUP_JABBERD ]
then
    $SPW_SETUP_JABBERD --macros "hostname:$HOSTNAME"
else
    /usr/bin/satcon-deploy-tree.pl \
        --source=/var/lib/rhn/rhn-satellite-prep/etc/jabberd \
        --dest=$ETC_JABBERD_DIR \
        --conf=$SAT_LOCAL_RULES_CONF \
       >> /dev/null 2>&1
fi
print_status $?

# change /root/.mgr-sync
# Substitute the hostname and blank the cached session token (the old token
# is tied to the previous hostname).
if [ -e $MGR_SYNC_CONF ]; then
    backup_file $MGR_SYNC_CONF
    sed -i "s/$OLD_HOSTNAME/$HOSTNAME/g" $MGR_SYNC_CONF
    sed -i "s/^\(mgrsync.session.token \?=\).*$/\1/g" $MGR_SYNC_CONF
fi
print_status 0  # just simulate end

echo -n "Starting spacewalk services ... " | tee -a $LOG
# NOTE(review): the DB service is stopped here but only restarted when it is
# postgresql -- presumably spacewalk-service manages the other DB case
# itself; confirm before changing.
if [ "$DB_SERVICE" != "" ]
then
/sbin/service $DB_SERVICE stop >> $LOG 2>&1
fi
if [ "$DB_SERVICE" == "postgresql" ]
then
/sbin/service $DB_SERVICE start >> $LOG 2>&1
fi
/usr/sbin/spacewalk-service start >> $LOG 2>&1
print_status 0  # just simulate end

# Refresh the minion pillar data since they contain the repos URLs with the old hostname
refresh_pillar

echo "[$(date)]: $(basename $0) finished successfully." >> $LOG
0707010000001B000081B400000000000000000000000162C3F37D00001941000000000000000000000000000000000000002800000000spacewalk-utils/sw-system-snapshot.sgml<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN">
<refentry>

<RefMeta>
<RefEntryTitle>sw-system-snapshot</RefEntryTitle><manvolnum>8</manvolnum>
<RefMiscInfo>Version 1.0</RefMiscInfo>
</RefMeta>

<RefNameDiv>
<RefName><command>sw-system-snapshot</command></RefName>
<RefPurpose>
List or delete system snapshots from the management server.
</RefPurpose>
</RefNameDiv>

<RefSynopsisDiv>
<Synopsis>
    <cmdsynopsis>
        <command>sw-system-snapshot</command>
        <arg>options <replaceable>...</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>--satellite=<replaceable>SATELLITE</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-u<replaceable>USERNAME</replaceable></arg>
        <arg>--username=<replaceable>USERNAME</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-p<replaceable>PASSWORD</replaceable></arg>
        <arg>--password=<replaceable>PASSWORD</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-l</arg>
        <arg>--list</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-L</arg>
        <arg>--long-list</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-d</arg>
        <arg>--delete</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-a</arg>
        <arg>--all</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>--system-id=<replaceable>SYSTEMID</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>--snapshot-id=<replaceable>SNAPSHOTID</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>--start-date=<replaceable>STARTDATE</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>--end-date=<replaceable>ENDDATE</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-v</arg><arg>--verbose</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-h</arg><arg>--help</arg>
    </cmdsynopsis>
</Synopsis>
</RefSynopsisDiv>

<RefSect1><Title>Description</Title>
    <para>
        <emphasis>sw-system-snapshot</emphasis> is used to list or delete system snapshots.
    </para>
</RefSect1>

<RefSect1><Title>Options</Title>
<variablelist>
    <varlistentry>
        <term>-h, --help</term>
        <listitem>
            <para>Display the help screen with a list of options.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--satellite </term>
        <listitem>
            <para>Satellite server to run against.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-u<replaceable>USERNAME</replaceable>
        --username=<replaceable>USERNAME</replaceable></term>
        <listitem>
            <para>Satellite username to execute the command as.  Note: a user will only be able to list and delete snapshots that are accessible by this user's account.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-p<replaceable>PASSWORD</replaceable>
        --password=<replaceable>PASSWORD</replaceable></term>
        <listitem>
            <para>Password associated with the username provided.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-l, --list</term>
        <listitem>
            <para>List system snapshot summaries that meet the criteria provided.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-L, --long-list</term>
        <listitem>
            <para>List details about system snapshots that meet the criteria provided.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-d, --delete</term>
        <listitem>
            <para>Delete system snapshots that meet the criteria provided.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-a, --all</term>
        <listitem>
            <para>Perform the requested action on all system snapshots.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--system-id=<replaceable>SYSTEMID</replaceable></term>
        <listitem>
            <para>Perform the requested action for the system id specified.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--snapshot-id=<replaceable>SNAPSHOTID</replaceable></term>
        <listitem>
            <para>Perform the requested action for the snapshot id specified.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--start-date=<replaceable>STARTDATE</replaceable></term>
        <listitem>
            <para>Perform the requested action on all snapshots that were created on or after the date specified.  A start-date may be used to further narrow the snapshots affected when using the --all or --system-id options.</para>
            <para>The date must be in the following format: YYYYMMDDHH24MISS.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--end-date=<replaceable>ENDDATE</replaceable></term>
        <listitem>
            <para>Perform the requested action on all snapshots that were created on or before the date specified.  An end-date may be used to further narrow the snapshots affected when using the --all or --system-id options.</para>
            <para>The date must be in the following format: YYYYMMDDHH24MISS.</para>
        </listitem>
    </varlistentry>
</variablelist>
</RefSect1>

<RefSect1><Title>Examples</Title>
<simplelist>
<member><command>sw-system-snapshot --list --all</command></member>
<member><command>sw-system-snapshot --list --all --start-date=20090403000000</command></member>
<member><command>sw-system-snapshot --list --system-id=1000010022</command></member>
<member><command>sw-system-snapshot --list --system-id=1000010022 --start-date=20090101000000 --end-date=20091231000000</command></member>
<member><command>sw-system-snapshot --delete --all</command></member>
<member><command>sw-system-snapshot --delete --all --start-date=20090403000000</command></member>
<member><command>sw-system-snapshot --delete --system-id=1000010022</command></member>
<member><command>sw-system-snapshot --delete --system-id=1000010022 --start-date=20090101000000 --end-date=20091231000000</command></member>
<member><command>sw-system-snapshot --delete --system-id=1000010022 --system-id=1000010033</command></member>
</simplelist>
</RefSect1>

<RefSect1><Title>Authors</Title>
<simplelist>
    <member>Brad Buckingham<email>bbuckingham@redhat.com</email></member>
</simplelist>
</RefSect1>
</RefEntry>
0707010000001C000081B400000000000000000000000162C3F37D00000101000000000000000000000000000000000000002700000000spacewalk-utils/sw-ldap-user-sync.confdirectory:
  user: uid=xyz,dc=example,dc=com
  password: xxx
  url: ldaps://ldap.example.com:636
  group: cn=admin,ou=groups,dc=example,dc=com
  users: ou=people,dc=example,dc=com
spacewalk:
  url: http://localhost/rpc/api
  user: spacewalk
  password: xxx
0707010000001D000081FD00000000000000000000000162C3F37D00000EE8000000000000000000000000000000000000002F00000000spacewalk-utils/delete-old-systems-interactive#!/usr/bin/python3
#
# Copyright (c) 2017 Red Hat, Inc.
#
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#

from __future__ import print_function
from datetime import datetime, timedelta
from optparse import Option, OptionParser
import re
import sys

try:
    import xmlrpclib
except ImportError:
    import xmlrpc.client as xmlrpclib  # pylint: disable=F0401

def processCommandline(argv):
    """Parse command-line options and normalize --idle to seconds.

    The --idle value accepts an optional single-letter unit suffix:
    m(inutes), h(ours), d(ays, the default) or w(eeks).  When --password
    is omitted, the password is read from /etc/rhn/<username>-password.
    Exits with status 1 on a missing or malformed --idle value.
    """
    optionsTable = [
        Option('--idle', action='store', dest='idle',
            help=''),
        Option('--host', action='store', dest='host', default='localhost',
            help=''),
        Option('--username', action='store', dest='username', default='admin',
            help=''),
        Option('--password', action='store', dest='passwd',
            help=''),
        Option('--force', action='store_true', dest='force', default=False,
            help=''),
    ]
    optionParser = OptionParser(
        usage="Usage: %s --idle=<idletime[w|d|h|m]> [--host=<host>] [--username=<username>] [--password=<password>] [--force]" % sys.argv[0],
        option_list=optionsTable)

    options = optionParser.parse_args(argv)[0]

    if not options.idle:
        sys.stderr.write('Need --idle parameter\n')
        sys.exit(1)

    if not options.passwd:
        passwdfile = '/etc/rhn/%s-password' % options.username
        try:
            with open(passwdfile, 'r') as f:
                options.passwd = f.read().splitlines()[0]
        except IOError:
            sys.stderr.write('Error reading password file [%s]: %s\n' % (passwdfile, sys.exc_info()[1]))

    # Split "<number><unit>"; a bare number falls through to days.
    # Raw string fixes the invalid escape sequences (\d, \D) that raise
    # DeprecationWarning on modern Python.
    try:
        t, w = re.compile(r'^(\d+)(\D)$').search(options.idle).groups()
    except AttributeError:
        t = options.idle
        w = 'd'

    try:
        t = int(t)
    except ValueError:
        sys.stderr.write('Unknown idle parameter [%s]\n' % options.idle)
        sys.exit(1)

    # Convert to seconds; reject any unit outside the documented set.
    multipliers = {'m': 60, 'h': 3600, 'd': 86400, 'w': 604800}
    if w not in multipliers:
        sys.stderr.write('Unknown idle parameter [%s]\n' % options.idle)
        sys.exit(1)
    options.idle = t * multipliers[w]

    return options

if __name__ == '__main__':
    options = processCommandline(sys.argv)

    # Systems whose last check-in predates this cutoff count as idle.
    cutoff = datetime.now() - timedelta(seconds=options.idle)

    print('Lookup on [%s] systems with last checkin before [%s]' % (options.host, cutoff))

    client = xmlrpclib.Server('http://%s/rpc/api' % options.host, verbose=0)
    session = client.auth.login(options.username, options.passwd)

    all_systems = client.system.list_user_systems(session)
    idle_ids = []
    for entry in all_systems:
        print('System [%s] id [%s] last checking [%s]' % (entry['name'], entry['id'], entry['last_checkin']), end='')
        if entry['last_checkin'] < cutoff:
            idle_ids.append(entry['id'])
            print(' -> delete', end='')
        print()

    if not idle_ids:
        print('Total systems [%s], none idle' % len(all_systems))
        sys.exit(0)

    # Only --force actually deletes; the default is a dry run.
    if options.force:
        client.system.delete_systems(session, idle_ids)
        print('All systems deleted')
    else:
        print('Total systems [%s], would delete [%s]' % (len(all_systems), len(idle_ids)))

    client.auth.logout(session)
0707010000001E000081FD00000000000000000000000162C3F37D0000313C000000000000000000000000000000000000002100000000spacewalk-utils/spacewalk-export#!/usr/bin/python3
#
# Utility for exporting Sat5 entity-data
#
# Copyright (c) 2014--2015 Red Hat, Inc.
#
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#

"""
spacewalk-export - a tool for preparing to move data from an existing Satellite-5 instance
to a Satellite-6 instance
"""

import csv
import logging
import os
import sys

from optparse import OptionParser, OptionGroup
from os.path import expanduser
from subprocess import call

home = expanduser("~")
DEFAULT_EXPORT_DIR = home + '/spacewalk-export-dir'
DEFAULT_EXPORT_PACKAGE = 'spacewalk_export.tar.gz'
REPORTS_DIR = 'exports'

SUPPORTED_ENTITIES = {
    'activation-keys': 'Activation keys',
    'channels': 'Custom/cloned channels and repositories for all organizations',
    'config-files-latest': 'Latest revision of all configuration files',
    'kickstart-scripts': 'Kickstart scripts for all organizations',
    'repositories': 'Defined repositories',
    'system-groups': 'System-groups for all organizations',
    'system-profiles': 'System profiles for all organizations',
    'users': 'Users and Organizations'
}

#
# Some sw-reports use org_id and some use organization-id
# Map report-to-org-id so we can make generic decisions later
#
REPORT_TO_ORG = {
    'activation-keys': 'org_id',
    'channels': 'org_id',
    'config-files-latest': 'org_id',
    'kickstart-scripts': 'org_id',
    'repositories': 'org_id',
    'system-groups': 'org_id',
    'system-profiles': 'organization_id',
    'users': 'organization_id'
}


def setupOptions():
    """Construct and return the OptionParser for spacewalk-export."""
    parser = OptionParser(usage='usage: %prog [options]')

    location_opts = OptionGroup(parser, "Locations", "Where do you want to export to?")
    location_opts.add_option('--export-dir', action='store', dest='export_dir',
                             metavar='DIR', default=DEFAULT_EXPORT_DIR,
                             help='Specify directory to store exports in (will be created if not found) - defaults to ' + DEFAULT_EXPORT_DIR)
    location_opts.add_option('--export-package', action='store', dest='export_package',
                             metavar='FILE', default=DEFAULT_EXPORT_PACKAGE,
                             help='Specify filename to use for final packaged-exports tarfile - defaults to ' + DEFAULT_EXPORT_PACKAGE)
    parser.add_option_group(location_opts)

    entity_opts = OptionGroup(parser, "Entities", "What do you want to export?")
    entity_opts.add_option('--list-entities', action='store_true', dest='list',
                           default=False, help='List supported entities')
    entity_opts.add_option('--entities', action='store', dest='entities',
                           metavar='entity[,entity...]', default='all',
                           help='Specify comma-separated list of entities to export (default is all)')
    entity_opts.add_option('--org', action='append', type='int', dest='org_ids',
                           metavar='ORG-ID', help='Specify an org-id whose data we will export')
    entity_opts.add_option('--dump-repos', action='store_true', dest='dump_repos',
                           default=False, help='Dump contents of file: repositories')
    parser.add_option_group(entity_opts)

    channel_opts = OptionGroup(parser, "Channels")
    channel_opts.add_option('--ext-pkgs', action='store_true', dest='extpkgs',
                            default=False, help='Channel-output contains only external packages')
    channel_opts.add_option('--skip-repogen', action='store_true', dest='skipregen',
                            default=False, help='Omit repodata generation for exported channels')
    channel_opts.add_option('--no-size', action='store_true', dest='nosize',
                            default=False, help='Do not check package size')
    parser.add_option_group(channel_opts)

    utility_opts = OptionGroup(parser, "Utility")
    utility_opts.add_option('--clean', action='store_true', default=False, dest='clean',
                            help='How do I clean up from previous runs?')
    utility_opts.add_option('--debug', action='store_true', default=False, dest='debug',
                            help='Log debugging output')
    utility_opts.add_option('--quiet', action='store_true', default=False, dest='quiet',
                            help='Log only errors')
    parser.add_option_group(utility_opts)

    return parser


def setupLogging(opt):
    # determine the logging level
    if opt.debug:
        level = logging.DEBUG
    elif opt.quiet:
        level = logging.ERROR
    else:
        level = logging.INFO
    # configure logging
    logging.basicConfig(level=level, format='%(levelname)s: %(message)s')
    return


def listSupported():
    """Log the names and descriptions of the exportable entities."""
    logging.info('Currently-supported entities include:')
    for name, description in SUPPORTED_ENTITIES.items():
        logging.info('%20s : %s' % (name, description))


def setupEntities(options):
    """Map each supported entity name to whether it should be exported.

    'all' (the default) selects everything; otherwise only the entities
    named in the comma-separated --entities list are enabled, with a
    logged error for any unrecognized name.
    """
    export_all = options.entities == 'all'
    selected = {name: export_all for name in SUPPORTED_ENTITIES}

    if export_all:
        return selected

    for wanted in options.entities.split(','):
        if wanted in selected:
            selected[wanted] = True
        else:
            logging.error('ERROR: unsupported entity ' + wanted + ', skipping...')

    return selected


def setupOutputDir(options):
    """Ensure the export directory and its reports subdirectory exist.

    Uses os.makedirs(..., exist_ok=True) rather than the original
    isdir-then-mkdir pattern, which was racy (TOCTOU) and failed when a
    parent of export_dir was missing.
    """
    # Mode 0o700: keep exported data readable by the owner only.
    os.makedirs(options.export_dir, 0o700, exist_ok=True)
    os.makedirs(options.export_dir + '/' + REPORTS_DIR, 0o700, exist_ok=True)

# Did we get an orgs-list? If so, return an array of --where entries


def _my_call(*args, **kwargs):
    rc = call(*args, **kwargs)
    if rc:
        logging.error('Error response from %s : [%s]' % (args[0][0], rc))
    return rc

def _generateWhere(options, reportname):
    where = []

    if options.org_ids:
        for org in options.org_ids:
            where.append('--where-%s=%s' % (REPORT_TO_ORG[reportname], org))

    return where


def _issueReport(options, reportname):
    """Run spacewalk-report for *reportname*, writing CSV into the export dir.

    Returns the path of the CSV file produced.  The file is opened via a
    context manager so the handle is always closed (the original leaked
    the open file object passed as stdout), and the two nearly identical
    call branches are merged: an empty where-clause simply contributes no
    arguments.
    """
    report_file = '%s/%s/%s.csv' % (options.export_dir, REPORTS_DIR, reportname)
    where_clause = _generateWhere(options, reportname)
    logging.debug('...WHERE = %s' % where_clause)
    with open(report_file, 'w') as out:
        _my_call(['/usr/bin/spacewalk-report'] + where_clause + [reportname],
                 stdout=out)
    return report_file


def channelsDump(options):
    """Export the channels report, then full channel content via spacewalk-export-channels."""
    logging.info('Processing channels...')
    _issueReport(options, 'channels')

    # Translate our option flags into spacewalk-export-channels arguments:
    # -e external packages only, -s skip repogen, -S skip size checks,
    # plus one -o per requested org id.
    flags = []
    if options.extpkgs:
        flags.append('-e')
    if options.skipregen:
        flags.append('-s')
    if options.nosize:
        flags.append('-S')
    if options.org_ids:
        for oid in options.org_ids:
            flags.extend(['-o', str(oid)])

    logging.debug('...EXTRA = %s' % flags)

    channel_export_dir = options.export_dir + '/' + REPORTS_DIR + '/' + 'CHANNELS'
    if not os.path.isdir(channel_export_dir):
        os.mkdir(channel_export_dir, 0o700)
    _my_call(['/usr/bin/spacewalk-export-channels', '-d', channel_export_dir, '-f', 'FORCE'] + flags)


def usersDump(options):
    """Export the users (and organizations) report."""
    logging.info('Processing users...')
    _issueReport(options, 'users')


def systemgroupsDump(options):
    """Export the system-groups report."""
    logging.info('Processing system-groups...')
    _issueReport(options, 'system-groups')


def systemprofilesDump(options):
    """Export the system-profiles report."""
    logging.info('Processing system-profiles...')
    _issueReport(options, 'system-profiles')


def activationkeysDump(options):
    """Export the activation-keys report."""
    logging.info('Processing activation-keys...')
    _issueReport(options, 'activation-keys')


def repositoriesDump(options):
    """Export the repositories report; optionally archive file:// repo contents.

    When --dump-repos is set, every repository whose source_url starts
    with the file:// scheme has its on-disk directory archived into the
    export directory as repository_<repo-label>_contents.tar.gz.

    Fixes: the report CSV is now read through a context manager so the
    handle is closed even if parsing fails, and the duplicated tar
    command (which differed only in the -v flag) is built once.
    """
    logging.info('Processing repositories...')
    repo_file = _issueReport(options, 'repositories')
    if not options.dump_repos:
        return

    logging.info('...repository dump requested')
    # Walk the CSV we just dumped looking for file:-scheme repositories.
    with open(repo_file, 'r') as handle:
        for entry in csv.DictReader(handle):
            if not entry['source_url'].lower().startswith('file://'):
                continue
            logging.debug('Found file-repository : ' + entry['source_url'])
            # Strip 'file://' to get the absolute path of the repository.
            repo_loc = entry['source_url'][7:]
            repo_dir = repo_loc.rsplit('/', 1)[0]        # parent directory
            repo_basename = repo_loc.rsplit('/', 1)[-1]  # repository dir name
            repo_tarname = 'repository_' + entry['repo_label'] + '_contents.tar.gz'
            logging.info('...storing file-repo %s into %s' % (repo_loc, repo_tarname))
            # Tar it up into the export-dir; verbose listing only in --debug.
            tar_cmd = ['/bin/tar', '-c', '-z',
                       '-C', repo_dir,
                       '-f', '%s/%s/%s' % (options.export_dir, REPORTS_DIR, repo_tarname),
                       repo_basename]
            if options.debug:
                tar_cmd.insert(2, '-v')
            _my_call(tar_cmd)


def kickstartscriptsDump(options):
    """Export the kickstart-scripts report."""
    logging.info('Processing kickstart-scripts...')
    _issueReport(options, 'kickstart-scripts')


def configfileslatestDump(options):
    """Export the config-files-latest report."""
    logging.info('Processing config-files...')
    _issueReport(options, 'config-files-latest')


def prepareExport(options):
    """Package everything under REPORTS_DIR into the final export tarfile.

    The archive is owned by apache so the importing side can read it.
    Exits the process with status 1 when tar fails.

    Fixes: the original duplicated the entire tar command just to add -v
    in debug mode; the argument list is now built once.
    """
    tar_cmd = ['/bin/tar', '-c', '-z',
               '--owner', 'apache', '--group', 'apache',
               '-C', options.export_dir,
               '-f', '%s/%s' % (options.export_dir, options.export_package),
               REPORTS_DIR]
    if options.debug:
        tar_cmd.insert(2, '-v')  # verbose file listing only when --debug
    rc = _my_call(tar_cmd)

    if rc:
        logging.error('Error attempting to create export-file at %s/%s' %
                (options.export_dir, options.export_package))
        sys.exit(1)
    else:
        logging.info('Export-file created at %s/%s' %
                (options.export_dir, options.export_package))


def cleanup(options):
    """Log the manual command needed to remove a previous run's export dir."""
    for message in ('To clean up, issue the following command:',
                    'sudo rm -rf %s' % (options.export_dir),
                    'NOTE:  No, I will not do it for you!'):
        logging.info(message)


def checkSuperUser():
    """Exit with status 1 unless running with effective UID 0 (root)."""
    if os.geteuid() == 0:
        return
    print("You must be root to run this!")
    sys.exit(1)


if __name__ == '__main__':
    parser = setupOptions()
    (options, args) = parser.parse_args()
    setupLogging(options)
    logging.debug('OPTIONS = %s' % options)

    # --list-entities is informational only, so handle it before the
    # root check.
    if (options.list):
        listSupported()
        sys.exit(0)

    checkSuperUser()

    # --clean only logs instructions and exits; nothing is exported.
    if (options.clean):
        cleanup(options)
        sys.exit(0)

    entities = setupEntities(options)
    setupOutputDir(options)

    # Dispatch dynamically: each enabled entity maps to a module-level
    # function found via globals().
    for entity in list(entities.keys()):
        if (entities[entity]):
            logging.debug('DUMPING ' + entity)
            # dump-function name is <entity>Dump(options)
            # BUT - entity-names can have dashes, function names do NOT
            globals()[entity.lower().replace('-', '') + 'Dump'](options)
        else:
            logging.debug('SKIPPING ' + entity)

    prepareExport(options)
0707010000001F000081B400000000000000000000000162C3F37D00000E2D000000000000000000000000000000000000002C00000000spacewalk-utils/migrate-system-profile.sgml<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN">
<refentry>

<RefMeta>
<RefEntryTitle>migrate-system-profile</RefEntryTitle><manvolnum>8</manvolnum>
<RefMiscInfo>Version 1.0</RefMiscInfo>
</RefMeta>

<RefNameDiv>
<RefName><command>migrate-system-profile</command></RefName>
<RefPurpose>
Migrate a system between organizations on the Red Hat Satellite server.
</RefPurpose>
</RefNameDiv>

<RefSynopsisDiv>
<Synopsis>
    <cmdsynopsis>
        <command>migrate-system-profile</command>
        <arg>options <replaceable>...</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg> --satellite=<replaceable>SATELLITE</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-u<replaceable>USERNAME</replaceable></arg>
        <arg>--username=<replaceable>USERNAME</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-p<replaceable>PASSWORD</replaceable></arg>
        <arg>--password=<replaceable>PASSWORD</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg> --systemId=<replaceable>SYSTEMID</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg> --to-org-id=<replaceable>TARGET_ORG_ID</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg> --csv=<replaceable>CSV_FILE</replaceable> </arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-v</arg><arg> --verbose </arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <arg>-h</arg><arg>--help</arg>
    </cmdsynopsis>
</Synopsis>
</RefSynopsisDiv>

<RefSect1><Title>Description</Title>
    <para>
        <emphasis>migrate-system-profile</emphasis> migrates a system from one org to another.
    </para>
</RefSect1>

<RefSect1><Title>Options</Title>
<variablelist>
    <varlistentry>
        <term>-h, --help</term>
        <listitem>
            <para>Display the help screen with a list of options.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--satellite </term>
        <listitem>
            <para> Satellite server on which migration needs to run. </para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-u<replaceable>USERNAME</replaceable>
        --username=<replaceable>USERNAME</replaceable></term>
        <listitem>
            <para>username of user that has administrative access to migrate a system from the source org.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-p<replaceable>PASSWORD</replaceable>
        --password=<replaceable>PASSWORD</replaceable></term>
        <listitem>
            <para>password of user that has administrative access to migrate a system from the source org.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--systemId=<replaceable>SYSTEMID</replaceable></term>
        <listitem>
            <para> client system id to be migrated on the satellite server. </para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--to-org-id=<replaceable>TARGET_ORG_ID</replaceable></term>
        <listitem>
            <para> Target org id the client system will be migrated to. </para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--csv=<replaceable>CSV_FILE</replaceable> </term>
        <listitem>
            <para> CSV file with data to be migrated. Each line should be of the format:
                     systemId,to-org-id </para>
        </listitem>
    </varlistentry>
</variablelist>
</RefSect1>


<RefSect1><Title>Authors</Title>
<simplelist>
    <member>Pradeep Kilambi <email>pkilambi@redhat.com</email></member>
</simplelist>
</RefSect1>
</RefEntry>
07070100000020000081B400000000000000000000000162C3F37D00002168000000000000000000000000000000000000001E00000000spacewalk-utils/taskotop.sgml<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN">
<refentry>

<RefMeta>
<RefEntryTitle>taskotop</RefEntryTitle><manvolnum>8</manvolnum>
<RefMiscInfo>Version 1.0</RefMiscInfo>
</RefMeta>

<RefNameDiv>
<RefName><command>taskotop</command></RefName>
<RefPurpose>
Utility to monitor what the taskomatic daemon is doing.
</RefPurpose>
</RefNameDiv>

<RefSynopsisDiv>
    <cmdsynopsis>
        <command>taskotop</command>
        <group>
	    <arg choice="plain">-e</arg>
	    <arg choice="plain">-r</arg>
	</group>
        <arg>-m <replaceable>MAXAGE</replaceable></arg>
        <arg>-n <replaceable>NUMITERATIONS</replaceable></arg>
        <arg>-t</arg>
        <arg>-v<arg>-v<arg>-v<arg>-v</arg></arg></arg></arg>
        <arg>--hide-elapsed</arg>
        <arg>--show-start</arg>
        <arg>--logfile <replaceable>LOGFILE</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <command>taskotop</command>
        <group>
	    <arg choice="plain">--each-task</arg>
	    <arg choice="plain">--recent-history</arg>
	</group>
        <arg>--max-age <replaceable>MAXAGE</replaceable></arg>
        <arg>-n <replaceable>NUMITERATIONS</replaceable></arg>
        <arg>--taskomatic</arg>
        <arg>--verbose<arg>--verbose<arg>--verbose<arg>--verbose</arg></arg></arg></arg>
        <arg>--hide-elapsed</arg>
        <arg>--show-start</arg>
        <arg>--logfile <replaceable>LOGFILE</replaceable></arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <command>taskotop</command>
        <arg>-h</arg>
    </cmdsynopsis>
    <cmdsynopsis>
        <command>taskotop</command>
        <arg>--help</arg>
    </cmdsynopsis>
</RefSynopsisDiv>

<RefSect1><Title>Description</Title>
    <para>
        <command>taskotop</command> is a utility that displays the status of currently-executing or recently-completed taskomatic tasks.  It optionally displays information about the running taskomatic daemon.
    </para>
    <para>
        <command>taskotop</command> provides an updating terminal-window view of the currently-executing taskomatic tasks, similar to the way the <application>top</application> command works for general processes. It provides the following information for tasks that are currently executing or which have finished within the last minute:
        <variablelist>
	  <varlistentry>
	    <term>RUN ID</term>
	    <listitem>
	      <para>
		Unique ID of the run. An ID can be used to obtain additional information. If a specific task produced any standard output or error output during its run, it can be found in /var/log/rhn/tasko/sat/*/*_RUN_ID_{out,err}.
	      </para>
	    </listitem>
	  </varlistentry>
	  <varlistentry>
	    <term>TASK NAME</term>
	    <listitem>
	      <para>
		Name of a taskomatic task as shown on Admin/Task Schedules, 'Bunch' details. For example, the <emphasis>cleanup-data-bunch</emphasis> includes the TASK NAMEs 'cleanup-packagechangelog-data' and 'cleanup-timeseries-data'.
	      </para>
	    </listitem>
	  </varlistentry>	  
    	  <varlistentry>
	    <term>START</term>
	    <listitem>
	      <para>
		The date and time the task run was started.  This column of information is hidden by default and can be enabled with the <option>--show-start</option> command line argument.
	      </para>
	    </listitem>
	  </varlistentry>
	  <varlistentry>
	    <term>ELAPSED</term>
	    <listitem>
	      <para>
		How long the task has been running.  This column of information is displayed by default and can be hidden with the <option>--hide-elapsed</option> command line option.
	      </para>
	    </listitem>
	  </varlistentry>
	  <varlistentry>
	    <term>END</term>
	    <listitem>
	      <para>
		The date and time the task run ended.
	      </para>
	    </listitem>
	  </varlistentry>
	  <varlistentry>
	    <term>CHANNEL</term>
	    <listitem>
	      <para>
		When a channel-repodata task is in the running state, this column shows the label of the channel for which taskomatic is currently processing data.
	      </para>
	    </listitem>
	  </varlistentry>
        </variablelist>
    </para>
    <para>
        The general taskomatic log file can be found at /var/log/rhn/rhn_taskomatic_daemon.log
    </para>
</RefSect1>

<RefSect1><Title>Options</Title>
<variablelist>
    <varlistentry>
        <term>-h, --help</term>
        <listitem>
            <para>
	        Display the help screen with a list of options.
	    </para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-e, --each-task</term>
        <listitem>
            <para>
                Display most recent run for each task instead of recent task run history.
	    </para>
	</listitem>
    </varlistentry>
    <varlistentry>
	<term>-r, --recent-history</term>
	<listitem>
	    <para>
	        Display recent history of task runs. This is the default display mode.
	    </para>
	</listitem>
    </varlistentry>
    <varlistentry>
	<term>-H, --human-readable</term>
	<listitem>
	    <para>
                Use human readable time output. Time will be displayed in the format [[days:]hours:]min:sec instead of total seconds.
	    </para>
	</listitem>
    </varlistentry>
    <varlistentry>
	<term>-m <replaceable>MAXAGE</replaceable>, --max-age <replaceable>MAXAGE</replaceable></term>
	<listitem>
	    <para>
                Retrieve past events up to MAXAGE seconds old (default 60). This has no effect if -e/--each-task is specified.
	    </para>
	</listitem>
    </varlistentry>
    <varlistentry>
	<term>-n <replaceable>NUMITERATIONS</replaceable></term>
	<listitem>
	    <para>
                <command>taskotop</command> will iterate the specified number of times and then exit. If not specified or 0 (the default), <command>taskotop</command> will run until the user exits taskotop.
	    </para>
	</listitem>
    </varlistentry>
    <varlistentry>
	<term>-t, --taskomatic</term>
	<listitem>
	    <para>
                Include taskomaticd process information in the output.
	    </para>
	</listitem>
    </varlistentry>
    <varlistentry>
	<term>-v [-v [-v [-v] ] ], --verbose [--verbose [--verbose [--verbose] ] ]</term>
	<listitem>
	    <para>
                Increase log output verbosity. Specify multiple times, up to 4 to increase verbosity.
	    </para>
	</listitem>
    </varlistentry>
    <varlistentry>
	<term>--hide-elapsed</term>
	<listitem>
	    <para>
                Hide the ELAPSED column in the display.
	    </para>
	</listitem>
    </varlistentry>
    <varlistentry>
	<term>--show-start</term>
	<listitem>
	    <para>
                Include the START column in the display.
	    </para>
	</listitem>
    </varlistentry>
    <varlistentry>
        <term>--logfile <replaceable>LOGFILE</replaceable></term>
        <listitem>
            <para>
                Specify logfile to use if at least one verbose arg specified. Default is ./taskotop.log
	    </para>
        </listitem>
    </varlistentry>
</variablelist>
</RefSect1>

<RefSect1><Title>Interactive Commands</Title>
<para>
    While <command>taskotop</command> is running, these one-character interactive commands may be used.
</para>
<variablelist>
    <varlistentry>
        <term>e</term>
        <listitem>
            <para>
	        Change display mode to show each task's latest run.
	    </para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>h</term>
        <listitem>
            <para>
	        Display the interactive command help page.
	    </para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>H</term>
        <listitem>
            <para>
	        Toggle human readable format.  This toggles the time display between [[days:]hours:]min:sec format and total seconds.
	    </para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>q</term>
        <listitem>
            <para>
	        Quit <command>taskotop</command>.
	    </para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>r</term>
        <listitem>
            <para>
	        Change display mode to show recent history of task runs.
	    </para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>t</term>
        <listitem>
            <para>
	        Toggle taskomatic process information display.
	    </para>
        </listitem>
    </varlistentry>
</variablelist>
</RefSect1>

<RefSect1><Title>Authors</Title>
<simplelist>
    <member>Tomas Kasparek<email>tkasparek@redhat.com</email></member>
    <member>Eric Herget<email>eherget@redhat.com</email></member>
</simplelist>
</RefSect1>
</RefEntry>
07070100000021000081B400000000000000000000000162C3F37D00002757000000000000000000000000000000000000002A00000000spacewalk-utils/spacewalk-sync-setup.sgml<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN">
<refentry>

<RefMeta>
<RefEntryTitle>spacewalk-sync-setup</RefEntryTitle><manvolnum>8</manvolnum>
<RefMiscInfo>Version 1.0</RefMiscInfo>
</RefMeta>

<RefNameDiv>
<RefName><command>spacewalk-sync-setup</command></RefName>
<RefPurpose>
Utility for setting up master and slave relationships for synchronization between multiple servers
</RefPurpose>
</RefNameDiv>

<RefSynopsisDiv>
  <cmdsynopsis>
    <command>spacewalk-sync-setup</command>
    <arg>options <replaceable>...</replaceable></arg>
  </cmdsynopsis>
  <cmdsynopsis>
    <arg>--ss=<replaceable>SLAVE</replaceable></arg>
    <arg>--slave-server=<replaceable>SLAVE</replaceable></arg>    
  </cmdsynopsis>
  <cmdsynopsis>
    <arg>--sl=<replaceable>SLAVE_LOGIN</replaceable></arg>
    <arg>--slave-login=<replaceable>SLAVE_LOGIN</replaceable></arg>    
  </cmdsynopsis>
  <cmdsynopsis>
    <arg>--sp=<replaceable>SLAVE_PASSWORD</replaceable></arg>
    <arg>--slave-password=<replaceable>SLAVE_PASSWORD</replaceable></arg>    
  </cmdsynopsis>
  <cmdsynopsis>
    <arg>--ms=<replaceable>MASTER</replaceable></arg>
    <arg>--master-server=<replaceable>MASTER</replaceable></arg>    
  </cmdsynopsis>
  <cmdsynopsis>
    <arg>--ml=<replaceable>MASTER_LOGIN</replaceable></arg>
    <arg>--master-login=<replaceable>MASTER_LOGIN</replaceable></arg>    
  </cmdsynopsis>
  <cmdsynopsis>
    <arg>--mp=<replaceable>MASTER_PASSWORD</replaceable></arg>
    <arg>--master-password=<replaceable>MASTER_PASSWORD</replaceable></arg>    
  </cmdsynopsis>
  <cmdsynopsis>
    <arg>--md</arg>
    <arg>--master-default</arg>
  </cmdsynopsis>
  <cmdsynopsis>
    <arg>--cst</arg>
    <arg>--create-slave-template</arg>
  </cmdsynopsis>
  <cmdsynopsis>
    <arg>--cmt</arg>
    <arg>--create-master-template</arg>
  </cmdsynopsis>
  <cmdsynopsis>
    <arg>--ct</arg>
    <arg>--create-templates</arg>
  </cmdsynopsis>
  <cmdsynopsis>
    <arg>--msf=<replaceable>FILE</replaceable></arg>
    <arg>--master-setup-file=<replaceable>FILE</replaceable></arg>    
  </cmdsynopsis>
  <cmdsynopsis>
    <arg>--ssf=<replaceable>FILE</replaceable></arg>
    <arg>--slave-setup-file=<replaceable>FILE</replaceable></arg>    
  </cmdsynopsis>
  <cmdsynopsis>
    <arg>--dry-run</arg>
  </cmdsynopsis>
  <cmdsynopsis>
    <arg>--apply</arg>
  </cmdsynopsis>
  <cmdsynopsis>
    <arg>--default-ok</arg>
  </cmdsynopsis>
  <cmdsynopsis>
    <arg>-h</arg>
    <arg>--help</arg>
  </cmdsynopsis>
  <cmdsynopsis>
    <arg>-d</arg>
    <arg>--debug</arg>
  </cmdsynopsis>
  <cmdsynopsis>
    <arg>-q</arg>
    <arg>--quiet</arg>
  </cmdsynopsis>
</RefSynopsisDiv>

<RefSect1><Title>Description</Title>
  <para>
    <command>spacewalk-sync-setup</command> is a utility that sets up master and slave relationships for synchronization between multiple servers.  Its purpose is to ease the burden required to map master-to-slave orgs.
  </para>
  <para>
    A typical use-case workflow for the <command>spacewalk-sync-setup</command> utility will likely be users that have a master instance with a set of orgs that are identical to a set of orgs on the slave instance and will be synchronizing content from that master.  Note that the names of the orgs are the key thing - because two servers do not share the same database, the org ids in the database are not viable to use as identifiers between the master and slave servers.
  </para>
  <para>
    In this example use-case, there is a master instance with orgs Org1..OrgN and a slave instance with orgs Org1..OrgN.  The master has three channels, Org1-Public, Org1-Protected and Org1-Private.  Org1-Protected is visible only to Org1 and Org3.  The goal is to synchronize the slave to the master and end up with the same visibility.
  </para>
  <para>
    A user that has access to the admin role on both master and slave servers would issue the following command on any machine that can access the public XMLRPC API of both instances:
  </para>
  <para>
    <command>spacewalk-sync-setup --ms=<replaceable>MASTER_FQDN</replaceable> --ml=<replaceable>MASTER_LOGIN</replaceable> --mp=<replaceable>MASTER_PASSWORD</replaceable> --ss=<replaceable>SLAVE_FQDN</replaceable> --sl=<replaceable>SLAVE_LOGIN</replaceable> --sp=<replaceable>SLAVE_PASSWORD</replaceable> --create-templates --apply</command>
  </para>
  <para>
    At this point, the customer could then issue the <command>satellite-sync</command> for each of their customer channels:
  </para>
  <para>
    <command>satellite-sync -c Org1-Public -c Org1-Protected -c Org1-Private</command>
  </para>
  <para>
    and at the end of the sync, see the channels correctly synchronized, with a trust-hierarchy and channel-permissions setup matching that on the master.
  </para>
</RefSect1>

<RefSect1><Title>Connection Options</Title>
  <para>
    Identify the instances we're going to connect to.
  </para>
  <variablelist>
    <varlistentry>
      <term>--ss=<replaceable>SLAVE</replaceable>, --slave-server=<replaceable>SLAVE</replaceable></term>
      <listitem>
        <para>
	  Name of a slave to connect to.
	</para>
      </listitem>
    </varlistentry>
    <varlistentry>
      <term>--sl=<replaceable>SLAVE_LOGIN</replaceable>, --slave-login=<replaceable>SLAVE_LOGIN</replaceable></term>
      <listitem>
        <para>
	  A sat-admin login for slave-server.
	</para>
      </listitem>
    </varlistentry>
    <varlistentry>
      <term>--sp=<replaceable>SLAVE_PASSWORD</replaceable>, --slave-password=<replaceable>SLAVE_PASSWORD</replaceable></term>
      <listitem>
        <para>
	  Password for login slave-login on slave-server.
	</para>
      </listitem>
    </varlistentry>
    <varlistentry>
      <term>--ms=<replaceable>MASTER</replaceable>, --master-server=<replaceable>MASTER</replaceable></term>
      <listitem>
        <para>
	  Name of a master to connect to.
	</para>
      </listitem>
    </varlistentry>
    <varlistentry>
      <term>--ml=<replaceable>MASTER_LOGIN</replaceable>, --master-login=<replaceable>MASTER_LOGIN</replaceable></term>
      <listitem>
        <para>
	  A sat-admin login for master-server.
	</para>
      </listitem>
    </varlistentry>
    <varlistentry>
      <term>--mp=<replaceable>MASTER_PASSWORD</replaceable>, --master-password=<replaceable>MASTER_PASSWORD</replaceable></term>
      <listitem>
        <para>
	  Password for login master-login on master-server.
	</para>
      </listitem>
    </varlistentry>
    <varlistentry>
      <term>--md, --master-default</term>
      <listitem>
        <para>
	  Should the specified master be made the default-master in a specified template-file.
	</para>
      </listitem>
    </varlistentry>
  </variablelist>
</RefSect1>

<RefSect1><Title>Template Options</Title>
  <para>
    Options for creating initial versions of the setup files.
  </para>
  <para>
    NOTE: This will replace existing machine-specific stanzas with new content.
  </para>
  <variablelist>
    <varlistentry>
      <term>--cst, --create-slave-template</term>
      <listitem>
        <para>
	  Create/update a setup file containing a stanza for the slave we're pointed at, based on information from the master we're pointed at.
	</para>
      </listitem>
    </varlistentry>
    <varlistentry>
      <term>--cmt, --create-master-template</term>
      <listitem>
        <para>
	  Create/update a setup file stanza for the master we're pointed at, based on information from the slave we're pointed at.
	</para>
      </listitem>
    </varlistentry>
    <varlistentry>
      <term>--ct, --create-templates</term>
      <listitem>
        <para>
	  Create both a master and a slave setup file, for the master/slave pair we're pointed at.
	</para>
      </listitem>
    </varlistentry>
  </variablelist>
</RefSect1>

<RefSect1><Title>Setup Options</Title>
  <para>
    Specify the setup files we're actually going to apply to a slave/master.
  </para>
  <variablelist>
    <varlistentry>
      <term>--msf=<replaceable>FILE</replaceable>, --master-setup-file=<replaceable>FILE</replaceable></term>
      <listitem>
        <para>
	  Specify the master-setup-file we should use.
	</para>
      </listitem>
    </varlistentry>
    <varlistentry>
      <term>--ssf=<replaceable>FILE</replaceable>, --slave-setup-file=<replaceable>FILE</replaceable></term>
      <listitem>
        <para>
	  Specify the slave-setup-file we should use.
	</para>
      </listitem>
    </varlistentry>
  </variablelist>
</RefSect1>

<RefSect1><Title>Action Options</Title>
  <para>
    Should we actually affect the specified instances?
  </para>
  <variablelist>
    <varlistentry>
      <term>--dry-run</term>
      <listitem>
        <para>
	  Don't actually change anything, but tell us what you would have done.
	</para>
      </listitem>
    </varlistentry>
    <varlistentry>
      <term>--apply</term>
      <listitem>
        <para>
	  Make the changes specified by the setup files to the specified spacewalk instances.
	</para>
      </listitem>
    </varlistentry>
    <varlistentry>
      <term>--default-ok</term>
      <listitem>
        <para>
	  Even if I don't explicitly tell you about a master or slave on the command-line, it's ok to try to apply changes to them!
	</para>
      </listitem>
    </varlistentry>
  </variablelist>
</RefSect1>

<RefSect1><Title>Utility Options</Title>
  <variablelist>
    <varlistentry>
      <term>-h, --help</term>
      <listitem>
        <para>
	  Show the help message and exit.
	</para>
      </listitem>
    </varlistentry>
    <varlistentry>
      <term>-d, --debug</term>
      <listitem>
        <para>
	  Log debugging output.
	</para>
      </listitem>
    </varlistentry>
    <varlistentry>
      <term>-q, --quiet</term>
      <listitem>
        <para>
	  Log only errors.
	</para>
      </listitem>
    </varlistentry>
  </variablelist>
</RefSect1>

<RefSect1><Title>Authors</Title>
  <simplelist>
    <member>Grant Gainey<email>ggainey@redhat.com</email></member>
    <member>Eric Herget<email>eherget@redhat.com</email></member>
  </simplelist>
</RefSect1>
</RefEntry>
07070100000022000081B400000000000000000000000162C3F37D00020272000000000000000000000000000000000000002E00000000spacewalk-utils/spacewalk-common-channels.ini#
# These are definitions of common channels and their names in Spacewalk.
#
# Every section defines a set channels (base or child) for a given list
# of architectures.
#
# Mandatory options for a section (have to be defined or inherited from [DEFAULT]):
#       label
#       name
#       summary
#       archs
#       checksum
# Optional options for a section:
#       activationkey
#       base_channels (child channel has to have it)
#
# Base channels have to be defined before child channels.
#

[DEFAULT]
label = %(section)s-%(arch)s
checksum = sha1
summary  = %(name)s
activationkey = %(label)s
gpgkey_url = %(base_channel_gpgkey_url)s
gpgkey_id = %(base_channel_gpgkey_id)s
gpgkey_fingerprint = %(base_channel_gpgkey_fingerprint)s
yum_repo_label = External - %(name)s
## now define some "macros"
_x86_archs = i386, x86_64
_spacewalk_nightly_gpgkey_url = https://copr-be.cloud.fedoraproject.org/results/@spacewalkproject/nightly/pubkey.gpg
_spacewalk_nightly_gpgkey_id = DBA67EA3
_spacewalk_nightly_gpgkey_fingerprint = 85AC E11D 3A41 8D77 FC4F  0F30 E481 344A DBA6 7EA3
_spacewalk_nightlyclient_gpgkey_url = https://copr-be.cloud.fedoraproject.org/results/@spacewalkproject/nightly-client/pubkey.gpg
_spacewalk_nightlyclient_gpgkey_id = 1B9881E5
_spacewalk_nightlyclient_gpgkey_fingerprint = 44B8 3746 B8D6 0089 EC0D  5479 D4B9 8439 1B98 81E5
_spacewalk_28_gpgkey_url = https://copr-be.cloud.fedoraproject.org/results/@spacewalkproject/spacewalk-2.8/pubkey.gpg
_spacewalk_28_gpgkey_id = BAD596D6
_spacewalk_28_gpgkey_fingerprint = E810 498D 6A68 D4B8 C919  2EDB E26A 9033 BAD5 96D6
_spacewalk_28client_gpgkey_url = https://copr-be.cloud.fedoraproject.org/results/@spacewalkproject/spacewalk-2.8-client/pubkey.gpg
_spacewalk_28client_gpgkey_id = BE4B0BC2
_spacewalk_28client_gpgkey_fingerprint = FF8D 0364 E7B5 EFD3 4FBF  9CD1 BDAA D72F BE4B 0BC2
_spacewalk_29_gpgkey_url = https://copr-be.cloud.fedoraproject.org/results/@spacewalkproject/spacewalk-2.9/pubkey.gpg
_spacewalk_29_gpgkey_id = 6E5454FA
_spacewalk_29_gpgkey_fingerprint = E089 1A20 65C5 4DDD 4E12  B8CA AD32 9E0D 6E54 54FA
_spacewalk_29client_gpgkey_url = https://copr-be.cloud.fedoraproject.org/results/@spacewalkproject/spacewalk-2.9-client/pubkey.gpg
_spacewalk_29client_gpgkey_id = 9BD837BA
_spacewalk_29client_gpgkey_fingerprint = A353 22CF 3B3F 9C71 101E  B74B 49FC 0FD8 9BD8 37BA
# Uyuni
_uyuni_gpgkey_id = 0D20833E
_uyuni_gpgkey_fingerprint = 62F0 28DE 22F8 BF49 B88B C9E5 972E 5D6C 0D20 833E

[fedora29]
archs    = %(_x86_archs)s
checksum = sha256
name     = Fedora 29 (%(arch)s)
gpgkey_url = https://getfedora.org/static/429476B4.txt
gpgkey_id = 429476B4
gpgkey_fingerprint = 5A03 B4DD 8254 ECA0 2FDA  1637 A20A A56B 4294 76B4
repo_url = https://mirrors.fedoraproject.org/metalink?repo=fedora-29&arch=%(arch)s
dist_map_release = 29

[fedora29-modular]
label    = %(base_channel)s-modular
name     = Fedora 29 Modular (%(arch)s)
archs    = %(_x86_archs)s
checksum = sha256
base_channels = fedora29-%(arch)s
repo_url = https://mirrors.fedoraproject.org/metalink?repo=fedora-modular-29&arch=%(arch)s

[fedora29-updates]
label    = %(base_channel)s-updates
name     = Fedora 29 Updates (%(arch)s)
archs    = %(_x86_archs)s
checksum = sha256
base_channels = fedora29-%(arch)s
repo_url = https://mirrors.fedoraproject.org/metalink?repo=updates-released-f29&arch=%(arch)s

[fedora29-updates-modular]
label    = %(base_channel)s-updates-modular
name     = Fedora 29 Updates Modular (%(arch)s)
archs    = %(_x86_archs)s
checksum = sha256
base_channels = fedora29-%(arch)s
repo_url = https://mirrors.fedoraproject.org/metalink?repo=updates-released-modular-f29&arch=%(arch)s

[fedora29-debug]
label    = %(base_channel)s-debug
name    = Fedora 29 Debug (%(arch)s)
archs    = %(_x86_archs)s
checksum = sha256
base_channels = fedora29-%(arch)s
repo_url = https://mirrors.fedoraproject.org/metalink?repo=fedora-debug-29&arch=%(arch)s

[fedora29-updates-debug]
label    = %(base_channel)s-updates-debug
name    = Fedora 29 Updates Debug (%(arch)s)
archs    = %(_x86_archs)s
checksum = sha256
base_channels = fedora29-%(arch)s
repo_url = https://mirrors.fedoraproject.org/metalink?repo=updates-released-debug-f29&arch=%(arch)s

[fedora30]
archs    = %(_x86_archs)s
checksum = sha256
name     = Fedora 30 (%(arch)s)
gpgkey_url = https://getfedora.org/static/fedora.gpg
gpgkey_id = CFC659B9
gpgkey_fingerprint = F1D8 EC98 F241 AAF2 0DF6  9420 EF3C 111F CFC6 59B9
repo_url = https://mirrors.fedoraproject.org/metalink?repo=fedora-30&arch=%(arch)s
dist_map_release = 30

[fedora30-modular]
label    = %(base_channel)s-modular
name     = Fedora 30 Modular (%(arch)s)
archs    = %(_x86_archs)s
checksum = sha256
base_channels = fedora30-%(arch)s
repo_url = https://mirrors.fedoraproject.org/metalink?repo=fedora-modular-30&arch=%(arch)s

[fedora30-updates]
label    = %(base_channel)s-updates
name     = Fedora 30 Updates (%(arch)s)
archs    = %(_x86_archs)s
checksum = sha256
base_channels = fedora30-%(arch)s
repo_url = https://mirrors.fedoraproject.org/metalink?repo=updates-released-f30&arch=%(arch)s

[fedora30-updates-modular]
label    = %(base_channel)s-updates-modular
name     = Fedora 30 Updates Modular (%(arch)s)
archs    = %(_x86_archs)s
checksum = sha256
base_channels = fedora30-%(arch)s
repo_url = https://mirrors.fedoraproject.org/metalink?repo=updates-released-modular-f30&arch=%(arch)s

[fedora30-debug]
label    = %(base_channel)s-debug
name    = Fedora 30 Debug (%(arch)s)
archs    = %(_x86_archs)s
checksum = sha256
base_channels = fedora30-%(arch)s
repo_url = https://mirrors.fedoraproject.org/metalink?repo=fedora-debug-30&arch=%(arch)s

[fedora30-updates-debug]
label    = %(base_channel)s-updates-debug
name    = Fedora 30 Updates Debug (%(arch)s)
archs    = %(_x86_archs)s
checksum = sha256
base_channels = fedora30-%(arch)s
repo_url = https://mirrors.fedoraproject.org/metalink?repo=updates-released-debug-f30&arch=%(arch)s

[centos8]
archs    = x86_64, ppc64le, aarch64
name     = CentOS 8 (%(arch)s)
gpgkey_url = http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-Official
gpgkey_id = 8483C65D
gpgkey_fingerprint = 99DB 70FA E1D7 CE22 7FB6 4882 05B5 55B3 8483 C65D
repo_url = http://mirrorlist.centos.org/?release=8&arch=%(arch)s&repo=BaseOS&infra=stock
dist_map_release = 8

[centos8-appstream]
label    = %(base_channel)s-appstream
archs    = x86_64, ppc64le, aarch64
name     = CentOS 8 AppStream (%(arch)s)
base_channels = centos8-%(arch)s
repo_url = http://mirrorlist.centos.org/?release=8&arch=%(arch)s&repo=AppStream&infra=stock

[centos8-centosplus]
label    = %(base_channel)s-centosplus
archs    = x86_64, ppc64le, aarch64
name     = CentOS 8 Plus (%(arch)s)
base_channels = centos8-%(arch)s
repo_url = http://mirrorlist.centos.org/?release=8&arch=%(arch)s&repo=centosplus&infra=stock

[centos8-cr]
label    = %(base_channel)s-cr
archs    = x86_64, ppc64le, aarch64
name     = CentOS 8 CR (%(arch)s)
base_channels = centos8-%(arch)s
repo_url = http://mirrorlist.centos.org/?release=8&arch=%(arch)s&repo=cr&infra=stock

[centos8-extras]
label    = %(base_channel)s-extras
archs    = x86_64, ppc64le, aarch64
name     = CentOS 8 Extras (%(arch)s)
base_channels = centos8-%(arch)s
repo_url = http://mirrorlist.centos.org/?release=8&arch=%(arch)s&repo=extras&infra=stock

[centos8-fasttrack]
label    = %(base_channel)s-fasttrack
archs    = x86_64, ppc64le, aarch64
name     = CentOS 8 FastTrack (%(arch)s)
base_channels = centos8-%(arch)s
repo_url = http://mirrorlist.centos.org/?release=8&arch=%(arch)s&repo=fasttrack&infra=stock

[centos8-powertools]
label    = %(base_channel)s-powertools
archs    = x86_64, ppc64le, aarch64
name     = CentOS 8 PowerTools (%(arch)s)
base_channels = centos8-%(arch)s
repo_url = http://mirrorlist.centos.org/?release=8&arch=%(arch)s&repo=PowerTools&infra=stock

[centos8-uyuni-client]
name     = Uyuni Client Tools for %(base_channel_name)s
archs    = %(_x86_archs)s, ppc64le, aarch64
base_channels = centos8-%(arch)s
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/EL8-Uyuni-Client-Tools/EL_8/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/EL8-Uyuni-Client-Tools/EL_8/

[centos8-uyuni-client-devel]
name     = Uyuni Client Tools for %(base_channel_name)s (Development)
archs    = %(_x86_archs)s, ppc64le, aarch64
base_channels = centos8-%(arch)s
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/EL8-Uyuni-Client-Tools/EL_8/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/EL8-Uyuni-Client-Tools/EL_8/

[epel8]
label    = epel8-%(base_channel)s
name     = EPEL 8 for %(base_channel_name)s
archs    = x86_64, ppc64le, aarch64
base_channels = centos8-%(arch)s
gpgkey_url = http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8
gpgkey_id = 2F86D6A1
gpgkey_fingerprint = 94E2 79EB 8D8F 25B2 1810 ADF1 21EA 45AB 2F86 D6A1
repo_url = http://mirrors.fedoraproject.org/mirrorlist?repo=epel-8&arch=%(arch)s

[sdl8]
archs    = x86_64
name     = Springdale Linux 8 BaseOS (%(arch)s)
gpgkey_url = http://springdale.princeton.edu/data/springdale/7/%(arch)s/os/RPM-GPG-KEY-springdale
gpgkey_id = 41A40948
gpgkey_fingerprint = 2266 6129 E0CD 0062 8D44 C6CF 16CF C333 41A4 0948
repo_url = http://springdale.princeton.edu/data/springdale/8/%(arch)s/os/BaseOS/mirrorlist
dist_map_release = 8

[sdl8-updates]
label    = %(base_channel)s-updates
archs    = x86_64
name     = Springdale Linux 8 BaseOS Updates (%(arch)s)
base_channels = sdl8-%(arch)s
repo_url = http://springdale.princeton.edu/data/springdale/8/%(arch)s/os/Updates_BaseOS/mirrorlist

[sdl8-appstream]
label    = %(base_channel)s-appstream
archs    = x86_64
name     = Springdale Linux 8 AppStream (%(arch)s)
base_channels = sdl8-%(arch)s
repo_url = http://springdale.princeton.edu/data/springdale/8/%(arch)s/os/AppStream/mirrorlist

[sdl8-appstream-updates]
label    = %(base_channel)s-appstream-updates
archs    = x86_64
name     = Springdale Linux 8 AppStream Updates (%(arch)s)
base_channels = sdl8-%(arch)s
repo_url = http://springdale.princeton.edu/data/springdale/8/%(arch)s/os/Updates_AppStream/mirrorlist

[sdl8-unsupported]
label    = %(base_channel)s-unsupported
archs    = x86_64
name     = Springdale Linux 8 Unsupported (%(arch)s)
base_channels = sdl8-%(arch)s
repo_url = http://springdale.princeton.edu/data/puias/unsupported/8/%(arch)s/mirrorlist

[sdl8-computational]
label    = %(base_channel)s-computational
archs    = x86_64
name     = Springdale Linux 8 Computational (%(arch)s)
base_channels = sdl8-%(arch)s
repo_url = http://springdale.princeton.edu/data/puias/computational/8/%(arch)s/mirrorlist

[sdl8-buildsys]
label    = %(base_channel)s-buildsys
archs    = x86_64
name     = Springdale Linux 8 Build System (%(arch)s)
base_channels = sdl8-%(arch)s
repo_url = http://springdale.princeton.edu/data/puias/buildsys/8/os/%(arch)s/

[sdl8-uyuni-client]
name     = Uyuni Client Tools for %(base_channel_name)s
archs    = %(_x86_archs)s
base_channels = sdl8-%(arch)s
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/EL8-Uyuni-Client-Tools/EL_8/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/EL8-Uyuni-Client-Tools/EL_8/

[sdl8-uyuni-client-devel]
name     = Uyuni Client Tools for %(base_channel_name)s (Development)
archs    = %(_x86_archs)s
base_channels = sdl8-%(arch)s
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/EL8-Uyuni-Client-Tools/EL_8/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/EL8-Uyuni-Client-Tools/EL_8/

[centos7]
archs    = x86_64, ppc64le, aarch64
name     = CentOS 7 (%(arch)s)
gpgkey_url = http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-7
gpgkey_id = F4A80EB5
gpgkey_fingerprint = 6341 AB27 53D7 8A78 A7C2  7BB1 24C6 A8A7 F4A8 0EB5
repo_url = http://mirrorlist.centos.org/?release=7&arch=%(arch)s&repo=os
dist_map_release = 7

[centos7-atomic]
label    = %(base_channel)s-atomic
archs    = x86_64
name     = CentOS 7 Atomic (%(arch)s)
base_channels = centos7-%(arch)s
repo_url = http://mirror.centos.org/centos-7/7/atomic/%(arch)s

[centos7-centosplus]
label    = %(base_channel)s-centosplus
archs    = x86_64, ppc64le, aarch64
name     = CentOS 7 Plus (%(arch)s)
base_channels = centos7-%(arch)s
repo_url = http://mirrorlist.centos.org/?release=7&arch=%(arch)s&repo=centosplus

[centos7-cloud]
label    = %(base_channel)s-cloud
archs    = x86_64
name     = CentOS 7 Cloud (%(arch)s)
base_channels = centos7-%(arch)s
repo_url = http://mirror.centos.org/centos-7/7/cloud/%(arch)s

[centos7-cr]
label    = %(base_channel)s-cr
archs    = x86_64, ppc64le, aarch64
name     = CentOS 7 CR (%(arch)s)
base_channels = centos7-%(arch)s
repo_url = http://mirrorlist.centos.org/?release=7&arch=%(arch)s&repo=cr

[centos7-extras]
label    = %(base_channel)s-extras
archs    = x86_64, ppc64le, aarch64
name     = CentOS 7 Extras (%(arch)s)
base_channels = centos7-%(arch)s
repo_url = http://mirrorlist.centos.org/?release=7&arch=%(arch)s&repo=extras

[centos7-fasttrack]
label    = %(base_channel)s-fasttrack
archs    = x86_64, ppc64le, aarch64
name     = CentOS 7 FastTrack (%(arch)s)
base_channels = centos7-%(arch)s
repo_url = http://mirrorlist.centos.org/?release=7&arch=%(arch)s&repo=fasttrack

[centos7-opstools]
label    = %(base_channel)s-opstools
archs    = x86_64
name     = CentOS 7 OpsTools (%(arch)s)
base_channels = centos7-%(arch)s
repo_url = http://mirror.centos.org/centos-7/7/opstools/%(arch)s

[centos7-paas]
label    = %(base_channel)s-paas
archs    = x86_64
name     = CentOS 7 PaaS (%(arch)s)
base_channels = centos7-%(arch)s
repo_url = http://mirror.centos.org/centos-7/7/paas/%(arch)s

[centos7-rt]
label    = %(base_channel)s-rt
archs    = x86_64
name     = CentOS 7 RT (%(arch)s)
base_channels = centos7-%(arch)s
repo_url = http://mirror.centos.org/centos-7/7/rt/%(arch)s

[centos7-sclo]
label    = %(base_channel)s-sclo
archs    = x86_64
name     = CentOS 7 Sclo (%(arch)s)
base_channels = centos7-%(arch)s
repo_url = http://mirror.centos.org/centos-7/7/sclo/%(arch)s

[centos7-storage]
label    = %(base_channel)s-storage
archs    = x86_64
name     = CentOS 7 Storage (%(arch)s)
base_channels = centos7-%(arch)s
repo_url = http://mirror.centos.org/centos-7/7/storage/%(arch)s

[centos7-updates]
label    = %(base_channel)s-updates
archs    = x86_64, ppc64le, aarch64
name     = CentOS 7 Updates (%(arch)s)
base_channels = centos7-%(arch)s
repo_url = http://mirrorlist.centos.org/?release=7&arch=%(arch)s&repo=updates

[centos7-virt]
label    = %(base_channel)s-virt
archs    = x86_64
name     = CentOS 7 Virt (%(arch)s)
base_channels = centos7-%(arch)s
repo_url = http://mirror.centos.org/centos-7/7/virt/%(arch)s

[centos7-uyuni-client]
name     = Uyuni Client Tools for %(base_channel_name)s
archs    = %(_x86_archs)s, ppc64le, aarch64
base_channels = centos7-%(arch)s
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/CentOS7-Uyuni-Client-Tools/CentOS_7/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/CentOS7-Uyuni-Client-Tools/CentOS_7/

[centos7-uyuni-client-devel]
name     = Uyuni Client Tools for %(base_channel_name)s (Development)
archs    = %(_x86_archs)s, ppc64le, aarch64
base_channels = centos7-%(arch)s
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/CentOS7-Uyuni-Client-Tools/CentOS_7/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/CentOS7-Uyuni-Client-Tools/CentOS_7/

[epel7]
label    = epel7-%(base_channel)s
name     = EPEL 7 for %(base_channel_name)s
archs    = x86_64, ppc64, ppc64le, aarch64
base_channels = centos7-%(arch)s scientific7-%(arch)s
gpgkey_url = http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
gpgkey_id = 352C64E5
gpgkey_fingerprint = 91E9 7D7C 4A5E 96F1 7F3E  888F 6A2F AEA2 352C 64E5
repo_url = http://mirrors.fedoraproject.org/mirrorlist?repo=epel-7&arch=%(arch)s

[sdl7]
archs    = %(_x86_archs)s
name     = Springdale Linux 7 (%(arch)s)
gpgkey_url = http://springdale.princeton.edu/data/springdale/7/%(arch)s/os/RPM-GPG-KEY-springdale
gpgkey_id = 41A40948
gpgkey_fingerprint = 2266 6129 E0CD 0062 8D44 C6CF 16CF C333 41A4 0948
repo_url = http://springdale.princeton.edu/data/springdale/7/%(arch)s/os/mirrorlist
dist_map_release = 7

[sdl7-updates]
label    = %(base_channel)s-updates
archs    = %(_x86_archs)s
name     = Springdale Linux 7 Updates (%(arch)s)
base_channels = sdl7-%(arch)s
repo_url = http://springdale.princeton.edu/data/springdale/updates/7/en/os/%(arch)s/mirrorlist

[sdl7-addons]
label    = %(base_channel)s-addons
archs    = %(_x86_archs)s
name     = Springdale Linux 7 Addons (%(arch)s)
base_channels = sdl7-%(arch)s
repo_url = http://springdale.princeton.edu/data/springdale/7/%(arch)s/os/Addons/mirrorlist

[sdl7-addons-updates]
label    = %(base_channel)s-addons-updates
archs    = %(_x86_archs)s
name     = Springdale Linux 7 Addons Updates (%(arch)s)
base_channels = sdl7-addons-%(arch)s
repo_url = http://springdale.princeton.edu/data/springdale/updates/7/en/Addons/%(arch)s/mirrorlist

[sdl7-unsupported]
label    = %(base_channel)s-unsupported
archs    = %(_x86_archs)s
name     = Springdale Linux 7 Unsupported (%(arch)s)
base_channels = sdl7-%(arch)s
repo_url = http://springdale.princeton.edu/data/springdale/unsupported/7/%(arch)s/mirrorlist

[sdl7-computational]
label    = %(base_channel)s-computational
archs    = %(_x86_archs)s
name     = Springdale Linux 7 Computational (%(arch)s)
base_channels = sdl7-%(arch)s
repo_url = http://springdale.princeton.edu/data/springdale/computational/7/%(arch)s/mirrorlist

[sdl7-storage]
label    = %(base_channel)s-storage
archs    = %(_x86_archs)s
name     = Springdale Linux 7 Storage (%(arch)s)
base_channels = sdl7-%(arch)s
repo_url = http://springdale.princeton.edu/data/springdale/storage/7/%(arch)s/mirrorlist

[sdl7-scl]
label    = %(base_channel)s-scl
archs    = %(_x86_archs)s
name     = Springdale Linux 7 Software Collections (%(arch)s)
base_channels = sdl7-%(arch)s
repo_url = http://springdale.princeton.edu/data/springdale/SCL/7/%(arch)s/

[sdl7-buildsys]
label    = %(base_channel)s-buildsys
archs    = %(_x86_archs)s
name     = Springdale Linux 7 Build System (%(arch)s)
base_channels = sdl7-%(arch)s
repo_url = http://springdale.princeton.edu/data/springdale/buildsys/7/os/%(arch)s/mirrorlist

[sdl7-uyuni-client]
name     = Uyuni Client Tools for %(base_channel_name)s
archs    = %(_x86_archs)s
base_channels = sdl7-%(arch)s
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/CentOS7-Uyuni-Client-Tools/CentOS_7/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/CentOS7-Uyuni-Client-Tools/CentOS_7/

[sdl7-uyuni-client-devel]
name     = Uyuni Client Tools for %(base_channel_name)s (Development)
archs    = %(_x86_archs)s
base_channels = sdl7-%(arch)s
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/CentOS7-Uyuni-Client-Tools/CentOS_7/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/CentOS7-Uyuni-Client-Tools/CentOS_7/

[scientific6]
archs    = %(_x86_archs)s
name     = Scientific Linux 6 (%(arch)s)
gpgkey_url = http://ftp.scientificlinux.org/linux/scientific/6/i386/os/RPM-GPG-KEY-sl6
gpgkey_id = 9B1FD350
gpgkey_fingerprint = E2E5 CBB5 6E19 960F F509  6994 915D 75E0 9B1F D350
repo_url = http://ftp.scientificlinux.org/linux/scientific/6/%(arch)s/os/
dist_map_release = 6.2

[scientific6-updates-fast]
label    = %(base_channel)s-updates-fast
archs    = %(_x86_archs)s
name     = Scientific Linux 6 Updates FastBug (%(arch)s)
base_channels = scientific6-%(arch)s
repo_url = http://ftp.scientificlinux.org/linux/scientific/6/%(arch)s/updates/fastbugs/

[scientific6-updates-security]
label    = %(base_channel)s-updates-security
archs    = %(_x86_archs)s
name     = Scientific Linux 6 Updates Security (%(arch)s)
base_channels = scientific6-%(arch)s
repo_url = http://ftp.scientificlinux.org/linux/scientific/6/%(arch)s/updates/security/

[centos6]
archs    = %(_x86_archs)s
name     = CentOS 6 (%(arch)s)
gpgkey_url = http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-6
gpgkey_id = C105B9DE
gpgkey_fingerprint = C1DA C52D 1664 E8A4 386D  BA43 0946 FCA2 C105 B9DE
repo_url = https://vault.centos.org/centos/6/os/%(arch)s/
dist_map_release = 6

[centos6-centosplus]
label    = %(base_channel)s-centosplus
archs    = %(_x86_archs)s
name     = CentOS 6 Plus (%(arch)s)
base_channels = centos6-%(arch)s
repo_url = https://vault.centos.org/centos/6/centosplus/%(arch)s/

[centos6-contrib]
label    = %(base_channel)s-contrib
archs    = %(_x86_archs)s
name     = CentOS 6 Contrib (%(arch)s)
base_channels = centos6-%(arch)s
repo_url = https://vault.centos.org/centos/6/contrib/%(arch)s/

[centos6-extras]
label    = %(base_channel)s-extras
archs    = %(_x86_archs)s
name     = CentOS 6 Extras (%(arch)s)
base_channels = centos6-%(arch)s
repo_url = https://vault.centos.org/centos/6/extras/%(arch)s/

[centos6-fasttrack]
label    = %(base_channel)s-fasttrack
archs    = %(_x86_archs)s
name     = CentOS 6 FastTrack (%(arch)s)
base_channels = centos6-%(arch)s
repo_url = https://vault.centos.org/centos/6/fasttrack/%(arch)s/

[centos6-updates]
label    = %(base_channel)s-updates
archs    = %(_x86_archs)s
name     = CentOS 6 Updates (%(arch)s)
base_channels = centos6-%(arch)s
repo_url = https://vault.centos.org/centos/6/updates/%(arch)s/

[centos6-uyuni-client]
name     = Uyuni Client Tools for %(base_channel_name)s
archs    = %(_x86_archs)s
base_channels = centos6-%(arch)s
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/CentOS6-Uyuni-Client-Tools/CentOS_6/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/CentOS6-Uyuni-Client-Tools/CentOS_6/

[centos6-uyuni-client-devel]
name     = Uyuni Client Tools for %(base_channel_name)s (Development)
archs    = %(_x86_archs)s
base_channels = centos6-%(arch)s
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/CentOS6-Uyuni-Client-Tools/CentOS_6/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/CentOS6-Uyuni-Client-Tools/CentOS_6/

[epel6]
label    = epel6-%(base_channel)s
name     = EPEL 6 for %(base_channel_name)s
archs    = %(_x86_archs)s, ppc64
base_channels = centos6-%(arch)s scientific6-%(arch)s
gpgkey_url = http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-6
gpgkey_id = 0608B895
gpgkey_fingerprint = 8C3B E96A F230 9184 DA5C  0DAE 3B49 DF2A 0608 B895
repo_url = http://mirrors.fedoraproject.org/mirrorlist?repo=epel-6&arch=%(arch)s

[sdl6]
archs    = %(_x86_archs)s
name     = Springdale Linux 6 (%(arch)s)
gpgkey_url = http://springdale.princeton.edu/data/puias/6/%(arch)s/os/RPM-GPG-KEY-puias
gpgkey_id = 41A40948
gpgkey_fingerprint = 2266 6129 E0CD 0062 8D44 C6CF 16CF C333 41A4 0948
repo_url = http://puias.princeton.edu/data/puias/6/%(arch)s/os/mirrorlist
dist_map_release = 6

[sdl6-updates]
label    = %(base_channel)s-updates
archs    = %(_x86_archs)s
name     = Springdale Linux 6 Updates (%(arch)s)
base_channels = sdl6-%(arch)s
repo_url = http://springdale.princeton.edu/data/puias/updates/6/en/os/%(arch)s/mirrorlist

[sdl6-addons]
label    = %(base_channel)s-addons
archs    = %(_x86_archs)s
name     = Springdale Linux 6 Addons (%(arch)s)
base_channels = sdl6-%(arch)s
repo_url = http://springdale.princeton.edu/data/puias/6/%(arch)s/os/Addons/mirrorlist

[sdl6-addons-updates]
label    = %(base_channel)s-addons-updates
archs    = %(_x86_archs)s
name     = Springdale Linux 6 Addons Updates (%(arch)s)
base_channels = sdl6-addons-%(arch)s
repo_url = http://springdale.princeton.edu/data/puias/updates/6/en/Addons/%(arch)s/mirrorlist

[sdl6-unsupported]
label    = %(base_channel)s-unsupported
archs    = %(_x86_archs)s
name     = Springdale Linux 6 Unsupported (%(arch)s)
base_channels = sdl6-%(arch)s
repo_url = http://springdale.princeton.edu/data/puias/unsupported/6/%(arch)s/mirrorlist

[sdl6-computational]
label    = %(base_channel)s-computational
archs    = %(_x86_archs)s
name     = Springdale Linux 6 Computational (%(arch)s)
base_channels = sdl6-%(arch)s
repo_url = http://springdale.princeton.edu/data/puias/computational/6/%(arch)s/mirrorlist

[sdl6-ssa]
label    = %(base_channel)s-ssa
archs    = %(_x86_archs)s
name     = Springdale Linux 6 SSA (%(arch)s)
base_channels = sdl6-%(arch)s
repo_url = http://springdale.princeton.edu/data/springdale/SSA/6/%(arch)s/

[sdl6-scl]
label    = %(base_channel)s-scl
archs    = %(_x86_archs)s
name     = Springdale Linux 6 Software Collections (%(arch)s)
base_channels = sdl6-%(arch)s
repo_url = http://springdale.princeton.edu/data/springdale/SCL/6/%(arch)s/

[sdl6-rhs]
label    = %(base_channel)s-rhs
archs    = %(_x86_archs)s
name     = Springdale Linux 6 RHS (%(arch)s)
base_channels = sdl6-%(arch)s
repo_url = http://springdale.princeton.edu/data/springdale/RHS/6/%(arch)s/

[sdl6-ose]
label    = %(base_channel)s-ose
archs    = %(_x86_archs)s
name     = Springdale Linux 6 OSE (%(arch)s)
base_channels = sdl6-%(arch)s
repo_url = http://springdale.princeton.edu/data/springdale/OSE/6/%(arch)s/

[sdl6-devtoolset]
label    = %(base_channel)s-devtoolset
archs    = %(_x86_archs)s
name     = Springdale Linux 6 DevToolSet (%(arch)s)
base_channels = sdl6-%(arch)s
repo_url = http://springdale.princeton.edu/data/springdale/DevToolset/6/%(arch)s/mirrorlist

[sdl6-common]
label    = %(base_channel)s-common
archs    = %(_x86_archs)s
name     = Springdale Linux 6 COMMON (%(arch)s)
base_channels = sdl6-%(arch)s
repo_url = http://springdale.princeton.edu/data/springdale/COMMON/6/%(arch)s/

[sdl6-buildsys]
label    = %(base_channel)s-buildsys
archs    = %(_x86_archs)s
name     = Springdale Linux 6 buildsys (%(arch)s)
base_channels = sdl6-%(arch)s
repo_url = http://springdale.princeton.edu/data/springdale/buildsys/6/os/%(arch)s/

[sdl6-uyuni-client]
name     = Uyuni Client Tools for %(base_channel_name)s
archs    = %(_x86_archs)s
base_channels = sdl6-%(arch)s
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/CentOS6-Uyuni-Client-Tools/CentOS_6/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/CentOS6-Uyuni-Client-Tools/CentOS_6/

[sdl6-uyuni-client-devel]
name     = Uyuni Client Tools for %(base_channel_name)s (Development)
archs    = %(_x86_archs)s
base_channels = sdl6-%(arch)s
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/CentOS6-Uyuni-Client-Tools/CentOS_6/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/CentOS6-Uyuni-Client-Tools/CentOS_6/

[spacewalk28-client-scientific6]
name     = Spacewalk Client 2.8 for %(base_channel_name)s
archs    = %(_x86_archs)s
base_channels = scientific6-%(arch)s
gpgkey_url = %(_spacewalk_28client_gpgkey_url)s
gpgkey_id = %(_spacewalk_28client_gpgkey_id)s
gpgkey_fingerprint = %(_spacewalk_28client_gpgkey_fingerprint)s
repo_url = https://copr-be.cloud.fedoraproject.org/results/@spacewalkproject/spacewalk-2.8-client/epel-6-%(arch)s/

[spacewalk28-client-oraclelinux6]
name     = Spacewalk Client 2.8 for %(base_channel_name)s
archs    = %(_x86_archs)s
base_channels = oraclelinux6-%(arch)s
gpgkey_url = %(_spacewalk_28client_gpgkey_url)s
gpgkey_id = %(_spacewalk_28client_gpgkey_id)s
gpgkey_fingerprint = %(_spacewalk_28client_gpgkey_fingerprint)s
repo_url = https://copr-be.cloud.fedoraproject.org/results/@spacewalkproject/spacewalk-2.8-client/epel-6-%(arch)s/

[spacewalk28-client-oraclelinux7]
name     = Spacewalk Client 2.8 for %(base_channel_name)s
archs    = %(_x86_archs)s
base_channels = oraclelinux7-%(arch)s
gpgkey_url = %(_spacewalk_28client_gpgkey_url)s
gpgkey_id = %(_spacewalk_28client_gpgkey_id)s
gpgkey_fingerprint = %(_spacewalk_28client_gpgkey_fingerprint)s
repo_url = https://copr-be.cloud.fedoraproject.org/results/@spacewalkproject/spacewalk-2.8-client/epel-7-%(arch)s/

[spacewalk-nightly-server-fedora30]
name     = Spacewalk Server nightly for %(base_channel_name)s
archs    = %(_x86_archs)s
base_channels = fedora30-%(arch)s
gpgkey_url = %(_spacewalk_nightly_gpgkey_url)s
gpgkey_id = %(_spacewalk_nightly_gpgkey_id)s
gpgkey_fingerprint = %(_spacewalk_nightly_gpgkey_fingerprint)s
repo_url = https://copr-be.cloud.fedoraproject.org/results/@spacewalkproject/nightly/fedora-30-%(arch)s/


[spacewalk-nightly-client-fedora30]
name     = Spacewalk Client nightly for %(base_channel_name)s
archs    = %(_x86_archs)s
base_channels = fedora30-%(arch)s
gpgkey_url = %(_spacewalk_nightlyclient_gpgkey_url)s
gpgkey_id = %(_spacewalk_nightlyclient_gpgkey_id)s
gpgkey_fingerprint = %(_spacewalk_nightlyclient_gpgkey_fingerprint)s
repo_url = https://copr-be.cloud.fedoraproject.org/results/@spacewalkproject/nightly-client/fedora-30-%(arch)s/


[spacewalk-nightly-client-scientific6]
name     = Spacewalk Client nightly for %(base_channel_name)s
archs    = %(_x86_archs)s
base_channels = scientific6-%(arch)s
gpgkey_url = %(_spacewalk_nightlyclient_gpgkey_url)s
gpgkey_id = %(_spacewalk_nightlyclient_gpgkey_id)s
gpgkey_fingerprint = %(_spacewalk_nightlyclient_gpgkey_fingerprint)s
repo_url = https://copr-be.cloud.fedoraproject.org/results/@spacewalkproject/nightly-client/epel-6-%(arch)s/

[spacewalk-nightly-client-oraclelinux6]
name     = Spacewalk Client nightly for %(base_channel_name)s
archs    = %(_x86_archs)s
base_channels = oraclelinux6-%(arch)s
gpgkey_url = %(_spacewalk_nightlyclient_gpgkey_url)s
gpgkey_id = %(_spacewalk_nightlyclient_gpgkey_id)s
gpgkey_fingerprint = %(_spacewalk_nightlyclient_gpgkey_fingerprint)s
repo_url = https://copr-be.cloud.fedoraproject.org/results/@spacewalkproject/nightly-client/epel-6-%(arch)s/

[spacewalk-nightly-client-oraclelinux7]
name     = Spacewalk Client nightly for %(base_channel_name)s
archs    = %(_x86_archs)s
base_channels = oraclelinux7-%(arch)s
gpgkey_url = %(_spacewalk_nightlyclient_gpgkey_url)s
gpgkey_id = %(_spacewalk_nightlyclient_gpgkey_id)s
gpgkey_fingerprint = %(_spacewalk_nightlyclient_gpgkey_fingerprint)s
repo_url = https://copr-be.cloud.fedoraproject.org/results/@spacewalkproject/nightly-client/epel-7-%(arch)s/

#---

[opensuse_leap15_0]
checksum = sha256
archs    = x86_64
name     = openSUSE Leap 15.0 (%(arch)s)
gpgkey_url = http://download.opensuse.org/distribution/leap/15.0/repo/oss/repodata/repomd.xml.key
gpgkey_id = 3DBDC284
gpgkey_fingerprint = 22C0 7BA5 3417 8CD0 2EFE  22AA B88B 2FD4 3DBD C284
repo_url = http://download.opensuse.org/distribution/leap/15.0/repo/oss/
dist_map_release = 15.0

[opensuse_leap15_0-non-oss]
label    = %(base_channel)s-non-oss
name     = openSUSE 15.0 non oss (%(arch)s)
archs    = x86_64
checksum = sha256
base_channels = opensuse_leap15_0-%(arch)s
repo_url = http://download.opensuse.org/distribution/leap/15.0/repo/non-oss/

[opensuse_leap15_0-updates]
label    = %(base_channel)s-updates
name     = openSUSE Leap 15.0 Updates (%(arch)s)
archs    = x86_64
checksum = sha256
base_channels = opensuse_leap15_0-%(arch)s
repo_url = http://download.opensuse.org/update/leap/15.0/oss/

[opensuse_leap15_0-non-oss-updates]
label    = %(base_channel)s-non-oss-updates
name     = openSUSE Leap 15.0 non oss Updates (%(arch)s)
archs    = x86_64
checksum = sha256
base_channels = opensuse_leap15_0-%(arch)s
repo_url = http://download.opensuse.org/update/leap/15.0/non-oss/

[opensuse_leap15_0-uyuni-client]
name     = Uyuni Client Tools for %(base_channel_name)s
archs    = x86_64
base_channels = opensuse_leap15_0-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/openSUSE_Leap_15-Uyuni-Client-Tools/openSUSE_Leap_15.0/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/openSUSE_Leap_15-Uyuni-Client-Tools/openSUSE_Leap_15.0/

[opensuse_leap15_0-uyuni-client-devel]
name     = Uyuni Client Tools for %(base_channel_name)s (Development)
archs    = x86_64
base_channels = opensuse_leap15_0-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/openSUSE_Leap_15-Uyuni-Client-Tools/openSUSE_Leap_15.0/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/openSUSE_Leap_15-Uyuni-Client-Tools/openSUSE_Leap_15.0/

[opensuse_leap15_1]
checksum = sha256
archs    = x86_64
name     = openSUSE Leap 15.1 (%(arch)s)
gpgkey_url = http://download.opensuse.org/distribution/leap/15.1/repo/oss/repodata/repomd.xml.key
gpgkey_id = 3DBDC284
gpgkey_fingerprint = 22C0 7BA5 3417 8CD0 2EFE  22AA B88B 2FD4 3DBD C284
repo_url = http://download.opensuse.org/distribution/leap/15.1/repo/oss/
dist_map_release = 15.1

[opensuse_leap15_1-non-oss]
label    = %(base_channel)s-non-oss
name     = openSUSE 15.1 non oss (%(arch)s)
archs    = x86_64
checksum = sha256
base_channels = opensuse_leap15_1-%(arch)s
repo_url = http://download.opensuse.org/distribution/leap/15.1/repo/non-oss/

[opensuse_leap15_1-updates]
label    = %(base_channel)s-updates
name     = openSUSE Leap 15.1 Updates (%(arch)s)
archs    = x86_64
checksum = sha256
base_channels = opensuse_leap15_1-%(arch)s
repo_url = http://download.opensuse.org/update/leap/15.1/oss/

[opensuse_leap15_1-non-oss-updates]
label    = %(base_channel)s-non-oss-updates
name     = openSUSE Leap 15.1 non oss Updates (%(arch)s)
archs    = x86_64
checksum = sha256
base_channels = opensuse_leap15_1-%(arch)s
repo_url = http://download.opensuse.org/update/leap/15.1/non-oss/

# This is expected. openSUSE Leap 15.0 client tools are valid for all openSUSE Leap 15.X releases
[opensuse_leap15_1-uyuni-client]
name     = Uyuni Client Tools for %(base_channel_name)s
archs    = x86_64
base_channels = opensuse_leap15_1-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/openSUSE_Leap_15-Uyuni-Client-Tools/openSUSE_Leap_15.0/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/openSUSE_Leap_15-Uyuni-Client-Tools/openSUSE_Leap_15.0/

# This is expected. openSUSE Leap 15.0 client tools are valid for all openSUSE Leap 15.X releases
[opensuse_leap15_1-uyuni-client-devel]
name     = Uyuni Client Tools for %(base_channel_name)s (Development)
archs    = x86_64
base_channels = opensuse_leap15_1-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/openSUSE_Leap_15-Uyuni-Client-Tools/openSUSE_Leap_15.0/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/openSUSE_Leap_15-Uyuni-Client-Tools/openSUSE_Leap_15.0/

[opensuse_leap15_1-aarch64]
checksum = sha256
archs    = aarch64
label    = opensuse_leap15_1-%(arch)s
name     = openSUSE Leap 15.1 (%(arch)s)
gpgkey_url =  http://download.opensuse.org/ports/aarch64/distribution/leap/15.1/repo/oss/repodata/repomd.xml.key
gpgkey_id = 3DBDC284
gpgkey_fingerprint = 22C0 7BA5 3417 8CD0 2EFE  22AA B88B 2FD4 3DBD C284
repo_url = http://download.opensuse.org/ports/aarch64/distribution/leap/15.1/repo/oss/
dist_map_release = 15.1

[opensuse_leap15_1-updates-aarch64]
label    = opensuse_leap15_1-updates-%(arch)s
name     = openSUSE Leap 15.1 Updates (%(arch)s)
archs    = aarch64
checksum = sha256
base_channels = opensuse_leap15_1-%(arch)s
repo_url = http://download.opensuse.org/ports/update/leap/15.1/oss/

# This is expected. openSUSE Leap 15.0 client tools are valid for all openSUSE Leap 15.X releases
[opensuse_leap15_1-uyuni-client-aarch64]
name     = Uyuni Client Tools for %(base_channel_name)s
archs    = aarch64
base_channels = opensuse_leap15_1-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/openSUSE_Leap_15-Uyuni-Client-Tools/openSUSE_Leap_15.0/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/openSUSE_Leap_15-Uyuni-Client-Tools/openSUSE_Leap_15.0/

# This is expected. openSUSE Leap 15.0 client tools are valid for all openSUSE Leap 15.X releases
[opensuse_leap15_1-uyuni-client-devel-aarch64]
name     = Uyuni Client Tools for %(base_channel_name)s (Development)
archs    = aarch64
base_channels = opensuse_leap15_1-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/openSUSE_Leap_15-Uyuni-Client-Tools/openSUSE_Leap_15.0/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/openSUSE_Leap_15-Uyuni-Client-Tools/openSUSE_Leap_15.0/

[opensuse_leap15_2]
checksum = sha256
archs    = x86_64
name     = openSUSE Leap 15.2 (%(arch)s)
gpgkey_url = http://download.opensuse.org/distribution/leap/15.2/repo/oss/repodata/repomd.xml.key
gpgkey_id = 3DBDC284
gpgkey_fingerprint = 22C0 7BA5 3417 8CD0 2EFE  22AA B88B 2FD4 3DBD C284
repo_url = http://download.opensuse.org/distribution/leap/15.2/repo/oss/
dist_map_release = 15.2

[opensuse_leap15_2-non-oss]
label    = %(base_channel)s-non-oss
name     = openSUSE 15.2 non oss (%(arch)s)
archs    = x86_64
checksum = sha256
base_channels = opensuse_leap15_2-%(arch)s
repo_url = http://download.opensuse.org/distribution/leap/15.2/repo/non-oss/

[opensuse_leap15_2-updates]
label    = %(base_channel)s-updates
name     = openSUSE Leap 15.2 Updates (%(arch)s)
archs    = x86_64
checksum = sha256
base_channels = opensuse_leap15_2-%(arch)s
repo_url = http://download.opensuse.org/update/leap/15.2/oss/

[opensuse_leap15_2-non-oss-updates]
label    = %(base_channel)s-non-oss-updates
name     = openSUSE Leap 15.2 non oss Updates (%(arch)s)
archs    = x86_64
checksum = sha256
base_channels = opensuse_leap15_2-%(arch)s
repo_url = http://download.opensuse.org/update/leap/15.2/non-oss/

# This is expected. openSUSE Leap 15.0 client tools are valid for all openSUSE Leap 15.X releases
[opensuse_leap15_2-uyuni-client]
name     = Uyuni Client Tools for %(base_channel_name)s
archs    = x86_64
base_channels = opensuse_leap15_2-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/openSUSE_Leap_15-Uyuni-Client-Tools/openSUSE_Leap_15.0/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/openSUSE_Leap_15-Uyuni-Client-Tools/openSUSE_Leap_15.0/

# This is expected. openSUSE Leap 15.0 client tools are valid for all openSUSE Leap 15.X releases
[opensuse_leap15_2-uyuni-client-devel]
name     = Uyuni Client Tools for %(base_channel_name)s (Development)
archs    = x86_64
base_channels = opensuse_leap15_2-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/openSUSE_Leap_15-Uyuni-Client-Tools/openSUSE_Leap_15.0/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/openSUSE_Leap_15-Uyuni-Client-Tools/openSUSE_Leap_15.0/

[opensuse_leap15_2-aarch64]
checksum = sha256
archs    = aarch64
label    = opensuse_leap15_2-%(arch)s
name     = openSUSE Leap 15.2 (%(arch)s)
gpgkey_url = http://download.opensuse.org/ports/aarch64/distribution/leap/15.2/repo/oss/repodata/repomd.xml.key
gpgkey_id = 3DBDC284
gpgkey_fingerprint = 22C0 7BA5 3417 8CD0 2EFE  22AA B88B 2FD4 3DBD C284
repo_url = http://download.opensuse.org/ports/aarch64/distribution/leap/15.2/repo/oss/
dist_map_release = 15.2

[opensuse_leap15_2-updates-aarch64]
label    = opensuse_leap15_2-updates-%(arch)s
name     = openSUSE Leap 15.2 Updates (%(arch)s)
archs    = aarch64
checksum = sha256
base_channels = opensuse_leap15_2-%(arch)s
repo_url = http://download.opensuse.org/ports/update/leap/15.2/oss/

# This is expected. openSUSE Leap 15.0 client tools are valid for all openSUSE Leap 15.X releases
[opensuse_leap15_2-uyuni-client-aarch64]
name     = Uyuni Client Tools for %(base_channel_name)s
archs    = aarch64
base_channels = opensuse_leap15_2-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/openSUSE_Leap_15-Uyuni-Client-Tools/openSUSE_Leap_15.0/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/openSUSE_Leap_15-Uyuni-Client-Tools/openSUSE_Leap_15.0/

# This is expected. openSUSE Leap 15.0 client tools are valid for all openSUSE Leap 15.X releases
[opensuse_leap15_2-uyuni-client-devel-aarch64]
name     = Uyuni Client Tools for %(base_channel_name)s (Development)
archs    = aarch64
base_channels = opensuse_leap15_2-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/openSUSE_Leap_15-Uyuni-Client-Tools/openSUSE_Leap_15.0/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/openSUSE_Leap_15-Uyuni-Client-Tools/openSUSE_Leap_15.0/

[opensuse_leap15_3]
checksum = sha256
archs    = x86_64, aarch64
name     = openSUSE Leap 15.3 (%(arch)s)
gpgkey_url = http://download.opensuse.org/distribution/leap/15.3/repo/oss/repodata/repomd.xml.key
gpgkey_id = 3DBDC284
gpgkey_fingerprint = 22C0 7BA5 3417 8CD0 2EFE  22AA B88B 2FD4 3DBD C284
repo_url = http://download.opensuse.org/distribution/leap/15.3/repo/oss/
dist_map_release = 15.3

[opensuse_leap15_3-non-oss]
label    = %(base_channel)s-non-oss
name     = openSUSE 15.3 non oss (%(arch)s)
archs    = x86_64, aarch64
checksum = sha256
base_channels = opensuse_leap15_3-%(arch)s
repo_url = http://download.opensuse.org/distribution/leap/15.3/repo/non-oss/

[opensuse_leap15_3-updates]
label    = %(base_channel)s-updates
name     = openSUSE Leap 15.3 Updates (%(arch)s)
archs    = x86_64, aarch64
checksum = sha256
base_channels = opensuse_leap15_3-%(arch)s
repo_url = http://download.opensuse.org/update/leap/15.3/oss/

[opensuse_leap15_3-non-oss-updates]
label    = %(base_channel)s-non-oss-updates
name     = openSUSE Leap 15.3 non oss Updates (%(arch)s)
archs    = x86_64, aarch64
checksum = sha256
base_channels = opensuse_leap15_3-%(arch)s
repo_url = http://download.opensuse.org/update/leap/15.3/non-oss/

[opensuse_leap15_3-sle-updates]
label    = %(base_channel)s-sle-updates
name     = Update repository with updates from SUSE Linux Enterprise 15 (%(arch)s)
archs    = x86_64, aarch64
checksum = sha256
base_channels = opensuse_leap15_3-%(arch)s
repo_url = http://download.opensuse.org/update/leap/15.3/sle/

[opensuse_leap15_3-backports-updates]
label    = %(base_channel)s-backports-updates
name     = Update repository of openSUSE Backports (%(arch)s)
archs    = x86_64, aarch64
checksum = sha256
base_channels = opensuse_leap15_3-%(arch)s
repo_url = http://download.opensuse.org/update/leap/15.3/backports/

# This is expected. openSUSE Leap 15.0 client tools are valid for all openSUSE Leap 15.X releases
[opensuse_leap15_3-uyuni-client]
name     = Uyuni Client Tools for %(base_channel_name)s
archs    = x86_64, aarch64
base_channels = opensuse_leap15_3-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/openSUSE_Leap_15-Uyuni-Client-Tools/openSUSE_Leap_15.0/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/openSUSE_Leap_15-Uyuni-Client-Tools/openSUSE_Leap_15.0/

# This is expected. openSUSE Leap 15.0 client tools are valid for all openSUSE Leap 15.X releases
[opensuse_leap15_3-uyuni-client-devel]
name     = Uyuni Client Tools for %(base_channel_name)s (Development)
archs    = x86_64, aarch64
base_channels = opensuse_leap15_3-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/openSUSE_Leap_15-Uyuni-Client-Tools/openSUSE_Leap_15.0/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/openSUSE_Leap_15-Uyuni-Client-Tools/openSUSE_Leap_15.0/

[opensuse_leap15_4]
checksum = sha256
archs    = x86_64, aarch64
name     = openSUSE Leap 15.4 (%(arch)s)
gpgkey_url = http://download.opensuse.org/distribution/leap/15.4/repo/oss/repodata/repomd.xml.key
gpgkey_id = 3DBDC284
gpgkey_fingerprint = 22C0 7BA5 3417 8CD0 2EFE  22AA B88B 2FD4 3DBD C284
repo_url = http://download.opensuse.org/distribution/leap/15.4/repo/oss/
dist_map_release = 15.4

[opensuse_leap15_4-non-oss]
label    = %(base_channel)s-non-oss
name     = openSUSE 15.4 non oss (%(arch)s)
archs    = x86_64, aarch64
checksum = sha256
base_channels = opensuse_leap15_4-%(arch)s
repo_url = http://download.opensuse.org/distribution/leap/15.4/repo/non-oss/

[opensuse_leap15_4-updates]
label    = %(base_channel)s-updates
name     = openSUSE Leap 15.4 Updates (%(arch)s)
archs    = x86_64, aarch64
checksum = sha256
base_channels = opensuse_leap15_4-%(arch)s
repo_url = http://download.opensuse.org/update/leap/15.4/oss/

[opensuse_leap15_4-non-oss-updates]
label    = %(base_channel)s-non-oss-updates
name     = openSUSE Leap 15.4 non oss Updates (%(arch)s)
archs    = x86_64, aarch64
checksum = sha256
base_channels = opensuse_leap15_4-%(arch)s
repo_url = http://download.opensuse.org/update/leap/15.4/non-oss/

[opensuse_leap15_4-sle-updates]
label    = %(base_channel)s-sle-updates
name     = Update repository with updates from SUSE Linux Enterprise 15 (%(arch)s)
archs    = x86_64, aarch64
checksum = sha256
base_channels = opensuse_leap15_4-%(arch)s
repo_url = http://download.opensuse.org/update/leap/15.4/sle/

[opensuse_leap15_4-backports-updates]
label    = %(base_channel)s-backports-updates
name     = Update repository of openSUSE Backports (%(arch)s)
archs    = x86_64, aarch64
checksum = sha256
base_channels = opensuse_leap15_4-%(arch)s
repo_url = http://download.opensuse.org/update/leap/15.4/backports/

# This is expected. openSUSE Leap 15.0 client tools are valid for all openSUSE Leap 15.X releases
[opensuse_leap15_4-uyuni-client]
name     = Uyuni Client Tools for %(base_channel_name)s
archs    = x86_64, aarch64
base_channels = opensuse_leap15_4-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/openSUSE_Leap_15-Uyuni-Client-Tools/openSUSE_Leap_15.0/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/openSUSE_Leap_15-Uyuni-Client-Tools/openSUSE_Leap_15.0/

# This is expected. openSUSE Leap 15.0 client tools are valid for all openSUSE Leap 15.X releases
[opensuse_leap15_4-uyuni-client-devel]
name     = Uyuni Client Tools for %(base_channel_name)s (Development)
archs    = x86_64, aarch64
base_channels = opensuse_leap15_4-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/openSUSE_Leap_15-Uyuni-Client-Tools/openSUSE_Leap_15.0/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/openSUSE_Leap_15-Uyuni-Client-Tools/openSUSE_Leap_15.0/

[opensuse_tumbleweed]
checksum = sha256
archs    = x86_64, aarch64
name     = openSUSE Tumbleweed (%(arch)s)
gpgkey_url = http://download.opensuse.org/tumbleweed/repo/oss/repodata/repomd.xml.key
gpgkey_id = 3DBDC284
gpgkey_fingerprint = 22C0 7BA5 3417 8CD0 2EFE  22AA B88B 2FD4 3DBD C284
repo_url = http://download.opensuse.org/tumbleweed/repo/oss/
dist_map_release = Tumbleweed

[opensuse_tumbleweed-non-oss]
label    = %(base_channel)s-non-oss
name     = openSUSE Tumbleweed non oss (%(arch)s)
archs    = x86_64, aarch64
checksum = sha256
base_channels = opensuse_tumbleweed-%(arch)s
repo_url = http://download.opensuse.org/tumbleweed/repo/non-oss/

[opensuse_tumbleweed-updates]
label    = %(base_channel)s-updates
name     = openSUSE Tumbleweed Updates (%(arch)s)
archs    = x86_64, aarch64
checksum = sha256
base_channels = opensuse_tumbleweed-%(arch)s
repo_url = https://download.opensuse.org/update/tumbleweed/

[sles12-sp3-uyuni-client]
name     = Uyuni Client Tools for SLES12 SP3 %(arch)s
archs    =  x86_64, s390x, aarch64, ppc64le
base_channels = sles12-sp3-pool-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/SLE12-Uyuni-Client-Tools/SLE_12/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/SLE12-Uyuni-Client-Tools/SLE_12/

[sles12-sp3-uyuni-client-devel]
name     = Uyuni Client Tools for SLES12 SP3 %(arch)s (Development)
archs    =  x86_64, s390x, aarch64, ppc64le
base_channels = sles12-sp3-pool-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/SLE12-Uyuni-Client-Tools/SLE_12/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/SLE12-Uyuni-Client-Tools/SLE_12/

[sles12-sp4-uyuni-client]
name     = Uyuni Client Tools for SLES12 SP4 %(arch)s
archs    =  x86_64, s390x, aarch64, ppc64le
base_channels = sles12-sp4-pool-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/SLE12-Uyuni-Client-Tools/SLE_12/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/SLE12-Uyuni-Client-Tools/SLE_12/

[sles12-sp4-uyuni-client-devel]
name     = Uyuni Client Tools for SLES12 SP4 %(arch)s (Development)
archs    =  x86_64, s390x, aarch64, ppc64le
base_channels = sles12-sp4-pool-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/SLE12-Uyuni-Client-Tools/SLE_12/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/SLE12-Uyuni-Client-Tools/SLE_12/

[sles12-sap-sp4-uyuni-client]
name     = Uyuni Client Tools for SLES12 SP4 SAP %(arch)s
archs    =  x86_64, s390x, aarch64, ppc64le
base_channels = sle12-sp4-sap-pool-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/SLE12-Uyuni-Client-Tools/SLE_12/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/SLE12-Uyuni-Client-Tools/SLE_12/

[sles12-sap-sp4-uyuni-client-devel]
name     = Uyuni Client Tools for SLES12 SP4 SAP %(arch)s (Development)
archs    =  x86_64, s390x, aarch64, ppc64le
base_channels = sle12-sp4-sap-pool-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/SLE12-Uyuni-Client-Tools/SLE_12/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/SLE12-Uyuni-Client-Tools/SLE_12/

[sles12-sp5-uyuni-client]
name     = Uyuni Client Tools for SLES12 SP5 %(arch)s
archs    =  x86_64, s390x, aarch64, ppc64le
base_channels = sles12-sp5-pool-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/SLE12-Uyuni-Client-Tools/SLE_12/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/SLE12-Uyuni-Client-Tools/SLE_12/

[sles12-sp5-uyuni-client-devel]
name     = Uyuni Client Tools for SLES12 SP5 %(arch)s (Development)
archs    =  x86_64, s390x, aarch64, ppc64le
base_channels = sles12-sp5-pool-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/SLE12-Uyuni-Client-Tools/SLE_12/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/SLE12-Uyuni-Client-Tools/SLE_12/

[sles12-sap-sp5-uyuni-client]
name     = Uyuni Client Tools for SLES12 SP5 SAP %(arch)s
archs    =  x86_64, s390x, aarch64, ppc64le
base_channels = sle12-sp5-sap-pool-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/SLE12-Uyuni-Client-Tools/SLE_12/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/SLE12-Uyuni-Client-Tools/SLE_12/

[sles12-sap-sp5-uyuni-client-devel]
name     = Uyuni Client Tools for SLES12 SP5 SAP %(arch)s (Development)
archs    =  x86_64, s390x, aarch64, ppc64le
base_channels = sle12-sp5-sap-pool-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/SLE12-Uyuni-Client-Tools/SLE_12/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/SLE12-Uyuni-Client-Tools/SLE_12/

[sles15-uyuni-client]
name     = Uyuni Client Tools for SLES15 %(arch)s
archs    =  x86_64, s390x, aarch64, ppc64le
base_channels = sle-product-sles15-pool-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/SLE15-Uyuni-Client-Tools/SLE_15/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/SLE15-Uyuni-Client-Tools/SLE_15/

[sles15-uyuni-client-devel]
name     = Uyuni Client Tools for SLES15 %(arch)s (Development)
archs    =  x86_64, s390x, aarch64, ppc64le
base_channels = sle-product-sles15-pool-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/SLE15-Uyuni-Client-Tools/SLE_15/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/SLE15-Uyuni-Client-Tools/SLE_15/

[sles15-sp1-uyuni-client]
name     = Uyuni Client Tools for SLES15 SP1 %(arch)s
archs    =  x86_64, s390x, aarch64, ppc64le
base_channels = sle-product-sles15-sp1-pool-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/SLE15-Uyuni-Client-Tools/SLE_15/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/SLE15-Uyuni-Client-Tools/SLE_15/

[sles15-sp1-devel-uyuni-client]
name     = Uyuni Client Tools for SLES15 SP1 %(arch)s (Development)
archs    =  x86_64, s390x, aarch64, ppc64le
base_channels = sle-product-sles15-sp1-pool-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/SLE15-Uyuni-Client-Tools/SLE_15/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/SLE15-Uyuni-Client-Tools/SLE_15/

[sles15-sp2-uyuni-client]
name     = Uyuni Client Tools for SLES15 SP2 %(arch)s
archs    =  x86_64, s390x, aarch64, ppc64le
base_channels = sle-product-sles15-sp2-pool-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/SLE15-Uyuni-Client-Tools/SLE_15/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/SLE15-Uyuni-Client-Tools/SLE_15/

[sles15-sp2-devel-uyuni-client]
name     = Uyuni Client Tools for SLES15 SP2 %(arch)s (Development)
archs    =  x86_64, s390x, aarch64, ppc64le
base_channels = sle-product-sles15-sp2-pool-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/SLE15-Uyuni-Client-Tools/SLE_15/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/SLE15-Uyuni-Client-Tools/SLE_15/

[sles15-sap-sp2-uyuni-client]
name     = Uyuni Client Tools for SLES15 SP2 SAP %(arch)s
archs    =  x86_64, s390x, aarch64, ppc64le
base_channels = sle-product-sles_sap15-sp2-pool-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/SLE15-Uyuni-Client-Tools/SLE_15/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/SLE15-Uyuni-Client-Tools/SLE_15/

[sles15-sap-sp2-devel-uyuni-client]
name     = Uyuni Client Tools for SLES15 SP2 SAP %(arch)s (Development)
archs    =  x86_64, s390x, aarch64, ppc64le
base_channels = sle-product-sles_sap15-sp2-pool-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/SLE15-Uyuni-Client-Tools/SLE_15/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/SLE15-Uyuni-Client-Tools/SLE_15/

[sles15-sp3-uyuni-client]
name     = Uyuni Client Tools for SLES15 SP3 %(arch)s
archs    =  x86_64, s390x, aarch64, ppc64le
base_channels = sle-product-sles15-sp3-pool-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/SLE15-Uyuni-Client-Tools/SLE_15/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/SLE15-Uyuni-Client-Tools/SLE_15/

[sles15-sp3-devel-uyuni-client]
name     = Uyuni Client Tools for SLES15 SP3 %(arch)s (Development)
archs    =  x86_64, s390x, aarch64, ppc64le
base_channels = sle-product-sles15-sp3-pool-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/SLE15-Uyuni-Client-Tools/SLE_15/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/SLE15-Uyuni-Client-Tools/SLE_15/

[sles15-sap-sp3-uyuni-client]
name     = Uyuni Client Tools for SLES15 SP3 SAP %(arch)s
archs    =  x86_64, s390x, aarch64, ppc64le
base_channels = sle-product-sles_sap15-sp3-pool-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/SLE15-Uyuni-Client-Tools/SLE_15/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/SLE15-Uyuni-Client-Tools/SLE_15/

[sles15-sap-sp3-devel-uyuni-client]
name     = Uyuni Client Tools for SLES15 SP3 SAP %(arch)s (Development)
archs    =  x86_64, s390x, aarch64, ppc64le
base_channels = sle-product-sles_sap15-sp3-pool-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/SLE15-Uyuni-Client-Tools/SLE_15/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/SLE15-Uyuni-Client-Tools/SLE_15/

[oraclelinux8]
archs    = x86_64, aarch64
name     = Oracle Linux 8 (%(arch)s)
checksum = sha256
gpgkey_url = https://yum.oracle.com/RPM-GPG-KEY-oracle-ol8
gpgkey_id  = AD986DA3
gpgkey_fingerprint = 76FD 3DB1 3AB6 7410 B89D B10E 8256 2EA9 AD98 6DA3
repo_url = https://yum.oracle.com/repo/OracleLinux/OL8/baseos/latest/%(arch)s/
dist_map_release = 8

[oraclelinux8-appstream]
archs    = x86_64, aarch64
name     = Oracle Linux 8 AppStream (%(arch)s)
base_channels = oraclelinux8-%(arch)s
checksum = sha256
gpgkey_url = https://yum.oracle.com/RPM-GPG-KEY-oracle-ol8
gpgkey_id  = AD986DA3
gpgkey_fingerprint = 76FD 3DB1 3AB6 7410 B89D B10E 8256 2EA9 AD98 6DA3
repo_url = https://yum.oracle.com/repo/OracleLinux/OL8/appstream/%(arch)s/
dist_map_release = 8

[oraclelinux8-addons]
label    = %(base_channel)s-addons
archs    = x86_64, aarch64
name     = Addons for Oracle Linux 8 (%(arch)s)
base_channels = oraclelinux8-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL8/addons/%(arch)s/

[oraclelinux8-codereadybuilder]
label    = %(base_channel)s-codereadybuilder
archs    = x86_64, aarch64
name     = Latest CodeReady Builder packages for Oracle Linux 8 (%(arch)s)
base_channels = oraclelinux8-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL8/codeready/builder/%(arch)s/

[oraclelinux8-developer-uek-r6]
label    = %(base_channel)s-uek-r6
archs    = x86_64, aarch64
name     = Latest Unbreakable Enterprise Kernel Release 6 for Oracle Linux 8 (%(arch)s)
base_channels = oraclelinux8-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL8/developer/UEKR6/%(arch)s/

[oraclelinux8-developer]
label    = %(base_channel)s-developer
archs    = x86_64, aarch64
name     = Packages for test and development - Oracle Linux 8 (%(arch)s)
base_channels = oraclelinux8-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL8/developer/%(arch)s/

[oraclelinux8-uek-r6-rdma]
label    = %(base_channel)s-uek-r6-rdma
archs    = x86_64
name     = Latest RDMA packages for UEK Release 6 packages on Oracle Linux 8
base_channels = oraclelinux8-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL8/UEKR6/RDMA/%(arch)s/

[oraclelinux8-uyuni-client]
name     = Uyuni Client Tools for %(base_channel_name)s
archs    = %(_x86_archs)s, aarch64
base_channels = oraclelinux8-%(arch)s
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/EL8-Uyuni-Client-Tools/EL_8/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/EL8-Uyuni-Client-Tools/EL_8/

[oraclelinux8-uyuni-client-devel]
name     = Uyuni Client Tools for %(base_channel_name)s (Development)
archs    = %(_x86_archs)s, aarch64
base_channels = oraclelinux8-%(arch)s
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/EL8-Uyuni-Client-Tools/EL_8/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/EL8-Uyuni-Client-Tools/EL_8/

[oraclelinux7]
archs    = x86_64, aarch64
name     = Oracle Linux 7 (%(arch)s)
checksum = sha256
gpgkey_url = https://yum.oracle.com/RPM-GPG-KEY-oracle-ol7
gpgkey_id  = EC551F03
gpgkey_fingerprint = 4214 4123 FECF C55B 9086  313D 72F9 7B74 EC55 1F03
repo_url = https://yum.oracle.com/repo/OracleLinux/OL7/latest/%(arch)s/
dist_map_release = 7

[oraclelinux7-optional]
label    = %(base_channel)s-optional
archs    = x86_64, aarch64
name     = Optional Packages for Oracle Linux 7 (%(arch)s)
base_channels = oraclelinux7-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL7/optional/latest/%(arch)s/

[oraclelinux7-addons]
label    = %(base_channel)s-addons
archs    = x86_64
name     = Addons for Oracle Linux 7 (%(arch)s)
base_channels = oraclelinux7-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL7/addons/%(arch)s/

[oraclelinux7-uek-r6]
label    = %(base_channel)s-uek-r6
archs    = x86_64
name     = Latest Unbreakable Enterprise Kernel Release 6 for Oracle Linux 7 (%(arch)s)
base_channels = oraclelinux7-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL7/UEKR6/%(arch)s/

[oraclelinux7-uek-r5]
label    = %(base_channel)s-uek-r5
archs    = x86_64
name     = Latest Unbreakable Enterprise Kernel Release 5 for Oracle Linux 7 (%(arch)s)
base_channels = oraclelinux7-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL7/UEKR5/%(arch)s/

[oraclelinux7-uek-r4]
label    = %(base_channel)s-uek-r4
archs    = x86_64
name     = Latest Unbreakable Enterprise Kernel Release 4 for Oracle Linux 7 (%(arch)s)
base_channels = oraclelinux7-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL7/UEKR4/%(arch)s/

[oraclelinux7-uek-r3]
label    = %(base_channel)s-uek-r3
archs    = x86_64
name     = Latest Unbreakable Enterprise Kernel Release 3 for Oracle Linux 7 (%(arch)s)
base_channels = oraclelinux7-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL7/UEKR3/%(arch)s/

[oraclelinux7-uek-r6-rdma]
label    = %(base_channel)s-uek-r6-rdma
archs    = x86_64
name     = Latest RDMA packages for UEK Release 6 packages on Oracle Linux 7
base_channels = oraclelinux7-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL7/UEKR6/RDMA/%(arch)s/

[oraclelinux7-uek-r5-rdma]
label    = %(base_channel)s-uek-r5-rdma
archs    = x86_64
name     = Latest RDMA packages for UEK Release 5 packages on Oracle Linux 7
base_channels = oraclelinux7-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL7/UEKR5/RDMA/%(arch)s/

[oraclelinux7-mysql55]
label    = %(base_channel)s-mysql55
archs    = x86_64
name     = MySQL 5.5 for Oracle Linux 7 (%(arch)s)
base_channels = oraclelinux7-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL7/MySQL55/%(arch)s/

[oraclelinux7-mysql56]
label    = %(base_channel)s-mysql56
archs    = x86_64
name     = MySQL 5.6 for Oracle Linux 7 (%(arch)s)
base_channels = oraclelinux7-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL7/MySQL56/%(arch)s/

[oraclelinux7-mysql57]
label    = %(base_channel)s-mysql57
archs    = x86_64
name     = MySQL 5.7 for Oracle Linux 7 (%(arch)s)
base_channels = oraclelinux7-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL7/MySQL57_community/%(arch)s/

[oraclelinux7-spacewalk22-client]
label    = %(base_channel)s-spacewalk22-client
archs    = x86_64
name     = Spacewalk 2.2 Client for Oracle Linux 7 (%(arch)s)
base_channels = oraclelinux7-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL7/spacewalk22/client/%(arch)s/

[oraclelinux7-spacewalk22-server]
label    = %(base_channel)s-spacewalk22-server
archs    = x86_64
name     = Spacewalk 2.2 Server for Oracle Linux 7 (%(arch)s)
base_channels = oraclelinux7-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL7/spacewalk22/server/%(arch)s/

[oraclelinux7-spacewalk24-client]
label    = %(base_channel)s-spacewalk24-client
archs    = x86_64
name     = Spacewalk 2.4 Client for Oracle Linux 7 (%(arch)s)
base_channels = oraclelinux7-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL7/spacewalk24/client/%(arch)s/

[oraclelinux7-spacewalk24-server]
label    = %(base_channel)s-spacewalk24-server
archs    = x86_64
name     = Spacewalk 2.4 Server for Oracle Linux 7 (%(arch)s)
base_channels = oraclelinux7-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL7/spacewalk24/server/%(arch)s/

[oraclelinux7-openstack20]
label     = %(base_channel)s-openstack20
archs     = x86_64
name      = OpenStack 2.0 packages for Oracle Linux 7 (%(arch)s)
base_channels = oraclelinux7-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL7/openstack20/%(arch)s/

[oraclelinux7-openstack21]
label     = %(base_channel)s-openstack21
archs     = x86_64
name      = OpenStack 2.1 packages for Oracle Linux 7 (%(arch)s)
base_channels = oraclelinux7-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL7/openstack21/%(arch)s/

[oraclelinux7-openstack30]
label     = %(base_channel)s-openstack30
archs     = x86_64
name      = OpenStack 3.0 packages for Oracle Linux 7 (%(arch)s)
base_channels = oraclelinux7-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL7/openstack30/%(arch)s/

[oraclelinux7-openstack30-extras]
label     = %(base_channel)s-openstack30-extras
archs     = x86_64
name      = OpenStack 3.0 Extra packages for Oracle Linux 7 (%(arch)s)
base_channels = oraclelinux7-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL7/openstack_extras/%(arch)s/

[oraclelinux7-scl]
label     = %(base_channel)s-scl
archs    = x86_64, aarch64
name      = Software Collection Library packages for Oracle Linux 7 (%(arch)s)
base_channels = oraclelinux7-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL7/SoftwareCollections/%(arch)s/

[oraclelinux7-ceph]
label     = %(base_channel)s-ceph
archs    = x86_64
name      = Ceph Storage for Oracle Linux Release 2.0 for Oracle Linux 7 (%(arch)s)
base_channels = oraclelinux7-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL7/ceph/%(arch)s/

[oraclelinux7-uyuni-client]
name     = Uyuni Client Tools for %(base_channel_name)s
archs    = %(_x86_archs)s, aarch64
base_channels = oraclelinux7-%(arch)s
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/CentOS7-Uyuni-Client-Tools/CentOS_7/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/CentOS7-Uyuni-Client-Tools/CentOS_7/

[oraclelinux7-uyuni-client-devel]
name     = Uyuni Client Tools for %(base_channel_name)s (Development)
archs    = %(_x86_archs)s, aarch64
base_channels = oraclelinux7-%(arch)s
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/CentOS7-Uyuni-Client-Tools/CentOS_7/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/CentOS7-Uyuni-Client-Tools/CentOS_7/

[oraclelinux6]
archs    = %(_x86_archs)s
name     = Oracle Linux 6 (%(arch)s)
checksum = sha256
gpgkey_url = https://yum.oracle.com/RPM-GPG-KEY-oracle-ol6
gpgkey_id = EC551F03
gpgkey_fingerprint = 4214 4123 FECF C55B 9086  313D 72F9 7B74 EC55 1F03
repo_url = https://yum.oracle.com/repo/OracleLinux/OL6/latest/%(arch)s/
dist_map_release = 6

[oraclelinux6-addons]
label    = %(base_channel)s-addons
archs    = %(_x86_archs)s
name     = Addons for Oracle Linux 6 (%(arch)s)
base_channels = oraclelinux6-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL6/addons/%(arch)s/

[oraclelinux6-uek-r2]
label    = %(base_channel)s-uek-r2
archs    = %(_x86_archs)s
name     = Latest Unbreakable Enterprise Kernel Release 2 for Oracle Linux 6 (%(arch)s)
base_channels = oraclelinux6-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL6/UEK/latest/%(arch)s/

[oraclelinux6-uek-r3]
label    = %(base_channel)s-uek-r3
archs    = x86_64
name     = Latest Unbreakable Enterprise Kernel Release 3 for Oracle Linux 6 (%(arch)s)
base_channels = oraclelinux6-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL6/UEKR3/latest/%(arch)s/

[oraclelinux6-uek-r4]
label    = %(base_channel)s-uek-r4
archs    = x86_64
name     = Latest Unbreakable Enterprise Kernel Release 4 for Oracle Linux 6 (%(arch)s)
base_channels = oraclelinux6-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL6/UEKR4/latest/%(arch)s/

[oraclelinux6-mysql55]
label    = %(base_channel)s-mysql55
archs    = %(_x86_archs)s
name     = MySQL 5.5 for Oracle Linux 6 (%(arch)s)
base_channels = oraclelinux6-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL6/MySQL/%(arch)s/

[oraclelinux6-mysql56]
label    = %(base_channel)s-mysql56
archs    = %(_x86_archs)s
name     = MySQL 5.6 for Oracle Linux 6 (%(arch)s)
base_channels = oraclelinux6-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL6/MySQL56/%(arch)s/

[oraclelinux6-mysql57]
label    = %(base_channel)s-mysql57
archs    = %(_x86_archs)s
name     = MySQL 5.7 for Oracle Linux 6 (%(arch)s)
base_channels = oraclelinux6-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL6/MySQL57_community/%(arch)s/

[oraclelinux6-playground]
label    = %(base_channel)s-playground
archs    = x86_64
name     = Playground (Mainline) Kernels for Oracle Linux 6 (%(arch)s)
base_channels = oraclelinux6-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL6/playground/latest/%(arch)s/

[oraclelinux6-spacewalk22-server]
label    = %(base_channel)s-spacewalk22-server
archs    = x86_64
name     = Spacewalk 2.2 Server for Oracle Linux 6 (%(arch)s)
base_channels = oraclelinux6-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL6/spacewalk22/server/%(arch)s/

[oraclelinux6-spacewalk22-client]
label    = %(base_channel)s-spacewalk22-client
archs    = %(_x86_archs)s
name     = Spacewalk 2.2 Client for Oracle Linux 6 (%(arch)s)
base_channels = oraclelinux6-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL6/spacewalk22/client/%(arch)s/

[oraclelinux6-spacewalk24-server]
label    = %(base_channel)s-spacewalk24-server
archs    = x86_64
name     = Spacewalk 2.4 Server for Oracle Linux 6 (%(arch)s)
base_channels = oraclelinux6-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL6/spacewalk24/server/%(arch)s/

[oraclelinux6-spacewalk24-client]
label    = %(base_channel)s-spacewalk24-client
archs    = %(_x86_archs)s
name     = Spacewalk 2.4 Client for Oracle Linux 6 (%(arch)s)
base_channels = oraclelinux6-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL6/spacewalk24/client/%(arch)s/

[oraclelinux6-scl]
label     = %(base_channel)s-scl
archs    = x86_64
name      = Software Collection Library packages for Oracle Linux 6 (%(arch)s)
base_channels = oraclelinux6-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL6/SoftwareCollections/%(arch)s/

[oraclelinux6-openstack30]
label     = %(base_channel)s-openstack30
archs     = x86_64
name      = OpenStack 3.0 packages for Oracle Linux 6 (%(arch)s)
base_channels = oraclelinux6-%(arch)s
repo_url = https://yum.oracle.com/repo/OracleLinux/OL6/openstack30/%(arch)s/

[oraclelinux6-uyuni-client]
name     = Uyuni Client Tools for %(base_channel_name)s
archs    = %(_x86_archs)s
base_channels = oraclelinux6-%(arch)s
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/CentOS6-Uyuni-Client-Tools/CentOS_6/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/CentOS6-Uyuni-Client-Tools/CentOS_6/

[oraclelinux6-uyuni-client-devel]
name     = Uyuni Client Tools for %(base_channel_name)s (Development)
archs    = %(_x86_archs)s
base_channels = oraclelinux6-%(arch)s
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/CentOS6-Uyuni-Client-Tools/CentOS_6/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/CentOS6-Uyuni-Client-Tools/CentOS_6/

[uyuni-server-stable-leap-151]
name     = Uyuni Server Stable for %(base_channel_name)s
archs    = x86_64
base_channels = opensuse_leap15_1-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/images/repo/Uyuni-Server-POOL-x86_64-Media1/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/images/repo/Uyuni-Server-POOL-x86_64-Media1/

[uyuni-server-stable-leap-152]
name     = Uyuni Server Stable for %(base_channel_name)s
archs    = x86_64
base_channels = opensuse_leap15_2-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/images/repo/Uyuni-Server-POOL-x86_64-Media1/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/images/repo/Uyuni-Server-POOL-x86_64-Media1/

[uyuni-server-stable-leap-153]
name     = Uyuni Server Stable for %(base_channel_name)s
archs    = x86_64, aarch64
base_channels = opensuse_leap15_3-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/images/repo/Uyuni-Server-POOL-x86_64-Media1/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/images/repo/Uyuni-Server-POOL-x86_64-Media1/

[uyuni-server-stable-leap-154]
name     = Uyuni Server Stable for %(base_channel_name)s
archs    = x86_64, aarch64
base_channels = opensuse_leap15_4-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/images/repo/Uyuni-Server-POOL-x86_64-Media1/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/images/repo/Uyuni-Server-POOL-x86_64-Media1/

[uyuni-server-devel-leap]
name     = Uyuni Server Devel for %(base_channel_name)s (Development)
archs    = x86_64
base_channels = opensuse_leap15_4-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master/images/repo/Uyuni-Server-POOL-x86_64-Media1/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master/images/repo/Uyuni-Server-POOL-x86_64-Media1/

[uyuni-proxy-stable-leap-151]
name     = Uyuni Proxy Stable for %(base_channel_name)s
archs    = x86_64
base_channels = opensuse_leap15_1-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/images/repo/Uyuni-Proxy-POOL-x86_64-Media1/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/images/repo/Uyuni-Proxy-POOL-x86_64-Media1/

[uyuni-proxy-stable-leap-152]
name     = Uyuni Proxy Stable for %(base_channel_name)s
archs    = x86_64
base_channels = opensuse_leap15_2-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/images/repo/Uyuni-Proxy-POOL-x86_64-Media1/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/images/repo/Uyuni-Proxy-POOL-x86_64-Media1/

[uyuni-proxy-stable-leap-153]
name     = Uyuni Proxy Stable for %(base_channel_name)s
archs    = x86_64, aarch64
base_channels = opensuse_leap15_3-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/images/repo/Uyuni-Proxy-POOL-x86_64-Media1/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/images/repo/Uyuni-Proxy-POOL-x86_64-Media1/

[uyuni-proxy-stable-leap-154]
name     = Uyuni Proxy Stable for %(base_channel_name)s
archs    = x86_64, aarch64
base_channels = opensuse_leap15_4-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/images/repo/Uyuni-Proxy-POOL-x86_64-Media1/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/images/repo/Uyuni-Proxy-POOL-x86_64-Media1/

[uyuni-proxy-devel-leap]
name     = Uyuni Proxy Devel for %(base_channel_name)s (Development)
archs    = x86_64
base_channels = opensuse_leap15_4-%(arch)s
checksum = sha256
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master/images/repo/Uyuni-Proxy-POOL-x86_64-Media1/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master/images/repo/Uyuni-Proxy-POOL-x86_64-Media1/

[ubuntu-1604-pool-amd64]
label    = ubuntu-16.04-pool-amd64
checksum = sha256
archs    = amd64-deb
repo_type = deb
name     = ubuntu-16.04-pool for amd64
gpgkey_url =
gpgkey_id =
gpgkey_fingerprint =
repo_url = http://localhost/pub/repositories/empty-deb/?uniquekey=1604

[ubuntu-1604-amd64-main]
label    = ubuntu-1604-amd64-main
checksum = sha256
archs    = amd64-deb
repo_type = deb
name     = Ubuntu 16.04 LTS AMD64 Main
base_channels = ubuntu-16.04-pool-amd64
repo_url = http://archive.ubuntu.com/ubuntu/dists/xenial/main/binary-amd64/

[ubuntu-1604-amd64-updates]
label    = ubuntu-1604-amd64-main-updates
name     = Ubuntu 16.04 LTS AMD64 Main Updates
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-16.04-pool-amd64
repo_url = http://archive.ubuntu.com/ubuntu/dists/xenial-updates/main/binary-amd64/

[ubuntu-1604-amd64-security]
label    = ubuntu-1604-amd64-main-security
name     = Ubuntu 16.04 LTS AMD64 Security
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-16.04-pool-amd64
repo_url = http://archive.ubuntu.com/ubuntu/dists/xenial-security/main/binary-amd64/

[ubuntu-1604-amd64-universe]
label    = ubuntu-1604-amd64-universe
name     = Ubuntu 16.04 LTS AMD64 Universe
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-16.04-pool-amd64
repo_url = http://archive.ubuntu.com/ubuntu/dists/xenial/universe/binary-amd64/

[ubuntu-1604-amd64-universe-updates]
label    = ubuntu-1604-amd64-universe-updates
name     = Ubuntu 16.04 LTS AMD64 Universe Updates
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-16.04-pool-amd64
repo_url = http://archive.ubuntu.com/ubuntu/dists/xenial-updates/universe/binary-amd64/

[ubuntu-1604-amd64-universe-security]
label    = ubuntu-1604-amd64-universe-security
name     = Ubuntu 16.04 LTS AMD64 Universe Security Updates
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-16.04-pool-amd64
repo_url = http://archive.ubuntu.com/ubuntu/dists/xenial-security/universe/binary-amd64/

[ubuntu-1604-amd64-main-backports]
label     = ubuntu-1604-amd64-main-backports
name      = Ubuntu 16.04 LTS AMD64 Main Backports
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-16.04-pool-amd64
repo_url = http://archive.ubuntu.com/ubuntu/dists/xenial-backports/main/binary-amd64/

[ubuntu-1604-amd64-multiverse-backports]
label     = ubuntu-1604-amd64-multiverse-backports
name      = Ubuntu 16.04 LTS AMD64 Multiverse Backports
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-16.04-pool-amd64
repo_url = http://archive.ubuntu.com/ubuntu/dists/xenial-backports/multiverse/binary-amd64/

[ubuntu-1604-amd64-restricted-backports]
label     = ubuntu-1604-amd64-restricted-backports
name      = Ubuntu 16.04 LTS AMD64 Restricted Backports
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-16.04-pool-amd64
repo_url = http://archive.ubuntu.com/ubuntu/dists/xenial-backports/restricted/binary-amd64/

[ubuntu-1604-amd64-universe-backports]
label     = ubuntu-1604-amd64-universe-backports
name      = Ubuntu 16.04 LTS AMD64 Universe Backports
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-16.04-pool-amd64
repo_url = http://archive.ubuntu.com/ubuntu/dists/xenial-backports/universe/binary-amd64/

[ubuntu-1604-amd64-multiverse]
label    = ubuntu-1604-amd64-multiverse
name     = Ubuntu 16.04 LTS AMD64 Multiverse
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-16.04-pool-amd64
repo_url = http://archive.ubuntu.com/ubuntu/dists/xenial/multiverse/binary-amd64/

[ubuntu-1604-amd64-restricted]
label     = ubuntu-1604-amd64-restricted
name      = Ubuntu 16.04 LTS AMD64 Restricted
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-16.04-pool-amd64
repo_url  = http://archive.ubuntu.com/ubuntu/dists/xenial/restricted/binary-amd64/

[ubuntu-1604-amd64-multiverse-security]
label     = ubuntu-1604-amd64-multiverse-security
name      = Ubuntu 16.04 LTS AMD64 Multiverse Security
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-16.04-pool-amd64
repo_url  = http://security.ubuntu.com/ubuntu/dists/xenial-security/multiverse/binary-amd64/

[ubuntu-1604-amd64-restricted-security]
label     = ubuntu-1604-amd64-restricted-security
name      = Ubuntu 16.04 LTS AMD64 Restricted Security
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-16.04-pool-amd64
repo_url  = http://security.ubuntu.com/ubuntu/dists/xenial-security/restricted/binary-amd64/

[ubuntu-1604-amd64-multiverse-updates]
label     = ubuntu-1604-amd64-multiverse-updates
name      = Ubuntu 16.04 LTS AMD64 Multiverse Updates
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-16.04-pool-amd64
repo_url  = http://archive.ubuntu.com/ubuntu/dists/xenial-updates/multiverse/binary-amd64/

[ubuntu-1604-amd64-restricted-updates]
label     = ubuntu-1604-amd64-restricted-updates
name      = Ubuntu 16.04 LTS AMD64 Restricted Updates
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-16.04-pool-amd64
repo_url  = http://archive.ubuntu.com/ubuntu/dists/xenial-updates/restricted/binary-amd64/

[ubuntu-1604-pool-amd64-uyuni]
label    = ubuntu-16.04-pool-amd64-uyuni
checksum = sha256
archs    = amd64-deb
repo_type = deb
name     = Ubuntu 16.04 LTS AMD64 Base for Uyuni
gpgkey_url =
gpgkey_id =
gpgkey_fingerprint =
repo_url = http://localhost/pub/repositories/empty-deb/?uniquekey=1604-uyuni

[ubuntu-1604-amd64-main-uyuni]
label    = ubuntu-1604-amd64-main-uyuni
checksum = sha256
archs    = amd64-deb
repo_type = deb
name     = Ubuntu 16.04 LTS AMD64 Main for Uyuni
base_channels = ubuntu-16.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/xenial/main/binary-amd64/

[ubuntu-1604-amd64-updates-uyuni]
label    = ubuntu-1604-amd64-main-updates-uyuni
name     = Ubuntu 16.04 LTS AMD64 Main Updates for Uyuni
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-16.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/xenial-updates/main/binary-amd64/

[ubuntu-1604-amd64-security-uyuni]
label    = ubuntu-1604-amd64-main-security-uyuni
name     = Ubuntu 16.04 LTS AMD64 Security for Uyuni
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-16.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/xenial-security/main/binary-amd64/

[ubuntu-1604-amd64-universe-uyuni]
label    = ubuntu-1604-amd64-universe-uyuni
name     = Ubuntu 16.04 LTS AMD64 Universe for Uyuni
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-16.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/xenial/universe/binary-amd64/

[ubuntu-1604-amd64-universe-updates-uyuni]
label    = ubuntu-1604-amd64-universe-updates-uyuni
name     = Ubuntu 16.04 LTS AMD64 Universe Updates for Uyuni
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-16.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/xenial-updates/universe/binary-amd64/

[ubuntu-1604-amd64-universe-security-uyuni]
label    = ubuntu-1604-amd64-universe-security-uyuni
name     = Ubuntu 16.04 LTS AMD64 Universe Security Updates for Uyuni
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-16.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/xenial-security/universe/binary-amd64/

[ubuntu-1604-amd64-main-backports-uyuni]
label     = ubuntu-1604-amd64-main-backports-uyuni
name      = Ubuntu 16.04 LTS AMD64 Main Backports for Uyuni
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-16.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/xenial-backports/main/binary-amd64/

[ubuntu-1604-amd64-multiverse-backports-uyuni]
label     = ubuntu-1604-amd64-multiverse-backports-uyuni
name      = Ubuntu 16.04 LTS AMD64 Multiverse Backports for Uyuni
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-16.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/xenial-backports/multiverse/binary-amd64/

[ubuntu-1604-amd64-restricted-backports-uyuni]
label     = ubuntu-1604-amd64-restricted-backports-uyuni
name      = Ubuntu 16.04 LTS AMD64 Restricted Backports for Uyuni
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-16.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/xenial-backports/restricted/binary-amd64/

[ubuntu-1604-amd64-universe-backports-uyuni]
label     = ubuntu-1604-amd64-universe-backports-uyuni
name      = Ubuntu 16.04 LTS AMD64 Universe Backports for Uyuni
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-16.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/xenial-backports/universe/binary-amd64/

[ubuntu-1604-amd64-multiverse-uyuni]
label    = ubuntu-1604-amd64-multiverse-uyuni
name     = Ubuntu 16.04 LTS AMD64 Multiverse for Uyuni
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-16.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/xenial/multiverse/binary-amd64/

[ubuntu-1604-amd64-restricted-uyuni]
label     = ubuntu-1604-amd64-restricted-uyuni
name      = Ubuntu 16.04 LTS AMD64 Restricted for Uyuni
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-16.04-pool-amd64-uyuni
repo_url  = http://archive.ubuntu.com/ubuntu/dists/xenial/restricted/binary-amd64/

[ubuntu-1604-amd64-multiverse-security-uyuni]
label     = ubuntu-1604-amd64-multiverse-security-uyuni
name      = Ubuntu 16.04 LTS AMD64 Multiverse Security for Uyuni
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-16.04-pool-amd64-uyuni
repo_url  = http://security.ubuntu.com/ubuntu/dists/xenial-security/multiverse/binary-amd64/

[ubuntu-1604-amd64-restricted-security-uyuni]
label     = ubuntu-1604-amd64-restricted-security-uyuni
name      = Ubuntu 16.04 LTS AMD64 Restricted Security for Uyuni
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-16.04-pool-amd64-uyuni
repo_url  = http://security.ubuntu.com/ubuntu/dists/xenial-security/restricted/binary-amd64/

[ubuntu-1604-amd64-multiverse-updates-uyuni]
label     = ubuntu-1604-amd64-multiverse-updates-uyuni
name      = Ubuntu 16.04 LTS AMD64 Multiverse Updates for Uyuni
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-16.04-pool-amd64-uyuni
repo_url  = http://archive.ubuntu.com/ubuntu/dists/xenial-updates/multiverse/binary-amd64/

[ubuntu-1604-amd64-restricted-updates-uyuni]
label     = ubuntu-1604-amd64-restricted-updates-uyuni
name      = Ubuntu 16.04 LTS AMD64 Restricted Updates for Uyuni
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-16.04-pool-amd64-uyuni
repo_url  = http://archive.ubuntu.com/ubuntu/dists/xenial-updates/restricted/binary-amd64/

[ubuntu-1604-amd64-uyuni-client]
label    = ubuntu-1604-amd64-uyuni-client
name     = Uyuni Client Tools for Ubuntu 16.04 AMD64
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-16.04-pool-amd64-uyuni
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/Ubuntu1604-Uyuni-Client-Tools/xUbuntu_16.04/

[ubuntu-1604-amd64-uyuni-client-devel]
label    = ubuntu-1604-amd64-uyuni-client-devel
name     = Uyuni Client Tools for Ubuntu 16.04 AMD64 (Development)
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-16.04-pool-amd64-uyuni
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/Ubuntu1604-Uyuni-Client-Tools/xUbuntu_16.04/

[ubuntu-1804-pool-amd64]
label    = ubuntu-18.04-pool-amd64
checksum = sha256
archs    = amd64-deb
repo_type = deb
name     = ubuntu-18.04-pool for amd64
gpgkey_url =
gpgkey_id =
gpgkey_fingerprint =
repo_url = http://localhost/pub/repositories/empty-deb/?uniquekey=1804

[ubuntu-1804-amd64-main]
label    = ubuntu-1804-amd64-main
checksum = sha256
archs    = amd64-deb
repo_type = deb
name     = Ubuntu 18.04 LTS AMD64 Main
base_channels = ubuntu-18.04-pool-amd64
repo_url = http://archive.ubuntu.com/ubuntu/dists/bionic/main/binary-amd64/

[ubuntu-1804-amd64-main-updates]
label    = ubuntu-1804-amd64-main-updates
name     = Ubuntu 18.04 LTS AMD64 Main Updates
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-18.04-pool-amd64
repo_url = http://archive.ubuntu.com/ubuntu/dists/bionic-updates/main/binary-amd64/

[ubuntu-1804-amd64-main-security]
label    = ubuntu-1804-amd64-main-security
name     = Ubuntu 18.04 LTS AMD64 Main Security
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-18.04-pool-amd64
repo_url = http://archive.ubuntu.com/ubuntu/dists/bionic-security/main/binary-amd64/

[ubuntu-1804-amd64-universe]
label    = ubuntu-1804-amd64-universe
name     = Ubuntu 18.04 LTS AMD64 Universe
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-18.04-pool-amd64
repo_url = http://archive.ubuntu.com/ubuntu/dists/bionic/universe/binary-amd64/

[ubuntu-1804-amd64-universe-updates]
label    = ubuntu-1804-amd64-universe-updates
name     = Ubuntu 18.04 LTS AMD64 Universe Updates
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-18.04-pool-amd64
repo_url = http://archive.ubuntu.com/ubuntu/dists/bionic-updates/universe/binary-amd64/

[ubuntu-1804-amd64-universe-security]
label    = ubuntu-1804-amd64-universe-security
name     = Ubuntu 18.04 LTS AMD64 Universe Security Updates
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-18.04-pool-amd64
repo_url = http://archive.ubuntu.com/ubuntu/dists/bionic-security/universe/binary-amd64/

[ubuntu-1804-amd64-main-backports]
label     = ubuntu-1804-amd64-main-backports
name      = Ubuntu 18.04 LTS AMD64 Main Backports
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-18.04-pool-amd64
repo_url = http://archive.ubuntu.com/ubuntu/dists/bionic-backports/main/binary-amd64/

[ubuntu-1804-amd64-multiverse-backports]
label     = ubuntu-1804-amd64-multiverse-backports
name      = Ubuntu 18.04 LTS AMD64 Multiverse Backports
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-18.04-pool-amd64
repo_url = http://archive.ubuntu.com/ubuntu/dists/bionic-backports/multiverse/binary-amd64/

[ubuntu-1804-amd64-restricted-backports]
label     = ubuntu-1804-amd64-restricted-backports
name      = Ubuntu 18.04 LTS AMD64 Restricted Backports
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-18.04-pool-amd64
repo_url = http://archive.ubuntu.com/ubuntu/dists/bionic-backports/restricted/binary-amd64/

[ubuntu-1804-amd64-universe-backports]
label     = ubuntu-1804-amd64-universe-backports
name      = Ubuntu 18.04 LTS AMD64 Universe Backports
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-18.04-pool-amd64
repo_url = http://archive.ubuntu.com/ubuntu/dists/bionic-backports/universe/binary-amd64/

[ubuntu-1804-amd64-multiverse]
label    = ubuntu-1804-amd64-multiverse
name     = Ubuntu 18.04 LTS AMD64 Multiverse
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-18.04-pool-amd64
repo_url = http://archive.ubuntu.com/ubuntu/dists/bionic/multiverse/binary-amd64/

[ubuntu-1804-amd64-restricted]
label     = ubuntu-1804-amd64-restricted
name      = Ubuntu 18.04 LTS AMD64 Restricted
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-18.04-pool-amd64
repo_url  = http://archive.ubuntu.com/ubuntu/dists/bionic/restricted/binary-amd64/

[ubuntu-1804-amd64-multiverse-security]
label     = ubuntu-1804-amd64-multiverse-security
name      = Ubuntu 18.04 LTS AMD64 Multiverse Security
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-18.04-pool-amd64
repo_url  = http://security.ubuntu.com/ubuntu/dists/bionic-security/multiverse/binary-amd64/

[ubuntu-1804-amd64-restricted-security]
label     = ubuntu-1804-amd64-restricted-security
name      = Ubuntu 18.04 LTS AMD64 Restricted Security
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-18.04-pool-amd64
repo_url  = http://security.ubuntu.com/ubuntu/dists/bionic-security/restricted/binary-amd64/

[ubuntu-1804-amd64-multiverse-updates]
label     = ubuntu-1804-amd64-multiverse-updates
name      = Ubuntu 18.04 LTS AMD64 Multiverse Updates
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-18.04-pool-amd64
repo_url  = http://archive.ubuntu.com/ubuntu/dists/bionic-updates/multiverse/binary-amd64/

[ubuntu-1804-amd64-restricted-updates]
label     = ubuntu-1804-amd64-restricted-updates
name      = Ubuntu 18.04 LTS AMD64 Restricted Updates
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-18.04-pool-amd64
repo_url  = http://archive.ubuntu.com/ubuntu/dists/bionic-updates/restricted/binary-amd64/

[ubuntu-1804-pool-amd64-uyuni]
label    = ubuntu-18.04-pool-amd64-uyuni
checksum = sha256
archs    = amd64-deb
repo_type = deb
name     = Ubuntu 18.04 LTS AMD64 Base for Uyuni
gpgkey_url =
gpgkey_id =
gpgkey_fingerprint =
repo_url = http://localhost/pub/repositories/empty-deb/?uniquekey=1804-uyuni

[ubuntu-1804-amd64-main-uyuni]
label    = ubuntu-1804-amd64-main-uyuni
checksum = sha256
archs    = amd64-deb
repo_type = deb
name     = Ubuntu 18.04 LTS AMD64 Main for Uyuni
base_channels = ubuntu-18.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/bionic/main/binary-amd64/

[ubuntu-1804-amd64-main-updates-uyuni]
label    = ubuntu-1804-amd64-main-updates-uyuni
name     = Ubuntu 18.04 LTS AMD64 Main Updates for Uyuni
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-18.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/bionic-updates/main/binary-amd64/

[ubuntu-1804-amd64-main-security-uyuni]
label    = ubuntu-1804-amd64-main-security-uyuni
name     = Ubuntu 18.04 LTS AMD64 Main Security for Uyuni
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-18.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/bionic-security/main/binary-amd64/

[ubuntu-1804-amd64-universe-uyuni]
label    = ubuntu-1804-amd64-universe-uyuni
name     = Ubuntu 18.04 LTS AMD64 Universe for Uyuni
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-18.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/bionic/universe/binary-amd64/

[ubuntu-1804-amd64-universe-updates-uyuni]
label    = ubuntu-1804-amd64-universe-updates-uyuni
name     = Ubuntu 18.04 LTS AMD64 Universe Updates for Uyuni
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-18.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/bionic-updates/universe/binary-amd64/

[ubuntu-1804-amd64-universe-security-uyuni]
label    = ubuntu-1804-amd64-universe-security-uyuni
name     = Ubuntu 18.04 LTS AMD64 Universe Security Updates for Uyuni
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-18.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/bionic-security/universe/binary-amd64/

[ubuntu-1804-amd64-main-backports-uyuni]
label     = ubuntu-1804-amd64-main-backports-uyuni
name      = Ubuntu 18.04 LTS AMD64 Main Backports for Uyuni
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-18.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/bionic-backports/main/binary-amd64/

[ubuntu-1804-amd64-multiverse-backports-uyuni]
label     = ubuntu-1804-amd64-multiverse-backports-uyuni
name      = Ubuntu 18.04 LTS AMD64 Multiverse Backports for Uyuni
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-18.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/bionic-backports/multiverse/binary-amd64/

[ubuntu-1804-amd64-restricted-backports-uyuni]
label     = ubuntu-1804-amd64-restricted-backports-uyuni
name      = Ubuntu 18.04 LTS AMD64 Restricted Backports for Uyuni
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-18.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/bionic-backports/restricted/binary-amd64/

[ubuntu-1804-amd64-universe-backports-uyuni]
label     = ubuntu-1804-amd64-universe-backports-uyuni
name      = Ubuntu 18.04 LTS AMD64 Universe Backports for Uyuni
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-18.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/bionic-backports/universe/binary-amd64/

[ubuntu-1804-amd64-multiverse-uyuni]
label    = ubuntu-1804-amd64-multiverse-uyuni
name     = Ubuntu 18.04 LTS AMD64 Multiverse for Uyuni
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-18.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/bionic/multiverse/binary-amd64/

[ubuntu-1804-amd64-restricted-uyuni]
label     = ubuntu-1804-amd64-restricted-uyuni
name      = Ubuntu 18.04 LTS AMD64 Restricted for Uyuni
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-18.04-pool-amd64-uyuni
repo_url  = http://archive.ubuntu.com/ubuntu/dists/bionic/restricted/binary-amd64/

[ubuntu-1804-amd64-multiverse-security-uyuni]
label     = ubuntu-1804-amd64-multiverse-security-uyuni
name      = Ubuntu 18.04 LTS AMD64 Multiverse Security for Uyuni
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-18.04-pool-amd64-uyuni
repo_url  = http://security.ubuntu.com/ubuntu/dists/bionic-security/multiverse/binary-amd64/

[ubuntu-1804-amd64-restricted-security-uyuni]
label     = ubuntu-1804-amd64-restricted-security-uyuni
name      = Ubuntu 18.04 LTS AMD64 Restricted Security for Uyuni
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-18.04-pool-amd64-uyuni
repo_url  = http://security.ubuntu.com/ubuntu/dists/bionic-security/restricted/binary-amd64/

[ubuntu-1804-amd64-multiverse-updates-uyuni]
label     = ubuntu-1804-amd64-multiverse-updates-uyuni
name      = Ubuntu 18.04 LTS AMD64 Multiverse Updates for Uyuni
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-18.04-pool-amd64-uyuni
repo_url  = http://archive.ubuntu.com/ubuntu/dists/bionic-updates/multiverse/binary-amd64/

[ubuntu-1804-amd64-restricted-updates-uyuni]
label     = ubuntu-1804-amd64-restricted-updates-uyuni
name      = Ubuntu 18.04 LTS AMD64 Restricted Updates for Uyuni
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-18.04-pool-amd64-uyuni
repo_url  = http://archive.ubuntu.com/ubuntu/dists/bionic-updates/restricted/binary-amd64/

[ubuntu-1804-amd64-uyuni-client]
label    = ubuntu-1804-amd64-uyuni-client
name     = Uyuni Client Tools for Ubuntu 18.04 AMD64
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-18.04-pool-amd64-uyuni
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/Ubuntu1804-Uyuni-Client-Tools/xUbuntu_18.04/

[ubuntu-1804-amd64-uyuni-client-devel]
label    = ubuntu-1804-amd64-uyuni-client-devel
name     = Uyuni Client Tools for Ubuntu 18.04 AMD64 (Development)
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-18.04-pool-amd64-uyuni
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/Ubuntu1804-Uyuni-Client-Tools/xUbuntu_18.04/

[ubuntu-2004-pool-amd64-uyuni]
label    = ubuntu-20.04-pool-amd64-uyuni
checksum = sha256
archs    = amd64-deb
repo_type = deb
name     = Ubuntu 20.04 LTS AMD64 Base for Uyuni
gpgkey_url =
gpgkey_id =
gpgkey_fingerprint =
repo_url = http://localhost/pub/repositories/empty-deb/?uniquekey=2004-uyuni

[ubuntu-2004-amd64-main-uyuni]
label    = ubuntu-2004-amd64-main-uyuni
checksum = sha256
archs    = amd64-deb
repo_type = deb
name     = Ubuntu 20.04 LTS AMD64 Main for Uyuni
base_channels = ubuntu-20.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/focal/main/binary-amd64/

[ubuntu-2004-amd64-main-updates-uyuni]
label    = ubuntu-2004-amd64-main-updates-uyuni
name     = Ubuntu 20.04 LTS AMD64 Main Updates for Uyuni
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-20.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/focal-updates/main/binary-amd64/

[ubuntu-2004-amd64-main-security-uyuni]
label    = ubuntu-2004-amd64-main-security-uyuni
name     = Ubuntu 20.04 LTS AMD64 Main Security for Uyuni
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-20.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/focal-security/main/binary-amd64/

[ubuntu-2004-amd64-universe-uyuni]
label    = ubuntu-2004-amd64-universe-uyuni
name     = Ubuntu 20.04 LTS AMD64 Universe for Uyuni
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-20.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/focal/universe/binary-amd64/

[ubuntu-2004-amd64-universe-updates-uyuni]
label    = ubuntu-2004-amd64-universe-updates-uyuni
name     = Ubuntu 20.04 LTS AMD64 Universe Updates for Uyuni
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-20.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/focal-updates/universe/binary-amd64/

[ubuntu-2004-amd64-universe-security-uyuni]
label    = ubuntu-2004-amd64-universe-security-uyuni
name     = Ubuntu 20.04 LTS AMD64 Universe Security Updates for Uyuni
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-20.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/focal-security/universe/binary-amd64/

[ubuntu-2004-amd64-main-backports-uyuni]
label     = ubuntu-2004-amd64-main-backports-uyuni
name      = Ubuntu 20.04 LTS AMD64 Main Backports for Uyuni
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-20.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/focal-backports/main/binary-amd64/

[ubuntu-2004-amd64-multiverse-backports-uyuni]
label     = ubuntu-2004-amd64-multiverse-backports-uyuni
name      = Ubuntu 20.04 LTS AMD64 Multiverse Backports for Uyuni
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-20.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/focal-backports/multiverse/binary-amd64/

[ubuntu-2004-amd64-restricted-backports-uyuni]
label     = ubuntu-2004-amd64-restricted-backports-uyuni
name      = Ubuntu 20.04 LTS AMD64 Restricted Backports for Uyuni
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-20.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/focal-backports/restricted/binary-amd64/

[ubuntu-2004-amd64-universe-backports-uyuni]
label     = ubuntu-2004-amd64-universe-backports-uyuni
name      = Ubuntu 20.04 LTS AMD64 Universe Backports for Uyuni
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-20.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/focal-backports/universe/binary-amd64/

[ubuntu-2004-amd64-multiverse-uyuni]
label    = ubuntu-2004-amd64-multiverse-uyuni
name     = Ubuntu 20.04 LTS AMD64 Multiverse for Uyuni
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-20.04-pool-amd64-uyuni
repo_url = http://archive.ubuntu.com/ubuntu/dists/focal/multiverse/binary-amd64/

[ubuntu-2004-amd64-restricted-uyuni]
label     = ubuntu-2004-amd64-restricted-uyuni
name      = Ubuntu 20.04 LTS AMD64 Restricted for Uyuni
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-20.04-pool-amd64-uyuni
repo_url  = http://archive.ubuntu.com/ubuntu/dists/focal/restricted/binary-amd64/

[ubuntu-2004-amd64-multiverse-security-uyuni]
label     = ubuntu-2004-amd64-multiverse-security-uyuni
name      = Ubuntu 20.04 LTS AMD64 Multiverse Security for Uyuni
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-20.04-pool-amd64-uyuni
repo_url  = http://security.ubuntu.com/ubuntu/dists/focal-security/multiverse/binary-amd64/

[ubuntu-2004-amd64-restricted-security-uyuni]
label     = ubuntu-2004-amd64-restricted-security-uyuni
name      = Ubuntu 20.04 LTS AMD64 Restricted Security for Uyuni
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-20.04-pool-amd64-uyuni
repo_url  = http://security.ubuntu.com/ubuntu/dists/focal-security/restricted/binary-amd64/

[ubuntu-2004-amd64-multiverse-updates-uyuni]
label     = ubuntu-2004-amd64-multiverse-updates-uyuni
name      = Ubuntu 20.04 LTS AMD64 Multiverse Updates for Uyuni
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-20.04-pool-amd64-uyuni
repo_url  = http://archive.ubuntu.com/ubuntu/dists/focal-updates/multiverse/binary-amd64/

[ubuntu-2004-amd64-restricted-updates-uyuni]
label     = ubuntu-2004-amd64-restricted-updates-uyuni
name      = Ubuntu 20.04 LTS AMD64 Restricted Updates for Uyuni
archs     = amd64-deb
repo_type = deb
checksum  = sha256
base_channels = ubuntu-20.04-pool-amd64-uyuni
repo_url  = http://archive.ubuntu.com/ubuntu/dists/focal-updates/restricted/binary-amd64/

[ubuntu-2004-amd64-uyuni-client]
label    = ubuntu-2004-amd64-uyuni-client
name     = Uyuni Client Tools for Ubuntu 20.04 AMD64
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-20.04-pool-amd64-uyuni
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/Ubuntu2004-Uyuni-Client-Tools/xUbuntu_20.04/

[ubuntu-2004-amd64-uyuni-client-devel]
label    = ubuntu-2004-amd64-uyuni-client-devel
name     = Uyuni Client Tools for Ubuntu 20.04 AMD64 (Development)
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = ubuntu-20.04-pool-amd64-uyuni
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/Ubuntu2004-Uyuni-Client-Tools/xUbuntu_20.04/

[debian-9-pool-amd64-uyuni]
label    = debian-9-pool-amd64-uyuni
checksum = sha256
archs    = amd64-deb
repo_type = deb
name     = Debian 9 (stretch) pool for amd64 for Uyuni
gpgkey_url =
gpgkey_id =
gpgkey_fingerprint =
repo_url = http://deb.debian.org/debian/dists/stretch/main/binary-amd64/

[debian-9-amd64-main-updates-uyuni]
label    = debian-9-amd64-main-updates-uyuni
name     = Debian 9 (stretch) AMD64 Main Updates for Uyuni
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = debian-9-pool-amd64-uyuni
repo_url = http://deb.debian.org/debian/dists/stretch-updates/main/binary-amd64/

[debian-9-amd64-main-security-uyuni]
label    = debian-9-amd64-main-security-uyuni
name     = Debian 9 (stretch) AMD64 Main Security for Uyuni
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = debian-9-pool-amd64-uyuni
repo_url = http://security-cdn.debian.org/debian-security/dists/stretch/updates/main/binary-amd64/

[debian-9-amd64-uyuni-client-devel]
label    = debian-9-amd64-uyuni-client-devel
name     = Uyuni Client Tools for Debian 9 Stretch AMD64 (Development)
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = debian-9-pool-amd64-uyuni
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/Debian9-Uyuni-Client-Tools/Debian_9/

[debian-9-amd64-uyuni-client]
label    = debian-9-amd64-uyuni-client
name     = Uyuni Client Tools for Debian 9 Stretch AMD64
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = debian-9-pool-amd64-uyuni
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/Debian9-Uyuni-Client-Tools/Debian_9/

[debian-10-pool-amd64-uyuni]
label    = debian-10-pool-amd64-uyuni
checksum = sha256
archs    = amd64-deb
repo_type = deb
name     = Debian 10 (buster) pool for amd64 for Uyuni
gpgkey_url =
gpgkey_id =
gpgkey_fingerprint =
repo_url = http://deb.debian.org/debian/dists/buster/main/binary-amd64/

[debian-10-amd64-main-updates-uyuni]
label    = debian-10-amd64-main-updates-uyuni
name     = Debian 10 (buster) AMD64 Main Updates for Uyuni
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = debian-10-pool-amd64-uyuni
repo_url = http://deb.debian.org/debian/dists/buster-updates/main/binary-amd64/

[debian-10-amd64-main-security-uyuni]
label    = debian-10-amd64-main-security-uyuni
name     = Debian 10 (buster) AMD64 Main Security for Uyuni
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = debian-10-pool-amd64-uyuni
repo_url = http://security-cdn.debian.org/debian-security/dists/buster/updates/main/binary-amd64/

[debian-10-amd64-uyuni-client-devel]
label    = debian-10-amd64-uyuni-client-devel
name     = Uyuni Client Tools for Debian 10 Buster AMD64 (Development)
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = debian-10-pool-amd64-uyuni
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/Debian10-Uyuni-Client-Tools/Debian_10/

[debian-10-amd64-uyuni-client]
label    = debian-10-amd64-uyuni-client
name     = Uyuni Client Tools for Debian 10 Buster AMD64
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = debian-10-pool-amd64-uyuni
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/Debian10-Uyuni-Client-Tools/Debian_10/

[debian-11-pool-amd64-uyuni]
label    = debian-11-pool-amd64-uyuni
checksum = sha256
archs    = amd64-deb
repo_type = deb
name     = Debian 11 (bullseye) pool for amd64 for Uyuni
gpgkey_url =
gpgkey_id =
gpgkey_fingerprint =
repo_url = http://deb.debian.org/debian/dists/bullseye/main/binary-amd64/

[debian-11-amd64-main-updates-uyuni]
label    = debian-11-amd64-main-updates-uyuni
name     = Debian 11 (bullseye) AMD64 Main Updates for Uyuni
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = debian-11-pool-amd64-uyuni
repo_url = http://deb.debian.org/debian/dists/bullseye-updates/main/binary-amd64/

[debian-11-amd64-main-security-uyuni]
label    = debian-11-amd64-main-security-uyuni
name     = Debian 11 (bullseye) AMD64 Main Security for Uyuni
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = debian-11-pool-amd64-uyuni
repo_url = http://security.debian.org/debian-security/dists/bullseye-security/updates/main/binary-amd64/

[debian-11-amd64-uyuni-client-devel]
label    = debian-11-amd64-uyuni-client-devel
name     = Uyuni Client Tools for Debian 11 Bullseye AMD64 (Development)
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = debian-11-pool-amd64-uyuni
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/Debian11-Uyuni-Client-Tools/Debian_11/

[debian-11-amd64-uyuni-client]
label    = debian-11-amd64-uyuni-client
name     = Uyuni Client Tools for Debian 11 Bullseye AMD64
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = debian-11-pool-amd64-uyuni
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/Debian11-Uyuni-Client-Tools/Debian_11/

[astralinux-orel-pool-amd64]
label    = astralinux-orel-pool-amd64
checksum = sha256
archs    = amd64-deb
repo_type = deb
name     = Astra Linux Orel pool for amd64
gpgkey_url =
gpgkey_id =
gpgkey_fingerprint =
repo_url = https://download.astralinux.ru/astra/stable/orel/repository/dists/orel/main/binary-amd64/

[astralinux-orel-amd64-contrib]
label    = astralinux-orel-amd64-contrib
name     = Astra Linux Orel AMD64 Contrib
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = astralinux-orel-pool-amd64
repo_url = https://download.astralinux.ru/astra/stable/orel/repository/dists/orel/contrib/binary-amd64/

[astralinux-orel-amd64-non-free]
label    = astralinux-orel-amd64-non-free
name     = Astra Linux Orel AMD64 non-free
archs    = amd64-deb
repo_type = deb
checksum = sha256
base_channels = astralinux-orel-pool-amd64
repo_url = https://download.astralinux.ru/astra/stable/orel/repository/dists/orel/non-free/binary-amd64/

[amazonlinux2-core]
archs    =  x86_64, aarch64
name     = Amazon Linux 2 Core %(arch)s
label    = amazonlinux2-core-%(arch)s
checksum  = sha256
gpgkey_url =
gpgkey_id = C87F5B1A
gpgkey_fingerprint = 99E6 17FE 5DB5 27C0 D8BD  5F8E 11CF 1F95 C87F 5B1A
repo_url = http://amazonlinux.default.amazonaws.com/2/core/latest/%(arch)s/mirror.list

[amazonlinux2-extra-docker]
archs    =  x86_64, aarch64
name     = Amazon Extras 2 repo for Docker %(arch)s
label    = amazonlinux2-extras-docker-%(arch)s
base_channels = amazonlinux2-core-%(arch)s
checksum = sha256
gpgkey_id = C87F5B1A
gpgkey_fingerprint = 99E6 17FE 5DB5 27C0 D8BD  5F8E 11CF 1F95 C87F 5B1A
repo_url = http://amazonlinux.default.amazonaws.com/2/extras/docker/latest/%(arch)s/mirror.list

[amazonlinux2-uyuni-client]
name     = Uyuni Client Tools for %(base_channel_name)s
archs    =  x86_64, aarch64
base_channels = amazonlinux2-core-%(arch)s
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/CentOS7-Uyuni-Client-Tools/CentOS_7/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/CentOS7-Uyuni-Client-Tools/CentOS_7/

[amazonlinux2-uyuni-client-devel]
name     = Uyuni Client Tools for %(base_channel_name)s (Development)
archs    =  x86_64, aarch64
base_channels = amazonlinux2-core-%(arch)s
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/CentOS7-Uyuni-Client-Tools/CentOS_7/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/CentOS7-Uyuni-Client-Tools/CentOS_7/

[alibaba-2]
archs    = x86_64
name     = Alibaba Cloud Linux (Aliyun Linux) release 2 (%(arch)s)
checksum = sha256
gpgkey_url = https://mirrors.aliyun.com/alinux/RPM-GPG-KEY-ALIYUN
gpgkey_id  = 873141A8
gpgkey_fingerprint = EFD7 52E7 E232 ED87 12E7  635C EB80 1C41 8731 41A8
repo_url = https://mirrors.aliyun.com/alinux/2/os/%(arch)s/
dist_map_release = 2

[alibaba-2-exp]
label    = %(base_channel)s-exp
archs    = x86_64
name     = Development Packages for Alibaba Cloud Linux (Aliyun Linux) release 2 (%(arch)s)
base_channels = alibaba-2-%(arch)s
repo_url = https://mirrors.aliyun.com/alinux/2/exp/%(arch)s/

[alibaba-2-extras]
label    = %(base_channel)s-extras
archs    = x86_64
name     = Extras for Alibaba Cloud Linux (Aliyun Linux) release 2 (%(arch)s)
base_channels = alibaba-2-%(arch)s
repo_url = https://mirrors.aliyun.com/alinux/2/extras/%(arch)s/

[alibaba-2-kernels]
label    = %(base_channel)s-kernels
archs    = x86_64
name     = Additional kernels for Alibaba Cloud Linux (Aliyun Linux) release 2 (%(arch)s)
base_channels = alibaba-2-%(arch)s
repo_url = https://mirrors.aliyun.com/alinux/2/kernels/%(arch)s/

[alibaba-2-plus]
label    = %(base_channel)s-plus
archs    = x86_64
name     = Alibaba Cloud Linux (Aliyun Linux) release 2 Plus (%(arch)s)
base_channels = alibaba-2-%(arch)s
repo_url = https://mirrors.aliyun.com/alinux/2/plus/%(arch)s/

[alibaba-2-updates]
label    = %(base_channel)s-updates
archs    = x86_64
name     = Alibaba Cloud Linux (Aliyun Linux) release 2 Updates (%(arch)s)
base_channels = alibaba-2-%(arch)s
repo_url = https://mirrors.aliyun.com/alinux/2/updates/%(arch)s/

[alibaba-2-aarch64]
label    = alibaba-2-%(arch)s
archs    = aarch64
name     = Alibaba Cloud Linux (Aliyun Linux) release 2 (%(arch)s)
checksum = sha256
gpgkey_url = https://mirrors.aliyun.com/alinux/RPM-GPG-KEY-ALIYUN
gpgkey_id  = 873141A8
gpgkey_fingerprint = EFD7 52E7 E232 ED87 12E7  635C EB80 1C41 8731 41A8
repo_url = https://mirrors.aliyun.com/alinux/2/os-arm/%(arch)s/
dist_map_release = 2

[alibaba-2-exp-aarch64]
label    = %(base_channel)s-exp-aarch64
archs    = aarch64
name     = Development Packages for Alibaba Cloud Linux (Aliyun Linux) release 2 (%(arch)s)
base_channels = alibaba-2-%(arch)s
repo_url = https://mirrors.aliyun.com/alinux/2/exp/%(arch)s/

[alibaba-2-extras-aarch64]
label    = %(base_channel)s-extras-aarch64
archs    = aarch64
name     = Extras for Alibaba Cloud Linux (Aliyun Linux) release 2 (%(arch)s)
base_channels = alibaba-2-%(arch)s
repo_url = https://mirrors.aliyun.com/alinux/2/extras/%(arch)s/

[alibaba-2-kernels-aarch64]
label    = %(base_channel)s-kernels-aarch64
archs    = aarch64
name     = Additional kernels for Alibaba Cloud Linux (Aliyun Linux) release 2 (%(arch)s)
base_channels = alibaba-2-%(arch)s
repo_url = https://mirrors.aliyun.com/alinux/2/kernels/%(arch)s/

[alibaba-2-plus-aarch64]
label    = %(base_channel)s-plus-aarch64
archs    = aarch64
name     = Alibaba Cloud Linux (Aliyun Linux) release 2 Plus (%(arch)s)
base_channels = alibaba-2-%(arch)s
repo_url = https://mirrors.aliyun.com/alinux/2/plus/%(arch)s/

[alibaba-2-updates-aarch64]
label    = %(base_channel)s-updates-aarch64
archs    = aarch64
name     = Alibaba Cloud Linux (Aliyun Linux) release 2 Updates (%(arch)s)
base_channels = alibaba-2-%(arch)s
repo_url = https://mirrors.aliyun.com/alinux/2/updates/%(arch)s/

[alibaba-2-uyuni-client]
name     = Uyuni Client Tools for %(base_channel_name)s
archs    = %(_x86_archs)s, aarch64
base_channels = alibaba-2-%(arch)s
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/CentOS7-Uyuni-Client-Tools/CentOS_7/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/CentOS7-Uyuni-Client-Tools/CentOS_7/

[alibaba-2-uyuni-client-devel]
name     = Uyuni Client Tools for %(base_channel_name)s (Development)
archs    = %(_x86_archs)s, aarch64
base_channels = alibaba-2-%(arch)s
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/CentOS7-Uyuni-Client-Tools/CentOS_7/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/CentOS7-Uyuni-Client-Tools/CentOS_7/

[almalinux8]
archs    = x86_64, aarch64
name     = AlmaLinux 8 (%(arch)s)
gpgkey_url = https://repo.almalinux.org/almalinux/RPM-GPG-KEY-AlmaLinux
gpgkey_id = 3ABB34F8
gpgkey_fingerprint = 5E9B 8F56 17B5 066C E920 57C3 488F CF7C 3ABB 34F8
repo_url = https://mirrors.almalinux.org/mirrorlist/8/baseos
dist_map_release = 8

[almalinux8-appstream]
label    = %(base_channel)s-appstream
archs    = x86_64, aarch64
name     = AlmaLinux 8 AppStream (%(arch)s)
base_channels = almalinux8-%(arch)s
repo_url = https://mirrors.almalinux.org/mirrorlist/8/appstream

[almalinux8-extras]
label    = %(base_channel)s-extras
archs    = x86_64, aarch64
name     = AlmaLinux 8 Extras (%(arch)s)
base_channels = almalinux8-%(arch)s
repo_url = https://mirrors.almalinux.org/mirrorlist/8/extras

[almalinux8-powertools]
label    = %(base_channel)s-powertools
archs    = x86_64, aarch64
name     = AlmaLinux 8 PowerTools (%(arch)s)
base_channels = almalinux8-%(arch)s
repo_url = https://mirrors.almalinux.org/mirrorlist/8/powertools

[almalinux8-ha]
label    = %(base_channel)s-ha
archs    = x86_64, aarch64
name     = AlmaLinux 8 High Availability (%(arch)s)
base_channels = almalinux8-%(arch)s
repo_url = https://mirrors.almalinux.org/mirrorlist/8/ha

[almalinux8-uyuni-client]
name     = Uyuni Client Tools for %(base_channel_name)s
archs    = %(_x86_archs)s, aarch64
base_channels = almalinux8-%(arch)s
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/EL8-Uyuni-Client-Tools/EL_8/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/EL8-Uyuni-Client-Tools/EL_8/

[almalinux8-uyuni-client-devel]
name     = Uyuni Client Tools for %(base_channel_name)s (Development)
archs    = %(_x86_archs)s, aarch64
base_channels = almalinux8-%(arch)s
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/EL8-Uyuni-Client-Tools/EL_8/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/EL8-Uyuni-Client-Tools/EL_8/

[almalinux8-epel8]
label    = epel8-%(base_channel)s
name     = EPEL 8 for %(base_channel_name)s
archs    = x86_64, ppc64le, aarch64
base_channels = almalinux8-%(arch)s
gpgkey_url = http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8
gpgkey_id = 2F86D6A1
gpgkey_fingerprint = 94E2 79EB 8D8F 25B2 1810 ADF1 21EA 45AB 2F86 D6A1
repo_url = http://mirrors.fedoraproject.org/mirrorlist?repo=epel-8&arch=%(arch)s

[rockylinux8]
archs    = x86_64, aarch64
name     = Rocky Linux 8 (%(arch)s)
gpgkey_url = https://dl.rockylinux.org/pub/rocky/RPM-GPG-KEY-rockyofficial
gpgkey_id = 6D745A60
gpgkey_fingerprint = 7051 C470 A929 F454 CEBE 37B7 15AF 5DAC 6D74 5A60
repo_url = https://mirrors.rockylinux.org/mirrorlist?repo=BaseOS-8&arch=%(arch)s
dist_map_release = 8

[rockylinux8-appstream]
label    = %(base_channel)s-appstream
archs    = x86_64, aarch64
name     = Rocky Linux 8 - AppStream (%(arch)s)
base_channels = rockylinux8-%(arch)s
repo_url = https://mirrors.rockylinux.org/mirrorlist?repo=AppStream-8&arch=%(arch)s

[rockylinux8-extras]
label    = %(base_channel)s-extras
archs    = x86_64, aarch64
name     = Rocky Linux 8 - Extras (%(arch)s)
base_channels = rockylinux8-%(arch)s
repo_url = https://mirrors.rockylinux.org/mirrorlist?repo=extras-8&arch=%(arch)s

[rockylinux8-powertools]
label    = %(base_channel)s-powertools
archs    = x86_64, aarch64
name     = Rocky Linux 8 - PowerTools (%(arch)s)
base_channels = rockylinux8-%(arch)s
repo_url = https://mirrors.rockylinux.org/mirrorlist?repo=PowerTools-8&arch=%(arch)s

[rockylinux8-ha]
label    = %(base_channel)s-ha
archs    = x86_64, aarch64
name     = Rocky Linux 8 - High Availability (%(arch)s)
base_channels = rockylinux8-%(arch)s
repo_url = https://mirrors.rockylinux.org/mirrorlist?repo=HighAvailability-8&arch=%(arch)s

[rockylinux8-uyuni-client]
name     = Uyuni Client Tools for %(base_channel_name)s
archs    = %(_x86_archs)s, aarch64
base_channels = rockylinux8-%(arch)s
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/EL8-Uyuni-Client-Tools/EL_8/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable:/EL8-Uyuni-Client-Tools/EL_8/

[rockylinux8-uyuni-client-devel]
name     = Uyuni Client Tools for %(base_channel_name)s (Development)
archs    = %(_x86_archs)s, aarch64
base_channels = rockylinux8-%(arch)s
gpgkey_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/EL8-Uyuni-Client-Tools/EL_8/repodata/repomd.xml.key
gpgkey_id = %(_uyuni_gpgkey_id)s
gpgkey_fingerprint = %(_uyuni_gpgkey_fingerprint)s
repo_url = https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master:/EL8-Uyuni-Client-Tools/EL_8/

[rockylinux8-epel8]
label    = epel8-%(base_channel)s
name     = EPEL 8 for %(base_channel_name)s
archs    = x86_64, ppc64le, aarch64
base_channels = rockylinux8-%(arch)s
gpgkey_url = http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8
gpgkey_id = 2F86D6A1
gpgkey_fingerprint = 94E2 79EB 8D8F 25B2 1810 ADF1 21EA 45AB 2F86 D6A1
repo_url = http://mirrors.fedoraproject.org/mirrorlist?repo=epel-8&arch=%(arch)s
07070100000023000081B400000000000000000000000162C3F37D0000717F000000000000000000000000000000000000002800000000spacewalk-utils/spacewalk-utils.changes-------------------------------------------------------------------
Tue Jun 21 18:31:46 CEST 2022 - jgonzalez@suse.com

- version 4.3.12-1
  * add deprecation warning for spacewalk-clone-by-date
  * Add EPEL8 for Almalinux 8 and Rocky 8 in spacewalk-common-channels.ini

-------------------------------------------------------------------
Fri May 20 00:12:42 CEST 2022 - jgonzalez@suse.com

- version 4.3.11-1
  * openSUSE Leap 15.4 repositories

-------------------------------------------------------------------
Wed May 04 15:23:36 CEST 2022 - jgonzalez@suse.com

- version 4.3.10-1
  * use mgr-ssl-cert-setup tool in spacewalk-hostname-rename to
    deploy certificates on the server

-------------------------------------------------------------------
Sat Apr 23 00:33:09 CEST 2022 - jgonzalez@suse.com

- version 4.3.9-1
  * Fix the channel name for the Debian11 Client Tools

-------------------------------------------------------------------
Tue Apr 19 12:42:43 CEST 2022 - jgonzalez@suse.com

- version 4.3.8-1
  * Use the official URL for Debian11 security updates

-------------------------------------------------------------------
Fri Mar 11 15:22:33 CET 2022 - jgonzalez@suse.com

- version 4.3.7-1
  * Fix changelog format

-------------------------------------------------------------------
Fri Mar 11 14:54:06 CET 2022 - jgonzalez@suse.com

- version 4.3.6-1
  * Removed pylint according to Fedora packaging guidelines.

-------------------------------------------------------------------
Tue Jan 18 14:01:26 CET 2022 - jgonzalez@suse.com

- version 4.3.5-1
  * require python macros for building
  * Add Debian 11 repositories

-------------------------------------------------------------------
Fri Dec 03 12:29:48 CET 2021 - jgonzalez@suse.com

- version 4.3.4-1
  * Use Almalinux Mirror List
  * Use EL8 Client Tools for EL8 clones, as CentOS8 will be end of
    life by the end of the year

-------------------------------------------------------------------
Tue Nov 16 10:07:42 CET 2021 - jgonzalez@suse.com

- version 4.3.3-1
  * Enable aarch64 for AlmaLinux 8

-------------------------------------------------------------------
Fri Sep 17 12:12:12 CEST 2021 - jgonzalez@suse.com

- version 4.3.2-1
  * When renaming: don't regenerate CA, allow using third-party
    certificate and trigger pillar refresh (bsc#1190123)

-------------------------------------------------------------------
Mon Aug 09 11:07:38 CEST 2021 - jgonzalez@suse.com

- version 4.3.1-1
- Enable Uyuni Server and Proxy Stable for openSUSE Leap 15.3
- Add Rocky Linux 8 repositories
- Fix a typo at the AlmaLinux 8 Uyuni client tools for devel
- Fix the GPG key fingerprint for Springdale Linux 8
- Updated cloneByDate.py formatting to allow pylint pass.

-------------------------------------------------------------------
Fri Jun 18 15:18:20 CEST 2021 - jgonzalez@suse.com

- version 4.2.11-1
- Use the right URLs for the AlmaLinux 8 Uyuni client tools
- Add SLE Updates and Backport Updates repositories for openSUSE
  Leap 15.3

-------------------------------------------------------------------
Fri Jun 04 12:43:25 CEST 2021 - jgonzalez@suse.com

- version 4.2.10-1
- Update devel common channels definitions to openSUSE Leap 15.3

-------------------------------------------------------------------
Mon May 24 12:38:52 CEST 2021 - jgonzalez@suse.com

- version 4.2.9-1
- spacewalk-hostname-rename: change hostname in /root/.mgr-sync (bsc#1183994)
- adapt hostname rename check to allow also short hostname in various
  hostname files on the filesystem (bsc#1176512)

-------------------------------------------------------------------
Wed May 05 16:39:14 CEST 2021 - jgonzalez@suse.com

- version 4.2.8-1
- Align the modules.yaml of target channel after cloning errata (bsc#1182810)
- Add client tools for SLES12SP5 and SLES4SAP 12SP4/12SP5/15SP2/15SP3
- Fix the URLs for SLE15 Stable client tools
- Add openSUSE Tumbleweed/microOS

-------------------------------------------------------------------
Mon Apr 19 16:54:14 CEST 2021 - jgonzalez@suse.com

- version 4.2.7-1
- Add SLE15SP3

-------------------------------------------------------------------
Mon Apr 19 11:37:47 CEST 2021 - jgonzalez@suse.com

- version 4.2.6-1
- Bugfix for ubuntu-18.04 repo urls: multiverse, restricted and backports

-------------------------------------------------------------------
Fri Apr 16 13:24:29 CEST 2021 - jgonzalez@suse.com

- version 4.2.5-1
- Add openSUSE Leap 15.3
- Add aarch64 for CentOS7/8 and Oracle7/8
- Add AlmaLinux 8 repositories
- Add Amazon Linux 2 repositories
- Add Alibaba Cloud Linux 2 repositories
- Add current UEK repos to Oracle Linux
- Add multiverse, restricted and backports to Ubuntu 16.04, 18.04 and 20.04
- Add the Universe Security repositories for Ubuntu
- Complete the fix arch handling in spacewalk-common-channels

-------------------------------------------------------------------
Wed Jan 27 13:06:52 CET 2021 - jgonzalez@suse.com

- version 4.2.4-1
- Fix arch handling in spacewalk-common-channels
- Fix modular data handling for cloned channels (bsc#1177508)
- spacewalk-common-channels: Update CentOS6 URLs to use vault.centos.org
- spacewalk-common-channels: re-use repositories when different channels
  use the same one

-------------------------------------------------------------------
Thu Dec 03 13:54:48 CET 2020 - jgonzalez@suse.com

- version 4.2.3-1
- Fixed Python files to pass pylint.
- Modified SPEC to work with RHEL8.
- Remove Debian 9 and 10 channels for SUSE Manager, now provided by SCC data

-------------------------------------------------------------------
Wed Nov 25 12:25:22 CET 2020 - jgonzalez@suse.com

- version 4.2.2-1
- Enable ppc64le for CentOS7 and CentOS8

-------------------------------------------------------------------
Fri Sep 18 12:17:53 CEST 2020 - jgonzalez@suse.com

- version 4.2.1-1
- Avoid exceptions on the logs when looking for channels that do
  not exist (bsc#1175529)
- Update package version to 4.2.0

-------------------------------------------------------------------
Thu Jul 23 13:32:55 CEST 2020 - jgonzalez@suse.com

- version 4.1.11-1
- Channels for Uyuni Proxy and Server for openSUSE Leap 15.2
- Add aarch64 for openSUSE Leap 15.1 and 15.2

-------------------------------------------------------------------
Wed Jun 10 12:21:25 CEST 2020 - jgonzalez@suse.com

- version 4.1.10-1
- Add channels for openSUSE Leap 15.1 and Uyuni Client Tools, and
  Uyuni Client Tools for SLE15SP2
- Use HTTPS for Oracle repositories

-------------------------------------------------------------------
Wed May 20 10:56:39 CEST 2020 - jgonzalez@suse.com

- version 4.1.9-1
- Create separate channels for Oracle Client Tools (use CentOS URLs)
- Add repositories for Ubuntu 20.04 LTS

-------------------------------------------------------------------
Mon Apr 13 09:34:52 CEST 2020 - jgonzalez@suse.com

- version 4.1.8-1
- Add Astra Linux CE "Orel" repositories
- check for delimiter as well when detecting current phase (bsc#1164771)

-------------------------------------------------------------------
Thu Mar 19 12:16:45 CET 2020 - jgonzalez@suse.com

- version 4.1.7-1
- fix spacewalk-common-channel when base channel exists
- Add Springdale Linux 6, 7 and 8 repositories

-------------------------------------------------------------------
Wed Mar 11 10:57:45 CET 2020 - jgonzalez@suse.com

- version 4.1.6-1
- Split spacewalk-utils into two packages:
  * spacewalk-utils with tools supported on SUSE Manager
  * spacewalk-utils-extras with tools unsupported on SUSE Manager
 
-------------------------------------------------------------------
Mon Feb 17 12:52:28 CET 2020 - jgonzalez@suse.com

- version 4.1.5-1
- Add Oracle Enterprise Linux 8 repositories

-------------------------------------------------------------------
Thu Jan 30 17:17:13 CET 2020 - jgonzalez@suse.com

- version 4.1.4-1
- Use new URLs for Uyuni Server and Proxy Stable, and unify channel
  names for Devel

-------------------------------------------------------------------
Thu Jan 30 14:49:16 CET 2020 - jgonzalez@suse.com

- version 4.1.3-1
- Add FQDN resolver for spacewalk-manage-channel-lifecycle (bsc#1153578)
- Fix typo in CentOS 7 Software Collection

-------------------------------------------------------------------
Wed Jan 22 12:14:49 CET 2020 - jgonzalez@suse.com

- version 4.1.2-1
- Add universe updates to Ubuntu common channels
- Rename ubuntu-XXXX-amd64-main-universe to ubuntu-XXXX-amd64-universe and
  ubuntu-XXXX-amd64-main-universe-uyuni to ubuntu-XXXX-amd64-universe-uyuni
  in common channels
- Enable CentOS8 at spacewalk-common-channels (bsc#1159206)

-------------------------------------------------------------------
Wed Nov 27 17:04:04 CET 2019 - jgonzalez@suse.com

- version 4.1.1-1
- Add FQDN resolver for spacewalk-manage-channel-lifecycle (bsc#1153578)
- build as python3 only package
- require uyuni-common-libs
- add Fedora 30 to spacewalk-common-channels
- remove EOL products Fedora 27 and 28 from spacewalk-common-channels
- add command line argument --ssl-city=<SSL_CITY> to spacewalk-hostname-rename
- Update URLs for Uyuni Master Server and Proxy
- Bump version to 4.1.0 (bsc#1154940)
- fix hostname-rename script for cobbler files
- Set openSUSE Leap 15.1 as new Base OS for Uyuni Server and Proxy
- Remove EOL openSUSE Leap 42.3 from spacewalk-common-channels
- Require uyuni-base-common for /etc/rhn
- add spacewalk-watch-channel-sync.sh to spacewalk-utils

-------------------------------------------------------------------
Thu Aug 01 19:00:44 CEST 2019 - jgonzalez@suse.com

- version 4.0.12-1
- common-channels: Duplicate Ubuntu channels for Uyuni

-------------------------------------------------------------------
Wed Jul 31 17:37:23 CEST 2019 - jgonzalez@suse.com

- version 4.0.11-1
- common-channels: Fix repo type assignment for type YUM (bsc#1151683)
- Use the same client tools for openSUSE Leap 15.0 for all openSUSE Leap
  15.X releases.
- Fixes SSL hostname matching (bsc#1141663)
- hostname-rename: change hostname in cobbler db and autoinst data
- Adds support for Ubuntu and Debian channels to spacewalk-common-channels.

-------------------------------------------------------------------
Wed May 15 15:20:26 CEST 2019 - jgonzalez@suse.com

- version 4.0.10-1
- SPEC cleanup
- Add support for openSUSE Leap 15.1
- Fixes numerous Python3 errors in spacewalk-manage-channel-lifecycle
- Fixes an attempt to write in binary mode a string type (bsc#1132361)
- Adds some integration and unit tests

-------------------------------------------------------------------
Mon Apr 22 12:17:00 CEST 2019 - jgonzalez@suse.com

- version 4.0.9-1
- Fix spacewalk-common-channels interpolation error (bsc#1131988)
- add makefile and pylint configuration

-------------------------------------------------------------------
Fri Mar 29 10:33:32 CET 2019 - jgonzalez@suse.com

- version 4.0.8-1
- Add support on depsolver to use a custom input file and produce YAML output.
- Fix depsolver to use libsolv instead of yum library.
- Fix spacewalk-clone-by-date to not depend on yum.
- Fix issues in spacewalk scripts after migration to Python 3.

-------------------------------------------------------------------
Mon Mar 25 16:48:23 CET 2019 - jgonzalez@suse.com

- version 4.0.7-1
- Sync with Spacewalk
- fix taskotop crash caused by integer argument expected

-------------------------------------------------------------------
Wed Feb 27 13:04:42 CET 2019 - jgonzalez@suse.com

- version 4.0.6-1
- Fix package installation on python3 based operating systems
- Sync with Spacewalk
- Fix typo in spacewalk-manage-channel-lifecycle

-------------------------------------------------------------------
Thu Jan 17 14:45:24 CET 2019 - jgonzalez@suse.com

- version 4.0.5-1
- Make spacewalk-utils python3 compatible

-------------------------------------------------------------------
Wed Jan 16 12:25:40 CET 2019 - jgonzalez@suse.com

- version 4.0.4-1
- Create directory with correct ownership to prevent file conflicts

-------------------------------------------------------------------
Mon Dec 17 14:42:10 CET 2018 - jgonzalez@suse.com

- version 4.0.3-1
- Exit with an error if spacewalk-common-channels does not match
  any channel
- Add Uyuni Client Tools for CentOS, openSUSE and SLES
- Removed channels for distributions EoL
- Removed Spacewalk Server channels
- Removed Spacewalk Client Tools channels for CentOS and openSUSE
- Fix typo at --phases option help

-------------------------------------------------------------------
Fri Oct 26 10:46:14 CEST 2018 - jgonzalez@suse.com

- version 4.0.2-1
- remove spacewalk-archive-audits

-------------------------------------------------------------------
Fri Aug 10 15:34:58 CEST 2018 - jgonzalez@suse.com

- version 4.0.1-1
- Bump version to 4.0.0 (bsc#1104034)
- Fix copyright for the package specfile (bsc#1103696)

-------------------------------------------------------------------
Tue Jun 05 10:12:23 CEST 2018 - jgonzalez@suse.com

- version 2.8.18.2-1
- remove unmaintained openSUSE Leap versions from common channels
- add openSUSE Leap 15.0 to common channels
- Add an ability to specify admin credentials in settings.conf (bsc#1085484)

-------------------------------------------------------------------
Mon May 07 15:25:51 CEST 2018 - jgonzalez@suse.com

- version 2.8.18.1-1
- clone-by-date removes packages only if the list is not empty (bsc#1089396)

-------------------------------------------------------------------
Mon Apr 23 09:19:20 CEST 2018 - jgonzalez@suse.com

- version 2.8.17.1-1
- Sync with upstream (bsc#1083294)
- Update gpgs in database
- Update common channels with latest releases

-------------------------------------------------------------------
Mon Mar 26 09:09:33 CEST 2018 - jgonzalez@suse.com

- version 2.8.14.1-1
- Sync with upstream (bsc#1083294)
- 1537766 - make sure to send output to log and stdout
- 1537766 - reject negative numbers for batch/interval/age

-------------------------------------------------------------------
Mon Mar 05 09:00:29 CET 2018 - jgonzalez@suse.com

- version 2.8.13.2-1
- remove empty clean section from spec (bsc#1083294)

-------------------------------------------------------------------
Wed Feb 28 10:05:43 CET 2018 - jgonzalez@suse.com

- version 2.8.13.1-1
- 1537766 - Fix broken DELETE in postgresql

-------------------------------------------------------------------
Fri Feb 02 13:10:37 CET 2018 - jgonzalez@suse.com

- version 2.8.9.2-1
- 1537766 - Add spacewalk-manage-snapshots, to give sw-admin a snapshot-mgt
  tool

-------------------------------------------------------------------
Wed Jan 17 13:02:31 CET 2018 - jgonzalez@suse.com

- version 2.8.8.1-1
- add nightly-server repository for Fedora 27
- add nightly-client repository for Fedora 27
- add Fedora 27 repositories
- remove Fedora 24 as it is EOL now
- Remove restrictions imposed on regex used in 'removelist' parameter passed
  to spacewalk-clone-by-date that allowed only exact match
  (bsc#1075254)

-------------------------------------------------------------------
Tue Aug 08 11:32:45 CEST 2017 - fkobzik@suse.de

- version 2.7.10.5-1
- Don't show password on input in spacewalk-manage-channel-lifecycle
  (bsc#1043795)

-------------------------------------------------------------------
Mon Jun 12 09:03:22 CEST 2017 - mc@suse.de

- version 2.7.10.4-1
- add opensuse_leap42_3 and remove opensuse13_2 (bsc#1043778)

-------------------------------------------------------------------
Mon May 29 15:33:16 CEST 2017 - mc@suse.de

- version 2.7.10.3-1
- minor tweak to archive-audits manpage
- taskotop retrieve list of each task by end date, not start date

-------------------------------------------------------------------
Wed May 03 16:09:12 CEST 2017 - michele.bologna@suse.com

- version 2.7.10.2-1
- update CentOS7 addon repos
- removed definitions of EOLed products
- add new channels Fedora 25 and Spacewalk 2.6

-------------------------------------------------------------------
Mon Apr 03 14:45:30 CEST 2017 - mc@suse.de

- version 2.7.10.1-1
- add UTF-8 encoding before output on stdout
- Update taskotop man page
- Add command line argument to optionally specify the number of times
  taskotop should iterate before exiting.
- Modify output columns to include task run end time and optional
  task run start time.  Drop the status column because it's redundant.
- Add new 'each task' display mode

-------------------------------------------------------------------
Fri Mar 31 09:39:51 CEST 2017 - mc@suse.de

- version 2.7.8.1-1
- docbook manpage for delete-old-systems-interactive
- rewrite delete-old-systems-interactive into python
- add taskomaticd process info in optional header to taskotop
- remove system currency generation script
- taskotop enhancements

-------------------------------------------------------------------
Tue Mar 07 14:42:32 CET 2017 - mc@suse.de

- version 2.7.6.1-1
- Updated links to github in spec files
- add exception processing in taskotop
- tweaked manpage a bit
- add additional info about taskotop
- add Fedora 25 repositories

-------------------------------------------------------------------
Tue Feb 07 15:10:11 CET 2017 - michele.bologna@suse.com

- version 2.7.1.1-1
- Use spacewalk 2.6 for openSUSE Leap 42.2
- Add channels for openSUSE Leap 42.2

-------------------------------------------------------------------
Wed Jan 11 16:40:21 CET 2017 - michele.bologna@suse.com

- version 2.7.0.1-1
- Bumping package versions for 2.7.

-------------------------------------------------------------------
Fri Dec 16 13:55:48 CET 2016 - michele.bologna@suse.com

- version 2.5.6.5-1
- use spacewalk 2.6 for openSUSE Leap 42.2
- add channels for openSUSE Leap 42.2

-------------------------------------------------------------------
Tue May 24 15:29:34 CEST 2016 - kwalter@suse.com

- version 2.5.6.4-1
- taskotop: a utility to monitor what Taskomatic is doing

-------------------------------------------------------------------
Wed Apr 06 08:48:55 CEST 2016 - mc@suse.de

- version 2.5.6.3-1
- spacewalk-clone-by-date - package names may contain special regexp
  chars now

-------------------------------------------------------------------
Mon Mar 21 16:25:39 CET 2016 - mc@suse.de

- version 2.5.6.2-1
- fix file permissions (bsc#970550)

-------------------------------------------------------------------
Tue Jan 26 14:03:16 CET 2016 - mc@suse.de

- version 2.5.6.1-1
- Extended allowed delimiters to include '.'
- Added UEK4 channels for Oracle Linux 6 and 7.
- add openSUSE Leap 42.1 (bsc#961353)
- remove outdated openSUSE distribution 13.1

-------------------------------------------------------------------
Sat Jan 16 11:19:15 CET 2016 - mc@suse.de

- version 2.5.4.1-1
- fixing typo in 'archs'

-------------------------------------------------------------------
Tue Jan 05 15:54:59 CET 2016 - mc@suse.de

- version 2.5.3.2-1
- Add delimiter option for spacewalk-manage-channel-lifecycle

-------------------------------------------------------------------
Wed Dec 16 11:16:50 CET 2015 - mc@suse.de

- version 2.5.3.1-1
- Updated Oracle yum repo URLs and added new repositories for OL6 and OL7.
- make clone-by-date python 2.4 compatible

-------------------------------------------------------------------
Mon Nov 30 11:05:27 CET 2015 - mc@suse.de

- version 2.5.1.1-1
- adapt checks for cert trust dirs
- disable spacewalk-dump-schema functionality when rhn-upgrade
  package is found

-------------------------------------------------------------------
Wed Oct 07 13:32:35 CEST 2015 - mc@suse.de

- version 2.5.0.1-1
- spacewalk-hostname-rename knows to start postgresql
- prevent spacewalk-hostname-rename to fail with an IPv6 address
- general bugfixes

-------------------------------------------------------------------
Tue Mar 31 14:39:32 CEST 2015 - mc@suse.de

- version 2.1.27.13-1
- spacewalk-manage-channel-lifecycle: Fix automatic assumption of
  first phase (bsc#922294)

-------------------------------------------------------------------
Tue Feb 03 11:58:46 CET 2015 - mc@suse.de

- version 2.1.27.12-1
- make sql queries Oracle 10g compatible
- disable and enable indexes in Oracle, set time format and control parameters
- added Fedora 21 channels

-------------------------------------------------------------------
Thu Dec 04 13:28:41 CET 2014 - mc@suse.de

- version 2.1.27.11-1
- prevent empty dir creation by scbd
- Updated spacewalk-common-channels.ini to include Oracle Linux 7
- fix error if blacklist / removelist not in scbd config file
- improve error messaging in scbd

-------------------------------------------------------------------
Fri Nov 07 13:15:08 CET 2014 - mc@suse.de

- version 2.1.27.10-1
- Add openSUSE 13.2 repositories to spacewalk-common-channels
- extending spacewalk-api man page with usage of boolean values

-------------------------------------------------------------------
Tue Oct 14 15:15:59 CEST 2014 - mc@suse.de

- version 2.1.27.9-1
- spacewalk-manage-channel-lifecycle: put default phases in help output
- Allow use of "-" symbol in phase names

-------------------------------------------------------------------
Fri Sep 12 15:47:56 CEST 2014 - mc@suse.de

- version 2.1.27.8-1
- man page update: clone-by-date doesn't support 3rd party repos
- improve clone-by-date dependency resolution
- Check if dest parent is cloned
- sw-clone-by-date --dry-run
- make clone-by-date able to specify --parents from config file
- you should not have to specify both parent channels for clone-by-
  date
- CentOS 7 + EPEL 7 channels

-------------------------------------------------------------------
Tue Jun 17 10:17:09 CEST 2014 - jrenner@suse.de

- version 2.1.27.7-1
- Fixed spacewalk-hostname-rename to work with postgresql backend
- Added limitation of spacewalk-clone-by-date for RHEL4 and earlier

-------------------------------------------------------------------
Tue May 27 17:13:27 CEST 2014 - mc@suse.de

- version 2.1.27.6-1
- Show SLES channels in help output

-------------------------------------------------------------------
Wed May 21 11:07:19 CEST 2014 - mc@suse.de

- version 2.1.27.5-1
- spacewalk-manage-channel-lifecycle: Added better channel tree printing
- spacewalk-manage-channel-lifecycle: Added multiple workflows
- spacewalk-manage-channel-lifecycle: Fixing None-channel
- spacewalk-manage-channel-lifecycle: introduce a configuration file

-------------------------------------------------------------------
Fri May 16 13:02:44 CEST 2014 - mc@suse.de

- version 2.1.27.4-1
- Assume raw mode if the directory with definition files doesn't exist
- spacewalk-sync-setup: Refined error message when apply or describe-templates
  is passed w/o hosts
- spacewalk-sync-setup: Renamed options,'dry-run' to 'describe-templates' with
  'dt' and 'default-ok' to 'configured-hosts' with 'ch'
- spacewalk-sync-setup: Show help, if no options passed but config already
  exists.
- spacewalk-sync-setup: Dry run default should be False.
- spacewalk-sync-setup: Processing master and slave credentials in this order.
- spacewalk-sync-setup: Require master or slave hostname or --default-ok option
  on apply or dry run modes.
- spacewalk-sync-setup: Always initialize config with the real values.
- spacewalk-sync-setup: Asking for username and password
- spacewalk-sync-setup: Branding change from Satellite to Spacewalk
- spacewalk-sync-setup: Fix duplicate error messaging.
- spacewalk-sync-setup: Fix test FQDN input.
- spacewalk-sync-setup: Fix crash on accessing required fields before
  validation. Fix plain text visible password.
- spacewalk-sync-setup: Fix option parsing

-------------------------------------------------------------------
Tue May 06 15:16:37 CEST 2014 - mc@suse.de

- version 2.1.27.3-1
- sw-clone-by-date validation update
- fix problem where --channels=src_label dst_label threw an error

-------------------------------------------------------------------
Thu Mar 27 10:18:42 CET 2014 - fcastelli@suse.com

- version 2.1.27.2-1
- Make it possible to specify description from clone-by-date
- Fixing possible nonetype error
- Let spacewalk-api send username and password as strings
- Fixing long lines in clone-by-date
- Add option to clone-by-date to only clone specified errata
- Allow user to specify channel name through clone-by-date
- Spacewalk-repo-sync prints the same message for every channel.

-------------------------------------------------------------------
Fri Feb 07 13:54:04 CET 2014 - mc@suse.de

- version 2.1.27.1-1
- create api for channel errata syncing, have clone-by-date call it
- adding postgresql92-postgresql to possible db service names
- Changed gpg keys so they match reality.
- Removing unsupported Fedora 17
- Adding Fedora 20 to spacewalk-common-channels
- adding 2.1 repositories to spacewalk-common-channels
- remove openSUSE 12.2 and add openSUSE 13.1 channels
- clone-by-date: fix with dependency check enabled (bnc#858652)
- Updating the copyright years info

-------------------------------------------------------------------
Mon Jan 13 09:43:53 CET 2014 - mc@suse.de

- version 2.1.23.1-1
- fix man page generation on SUSE
- fixed man page encoding

-------------------------------------------------------------------
Mon Dec 09 16:20:01 CET 2013 - mc@suse.de

- version 2.1.22.1-1
- switch to 2.1

-------------------------------------------------------------------
Wed Jun 12 13:38:38 CEST 2013 - mc@suse.de

- version 1.7.15.11-1
- work around buggy perl version in SLES11
  (manually specify encoding options for spacewalk-dump-schema)
- check to see if the key exists before initializing parent channel key
- fixed promote phase naming errors
- correctly quote the database name
- disable, enable & rebuild indexes for migrations

-------------------------------------------------------------------
Thu Apr 04 15:28:11 CEST 2013 - mc@suse.de

- version 1.7.15.10-1
- add openSUSE 12.3 to spacewalk-common-channels config
- add script for automated user creation from LDAP (FATE#312615)

-------------------------------------------------------------------
Thu Nov 22 15:49:53 CET 2012 - jrenner@suse.de

- version 1.7.15.9-1
- add openSUSE 12.2 to common channels
- spacewalk-setup-cobbler do not support --enable-tftp option (bnc#787931)
- install missing spacewalk-manage-channel-lifecycle script

-------------------------------------------------------------------
Fri Sep 28 16:10:08 CEST 2012 - mc@suse.de

- version 1.7.15.8-1
- make sure the regular expressions actually match.
- The Enhancement Advisory is actually Product Enhancement Advisory.
- strip non-number chars from date format

-------------------------------------------------------------------
Tue Aug 07 16:41:16 CEST 2012 - mc@suse.de

- version 1.7.15.7-1
- prevent spacewalk-hostname-rename to fail with an IPv6 address

-------------------------------------------------------------------
Mon Jun 25 10:24:30 CEST 2012 - mc@suse.de

- version 1.7.15.6-1
- adding license files for gpl v2+v3 (bnc#764869)
- system.list_user_systems() now returns localtime

-------------------------------------------------------------------
Thu May 31 10:53:01 CEST 2012 - mc@suse.de

- version 1.7.15.5-1
- spacewalk-clone-by-date fails with TypeError when on Postgres
  database.
- use spacewalk-setup-cobbler instead of outdated cobbler-setup
- Revert "set localhost instead of hostname to tnsnames.ora and listener.ora"
- added version for scientific linux default channel mapping

-------------------------------------------------------------------
Mon May 14 10:50:14 CEST 2012 - mc@suse.de

- version 1.7.15.4-1
- The plan_table is not part of our schema, do not dump it.
- added dist_map_release for automatic OS->base channel mapping
- set dist release map via setDefaultMap
- removed fedora12/13/14 which are long time EOL

-------------------------------------------------------------------
Fri Apr 27 16:24:46 CEST 2012 - mc@suse.de

- version 1.7.15.3-1
- Better regex for getting system_id in apply_errata
- determine the advisory type by parsing "advisory_type"
- implement -n for apply_errata

-------------------------------------------------------------------
Fri Apr 20 15:31:39 CEST 2012 - mc@suse.de

- version 1.7.15.2-1
- make generated SSL certificate publicly available
- new option to list snapshot details
- support for psql syntax
- added spacewalk-manage-channel-lifecycle script
- spacewalk-clone-by-date manpage bugfixes/cleanups
- reused function from spacewalk.common.cli

-------------------------------------------------------------------
Wed Mar 21 17:54:26 CET 2012 - mc@suse.de

- version 1.7.15.1-1
- Bumping package version

-------------------------------------------------------------------
Sun Jan 30 15:32:23 CET 2011 - mc@suse.de

- backport upstream fixes

-------------------------------------------------------------------
Wed Sep 15 10:09:06 CEST 2010 - mantel@suse.de

- Initial release of spacewalk-utils

-------------------------------------------------------------------
07070100000024000081FD00000000000000000000000162C3F37D0000070B000000000000000000000000000000000000002300000000spacewalk-utils/sw-system-snapshot#!/usr/bin/python3
#
# wrapper script for the system snapshot tool
# (delegates to utils.systemSnapshot; see the import and error text below)
#
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# Authors: Brad Buckingham
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#

# This file is an executable wrapper, not a library: refuse to be imported.
if __name__ != '__main__':
    raise ImportError("module cannot be imported")

import sys


def systemExit(code, msgs=None):
    """Write optional message(s) to stderr and exit with the given code.

    :param code: process exit status passed to sys.exit()
    :param msgs: a single object, or a list/tuple of objects; each is
                 str()-converted and written to stderr on its own line.
    :raises SystemExit: always.
    """
    if msgs:
        # Normalize a lone message into a tuple so the loop below works.
        if not isinstance(msgs, (list, tuple)):
            msgs = (msgs, )
        for msg in msgs:
            sys.stderr.write(str(msg) + '\n')
    sys.exit(code)

# Location where the spacewalk/rhn python packages are installed.
_LIBPATH = "/usr/share/rhn"
# add to the path if need be
if _LIBPATH not in sys.path:
    sys.path.append(_LIBPATH)

try:
    # NOTE: importing the real implementation; this wrapper only delegates.
    from utils import systemSnapshot
except KeyboardInterrupt:
    systemExit(0, "\nUser interrupted process.")
except ImportError as e:
    # Exit code 2: the snapshot tool is not installed or not on sys.path.
    systemExit(2, "Unable to find system snapshot tool.\n"
                  "Error: %s" % e)


def main():
    """Delegate to the snapshot tool; turn Ctrl-C into a clean exit 0."""
    try:
        rc = systemSnapshot.main()
    except KeyboardInterrupt:
        systemExit(0, "\nUser interrupted process.")
    else:
        return rc
    # Unreachable (systemExit never returns); kept as a defensive default.
    return 0

if __name__ == '__main__':
    try:
        # Normalize any return value into a non-negative exit status.
        sys.exit(abs(main() or 0))
    except KeyboardInterrupt:
        systemExit(0, "\nUser interrupted process.")
07070100000025000081B400000000000000000000000162C3F37D0000047D000000000000000000000000000000000000002000000000spacewalk-utils/Makefile.pythonTHIS_MAKEFILE := $(realpath $(lastword $(MAKEFILE_LIST)))
CURRENT_DIR := $(dir $(THIS_MAKEFILE))
include $(CURRENT_DIR)../rel-eng/Makefile.python

# Docker tests variables
DOCKER_CONTAINER_BASE = uyuni-master
DOCKER_REGISTRY       = registry.mgr.suse.de
# $$ defers expansion to the shell so the container receives the host's
# PYTHONPATH. A single $ would make make expand the (empty) variable $P,
# producing the broken value "PYTHONPATH=YTHONPATH".
DOCKER_RUN_EXPORT     = "PYTHONPATH=$$PYTHONPATH"
DOCKER_VOLUMES        = -v "$(CURDIR)/..:/manager"

# Lint the python scripts in this directory into reports/pylint.log.
# NOTE(review): piping the first grep's output into a recursive grep (-rIzl)
# looks wrong -- presumably the intent is to select files whose shebang is
# /usr/bin/python*; verify the file list this actually produces.
__pylint ::
	$(call update_pip_env)
	pylint --rcfile=pylintrc $(shell grep python ./* | grep -rIzl '^#!/usr/bin') > reports/pylint.log || true

# Run the unit tests. Each recipe line runs in its own shell, so the
# directory change must share a line with the pytest invocation -- a bare
# "cd tests" on its own line has no effect on the following command.
__pytest ::
	$(call update_pip_env)
	$(call install_pytest)
	cd tests && pytest --disable-warnings --tb=native --color=yes -v

# Run the __pylint target inside the CI container image.
docker_pylint ::
	docker run --rm -e $(DOCKER_RUN_EXPORT) $(DOCKER_VOLUMES) $(DOCKER_REGISTRY)/$(DOCKER_CONTAINER_BASE)-pgsql /bin/sh -c "cd /manager/utils; make -f Makefile.python __pylint"

# Run the __pytest target inside the CI container image.
docker_pytest ::
	docker run --rm -e $(DOCKER_RUN_EXPORT) $(DOCKER_VOLUMES) $(DOCKER_REGISTRY)/$(DOCKER_CONTAINER_BASE)-pgsql /bin/sh -c "cd /manager/utils; make -f Makefile.python __pytest"

# Open an interactive shell in the CI container image (debugging aid).
docker_shell ::
	docker run -t -i --rm -e $(DOCKER_RUN_EXPORT) $(DOCKER_VOLUMES) $(DOCKER_REGISTRY)/$(DOCKER_CONTAINER_BASE)-pgsql /bin/bash
07070100000026000081B400000000000000000000000162C3F37D00009CC1000000000000000000000000000000000000001F00000000spacewalk-utils/cloneByDate.py#
# Clone channels by a particular date
#
# Copyright (c) 2008--2017 Red Hat, Inc.
#
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#

import os
import sys
import shutil
import tempfile
import pprint
import subprocess
import datetime
import re

try:
    import xmlrpclib
except ImportError:
    import xmlrpc.client as xmlrpclib  # pylint: disable=F0401

try:
    from spacewalk.common import rhnLog
    from spacewalk.common.rhnConfig import CFG, initCFG
    from spacewalk.common.rhnLog import log_debug, log_clean
    from spacewalk.satellite_tools.progress_bar import ProgressBar
    from spacewalk.server import rhnSQL
except ImportError:
    # pylint: disable=F0401
    _LIBPATH = "/usr/share/rhn"
    if _LIBPATH not in sys.path:
        sys.path.append(_LIBPATH)
    from server import rhnSQL
    from common import rhnLog
    from common.rhnLog import log_debug, log_clean
    from common.rhnConfig import CFG, initCFG
    from satellite_tools.progress_bar import ProgressBar

from .depsolver import DepSolver


LOG_LOCATION = '/var/log/rhn/errata-clone.log'


def confirm(txt, options):
    """Prompt the user with *txt* until they answer y or n; abort on 'n'.

    Skipped entirely when options.assumeyes is set. Exits the process
    with status 0 if the user declines.
    """
    if options.assumeyes:
        return
    answer = input(txt)
    while answer.lower() not in ('y', 'n'):
        answer = input(txt)
    if answer.lower() == "n":
        print("Cancelling")
        sys.exit(0)
    print("")


def validate(channel_labels):
    """Run repoclosure across the given channels' repodata.

    Each channel's repodata is copied into its own temporary directory so
    repoclosure can treat it as a standalone repository; the temporary
    directories are removed afterwards.
    """
    staged = {}
    for entry in channel_labels:
        label = entry[0]
        workdir = tempfile.mkdtemp()
        staged[label] = workdir
        shutil.copytree(repodata(label), "%s/repodata/" % workdir)

    cmd = ["repoclosure"]
    for label, workdir in list(staged.items()):
        cmd += ["--repofrompath=%s,%s" % (label, workdir),
                "--repoid=%s" % (label)]
    subprocess.call(cmd)

    # best-effort cleanup (ignore_errors=True)
    for workdir in list(staged.values()):
        shutil.rmtree(workdir, True)


def repodata(label):
    """Return the cached repodata directory path for channel *label*."""
    base = CFG.REPOMD_CACHE_MOUNT_POINT
    return base + "/rhn/repodata/" + label


def create_repodata_link(src_path, dst_path):
    """Ensure dst_path is a symlink pointing at src_path.

    Creates missing parent directories and replaces a dangling link, but
    leaves an existing link with a live target untouched.
    """
    parent = os.path.dirname(dst_path)
    if not os.path.exists(parent):
        os.makedirs(parent)
    if os.path.exists(dst_path):
        # link (or file) already resolves to something -- nothing to do
        return
    if os.path.lexists(dst_path):
        # dangling symlink: drop it before re-linking
        os.unlink(dst_path)
    os.symlink(src_path, dst_path)


def remove_repodata_link(link_path):
    """Delete link_path if it resolves to something; otherwise do nothing."""
    return os.unlink(link_path) if os.path.exists(link_path) else None


def diff_packages(old, new):
    """Return the packages present in *new* but absent from *old*.

    Packages are dicts compared by their "id" key; the returned list
    contains the *new* entries whose id does not occur in *old*.
    """
    known_ids = {pkg["id"] for pkg in old}
    by_id = {pkg["id"]: pkg for pkg in new}
    return [by_id[pid] for pid in set(by_id) - known_ids]


def main(options):
    """Entry point for spacewalk-clone-by-date.

    Builds one ChannelTreeCloner per channel tree in options.channels,
    optionally creates missing destination channels, then either validates
    the trees, writes dry-run reports, or performs the actual clone.

    :param options: parsed command-line/config options (argparse-style
                    namespace; mutated in place for use_update_date/parents).
    :raises UserError: on inconsistent channel specifications.
    :raises UserRepoError: when a channel's repodata is missing.
    """
    xmlrpc = RemoteApi(options.server, options.username, options.password)
    db = DBApi()
    initCFG('server')
    rhnLog.initLOG(LOG_LOCATION)

    # Log the invocation, but never the real password.
    # NOTE: vars() returns the options' own dict, so this also masks
    # options.password for the rest of the run (xmlrpc already captured it).
    cleansed = vars(options)
    cleansed["password"] = "*****"
    log_clean(0, "")
    log_debug(0, "Started spacewalk-clone-by-date")
    log_clean(0, pprint.pformat(cleansed))

    print("Reading repository information.")
    # Collapse the boolean flag into the errata date column to filter on.
    if options.use_update_date:
        options.use_update_date = 'update_date'
    else:
        options.use_update_date = 'issue_date'
    print("Using %s." % options.use_update_date)

    cloners = []
    needed_channels = []
    errata = None
    if options.errata:
        errata = set(options.errata)
    for channel_list in options.channels:
        parents = None
        if options.parents:
            # if only the dest parent is specified, look up the src parent
            if len(options.parents) == 1:
                src_parent = xmlrpc.get_original(options.parents[0])
                if not src_parent:
                    print(("Channel %s is not a cloned channel." % options.parents[0]))
                    sys.exit(1)
                print("Looking up the original channel for %s, %s found" % (
                    options.parents[0], src_parent))
                options.parents = [src_parent] + options.parents
            # options.parents is only set by command line, this must be the
            # only channel tree
            parents = options.parents

        # Handle the new-style channel specification that uses
        # key value pairs. Transform into channel / parent setup that
        # ChannelTreeCloner expects. This code has to be here now that you can
        # specify parents for multiple trees.
        # TODO: the channel / parents structure needs to be cleaned up throughout
        # clone-by-date. Probably best thing would to make everywhere use the
        # dict structure instead of the list structure.
        for src_channel in list(channel_list.keys()):
            dest_channel = channel_list[src_channel]
            # new-style config file channel specification
            if isinstance(dest_channel, dict):
                if 'label' not in dest_channel:
                    raise UserError("You must specify a label for the clone of %s" % src_channel)
                label = dest_channel['label']
                # name/summary/description all default to the label
                name = dest_channel.get('name', label)
                summary = dest_channel.get('summary', label)
                description = dest_channel.get('description', label)
                # This is the options.parents equivalent for config files.
                # Add channels to parents option and remove from channels.
                if ('existing-parent-do-not-modify' in dest_channel
                        and dest_channel['existing-parent-do-not-modify']):
                    parents = [src_channel, label]
                    del channel_list[src_channel]
                else:  # else transform channel_list entry to the list format
                    channel_list[src_channel] = [label, name, summary,
                                                 description]

        # before we start make sure we can get repodata for all channels
        # involved.
        channel_labels = list(channel_list.keys())
        for label in channel_labels:
            if not os.path.exists(repodata(label)):
                raise UserRepoError(label)
        # ensure the parent's channel metadata is available
        if parents:
            for label in parents:
                if not os.path.exists(repodata(label)):
                    raise UserRepoError(label)

        # if cloning specific errata validate that they actually exist
        # in the original channels
        if options.errata:
            for channel in channel_labels:
                channel_errata = set(xmlrpc.list_errata(channel))
                if set(errata - channel_errata):
                    print(("Error: all errata specified with --errata must "
                           + "exist in every original channel cloned in "
                           + "this operation."))
                    print(("Channel %s does not contain these errata: %s" %
                           (channel, errata - channel_errata)))
                    sys.exit(1)

        tree_cloner = ChannelTreeCloner(channel_list, xmlrpc, db,
                                        options.to_date, options.blacklist,
                                        options.removelist,
                                        options.security_only, options.use_update_date,
                                        options.no_errata_sync, errata,
                                        options.skip_errata_depsolve, parents)

        cloners.append(tree_cloner)
        needed_channels += list(tree_cloner.needing_create().values())

    if options.validate:
        if needed_channels:
            raise UserError("Cannot validate channels that do not exist %s" %
                            ', '.join(map(str, needed_channels)))
        for channel_list in options.channels:
            validate(list(channel_list.values()))
        return

    if needed_channels:
        print("\nBy continuing the following channels will be created: ")
        print(", ".join(needed_channels))
        confirm("\nContinue with channel creation (y/n)?", options)
        for cloner in cloners:
            cloner.create_channels(options.skip_depsolve)

    for tree_cloner in cloners:
        tree_cloner.prepare()

    if options.dry_run:
        # Write one report file per destination channel listing the errata
        # that would be cloned, then exit without touching anything.
        for tree_cloner in cloners:
            separator = "|"
            d_errata = tree_cloner.get_errata_to_clone()
            now = datetime.datetime.now()
            for ch in d_errata:
                log_file = ch + "_" + now.strftime("%Y-%m-%d-%H:%M")
                print("# Log file: " + log_file)
                # context manager guarantees the file is closed even if a
                # write fails (the original leaked the handle on error)
                with open(log_file, 'w') as fh:
                    for erratum in d_errata[ch]:
                        line = ""
                        # dump every field except the internal id
                        for item in list(set(erratum) - set(['id'])):
                            line = line + str(erratum[item]) + separator
                        fh.write(line + "\n")
        sys.exit(0)

    print("\nBy continuing the following will be cloned:")
    total = 0
    for cloner in cloners:
        cloner.pre_summary()
        total += cloner.pending()

    if total == 0:
        print("\nNo errata to clone, checking removelist.")
        for cloner in cloners:
            cloner.remove_packages()
        sys.exit(0)

    confirm("\nContinue with clone (y/n)?", options)
    for cloner in cloners:
        cloner.clone(options.skip_depsolve)
        cloner.remove_packages()


class ChannelTreeCloner:

    """Clones a whole channel tree: one parent channel plus its children.

    Usage:
        a = ChannelTreeCloner(channel_hash, xmlrpc, db, to_date, blacklist,
            removelist, security_only, use_update_date,
            no_errata_sync, errata, skip_errata_depsolve, parents)
        a.create_channels()
        a.prepare()
        a.clone()
         """
    # pylint: disable=R0902

    def __init__(self, channels, remote_api, db_api, to_date, blacklist,
                 removelist, security_only, use_update_date,
                 no_errata_sync, errata, skip_errata_depsolve, parents=None ):
        # channels: dict mapping source channel label -> destination channel
        # spec (a list whose first element is the destination label)
        self.remote_api = remote_api
        self.db_api = db_api
        self.channel_map = channels
        self.to_date = to_date
        self.cloners = []
        self.blacklist = blacklist
        self.removelist = removelist
        # parents, when given, is a (src_parent, dest_parent) pair of
        # already-existing channels; otherwise the parents are derived
        # from the channel map by find_parent()
        if parents:
            self.src_parent = parents[0]
            self.dest_parent = parents[1]
            self.parents_specified = True
        else:
            self.src_parent = None
            self.dest_parent = None
            self.parents_specified = False
        self.channel_details = None
        self.security_only = security_only
        self.use_update_date = use_update_date
        self.no_errata_sync = no_errata_sync
        self.skip_errata_depsolve = skip_errata_depsolve
        self.solver = None
        # visited: dest label -> set of package tuples already considered
        # during dep-solving; stops the recursion in process_deps()
        self.visited = {}

        self.validate_source_channels()
        # one ChannelCloner per (source, destination) pair, parent first
        for from_label in self.ordered_labels():
            to_label = self.channel_map[from_label][0]
            cloner = ChannelCloner(from_label, to_label, self.to_date,
                                   self.remote_api, self.db_api,
                                   self.security_only, self.use_update_date,
                                   self.no_errata_sync, errata, skip_errata_depsolve)
            self.cloners.append(cloner)

    def needing_create(self):
        """
        returns a trimmed down version of channel_map where the
        value needs creating
        """
        to_create = {}
        existing = self.remote_api.list_channel_labels()
        if self.parents_specified:
            if (self.dest_parent not in existing
                    or self.src_parent not in existing):
                raise UserError("Channels specified with --parents must"
                                + " already exist.\nIf you want to clone the"
                                + " parent channels too simply add another"
                                + " --channels option.")
        for src, dest in list(self.channel_map.items()):
            if dest[0] not in existing:
                to_create[src] = dest[0]
        return to_create

    def pending(self):
        """Total number of errata still to be cloned across all cloners."""
        total = 0
        for cloner in self.cloners:
            total += cloner.pending()
        return total

    def find_cloner(self, src_label):
        """Return the cloner whose source channel is src_label, or None."""
        for cloner in self.cloners:
            if cloner.src_label() == src_label:
                return cloner
        return None

    def create_channels(self, skip_depsolve=False):
        """Create the missing destination channels (parent first), then
        dep-solve all packages pulled in by the creation unless asked not to."""
        to_create = self.needing_create()

        if not to_create:
            return
        if self.parents_specified:
            dest_parent = [self.dest_parent]
        else:
            dest_parent = self.channel_map[self.src_parent]
        nvreas = []

        #clone the destination parent if it doesn't exist
        if dest_parent[0] in list(to_create.values()):
            self.remote_api.clone_channel(self.src_parent, dest_parent, None)
            del to_create[self.src_parent]
            cloner = self.find_cloner(self.src_parent)
            nvreas += [pkg['nvrea'] for pkg in
                       list(cloner.reset_new_pkgs().values())]
        #clone the children
        for cloner in self.cloners:
            if cloner.dest_label() in list(to_create.values()):
                dest = self.channel_map[cloner.src_label()]
                self.remote_api.clone_channel(cloner.src_label(),
                                              dest, dest_parent[0])
                nvreas += [pkg['nvrea'] for pkg in
                           list(cloner.reset_new_pkgs().values())]

        #dep solve all added packages with the parent channel
        if not skip_depsolve:
            self.dep_solve(nvreas, labels=(list(to_create.keys())
                                           + [self.src_parent]))

    def validate_source_channels(self):
        """Look up source channel details, derive the source parent when it
        was not given, and check the source tree is consistent."""
        self.channel_details = self.remote_api.channel_details(
            self.channel_map, values=False)
        if not self.src_parent:
            self.src_parent = self.find_parent(list(self.channel_map.keys()))
        self.validate_children(self.src_parent, list(self.channel_map.keys()))

    def validate_dest_channels(self):
        """Same as validate_source_channels(), but for the destinations."""
        self.channel_details = self.remote_api.channel_details(
            self.channel_map)
        if not self.dest_parent:
            self.dest_parent = self.find_parent(list(self.channel_map.values()))
        self.validate_children(self.dest_parent, list(self.channel_map.values()))

    def validate_children(self, parent, channel_list):
        """ Make sure all children are children of the parent"""
        for channel in channel_list:
            # destination entries are lists; the label is the first element
            if isinstance(channel, type([])):
                channel = channel[0]
            if channel != parent:
                if (self.channel_details[channel]['parent_channel_label']
                        != parent):
                    raise UserError(("Child channel '%s' is not a child of "
                                     + "parent channel '%s'. If you are using --config "
                                     + "ensure you have not specified "
                                     + "existing-parent-do-not-modify on a child "
                                     + "channel.") % (channel, parent))

    def find_parent(self, label_list):
        """Return the single label in label_list that has no parent channel.

        :raises UserError: when no parent, or more than one, is found
        """
        found_list = []
        for label in label_list:
            if isinstance(label, type([])):
                label = label[0]
            if self.channel_details[label]['parent_channel_label'] == '':
                found_list.append(label)
        if not found_list:
            raise UserError("Parent Channel not specified.")
        if len(found_list) > 1:
            raise UserError("Multiple parent channels specified within the "
                            + "same channel tree.")
        return found_list[0]

    def ordered_labels(self):
        """Return list of labels with parent first"""
        if self.parents_specified:
            return list(self.channel_map.keys())
        labels = list(self.channel_map.keys())
        labels.remove(self.src_parent)
        labels.insert(0, self.src_parent)
        return labels

    def prepare(self):
        """Validate destination channels and let every cloner snapshot its
        package set and compute the errata to clone."""
        self.validate_dest_channels()
        for cloner in self.cloners:
            cloner.prepare()

    def get_errata_to_clone(self):
        """Return a dict '<src>_to_<dest>' -> list of errata to be cloned."""
        d_result = {}
        for cloner in self.cloners:
            d_result[cloner.src_label() + "_to_" + cloner.dest_label()] = \
                cloner.get_errata_to_clone()
        return d_result

    def pre_summary(self):
        """Print a one-line errata summary for every channel pair."""
        for cloner in self.cloners:
            cloner.pre_summary()

    def clone(self, skip_depsolve=False):
        """Clone the errata of every channel pair, log the resulting package
        diff, and dep-solve all newly added packages unless skipped."""
        added_pkgs = []
        for cloner in self.cloners:
            cloner.process()
            pkg_diff = cloner.pkg_diff()
            added_pkgs += pkg_diff
            log_clean(0, "")
            log_clean(0, "%i packages were added to %s as a result of clone:"
                      % (len(pkg_diff), cloner.dest_label()))
            sorted_pkg_diff = sorted(pkg_diff, key=lambda p: p['nvrea'])
            log_clean(0, "\n".join([pkg['nvrea'] for pkg in sorted_pkg_diff]))

        if added_pkgs and not skip_depsolve:
            self.dep_solve([pkg['nvrea'] for pkg in added_pkgs])

    def dep_solve(self, nvrea_list, labels=None):
        """Run the dependency solver for nvrea_list against the repodata of
        the given channel labels (defaults to all source channels)."""
        if not labels:
            labels = list(self.channel_map.keys())
        repos = [{"id": label, "relative_path": repodata(label)}
                 for label in labels]

        print("Copying repodata, please wait.")

        # dep solver expects the metadata to be in /repodata directory;
        # create temporary symlinks
        temp_repo_links = []
        repo = None
        for repo in repos:
            repodata_path = "%s/repodata" % (repo['relative_path'])
            create_repodata_link(repo['relative_path'], repodata_path)
            temp_repo_links.append(repodata_path)
        try:
            try:
                self.solver = DepSolver(repos)
                self.__dep_solve(nvrea_list)
                self.report_depsolve_results()
            except Exception as e:
                # NOTE(review): `repo` here is whatever the setup loop last
                # iterated over, which is not necessarily the repo that
                # caused the failure
                raise UserRepoError(repo["id"], e)
        finally:
            # clean up temporary symlinks
            for link in temp_repo_links:
                remove_repodata_link(link)

    def __dep_solve(self, nvrea_list):
        """Feed nvrea_list to the solver and hand results to process_deps()."""
        self.solver.setPackages(nvrea_list)
        dep_results = self.solver.processResults(self.solver.getDependencylist())
        self.process_deps(dep_results)

    def process_deps(self, deps):
        """Add, per destination channel, the dependency packages that exist in
        the source but not in the destination, then recurse on what was added."""
        list_to_set = lambda x: set([tuple(y) for y in x])  # pylint: disable=consider-using-set-comprehension
        needed_list = dict((channel[0], [])
                           for channel in list(self.channel_map.values()))
        # needed_list values are still empty here, so this effectively
        # initializes the per-destination visited sets on the first pass
        for cloner in self.cloners:
            if not cloner.dest_label() in self.visited:
                self.visited[cloner.dest_label()] = list_to_set(needed_list[cloner.dest_label()])
            self.visited[cloner.dest_label()] |= list_to_set(needed_list[cloner.dest_label()])

        print('Processing Dependencies:')
        pb = ProgressBar(prompt="", endTag=' - complete',
                         finalSize=len(deps), finalBarLength=40, stream=sys.stdout)
        pb.printAll(1)

        #loop through all the deps and find any that don't exist in the
        #  destination channels
        for pkg in deps:
            pb.addTo(1)
            pb.printIncrement()
            for solved_list in list(pkg.values()):
                for cloner in self.cloners:
                    if cloner.src_pkg_exist(solved_list) and not cloner.dest_pkg_exist(solved_list):
                        #grab oldest package
                        needed_list[cloner.dest_label()].append(solved_list[0])

        added_nevras = set()
        for cloner in self.cloners:
            needed = needed_list[cloner.dest_label()]
            needed_str = list_to_set(needed)
            # drop anything already handled in a previous recursion level
            for needed_pkg in needed_str:
                if needed_pkg in self.visited[cloner.dest_label()]:
                    while list(needed_pkg) in needed:
                        needed.remove(list(needed_pkg))
            self.visited[cloner.dest_label()] |= needed_str
            if needed:
                next_added = set(cloner.process_deps(needed))
                added_nevras = added_nevras | next_added
                cloner.total_added_nevras += len(next_added)

        pb.printComplete()

        # recursively solve dependencies to get dependencies-of-dependencies
        if added_nevras:
            print('Dependencies added, looking for new dependencies')
            self.__dep_solve(list(added_nevras))

    def remove_packages(self):
        """Apply the removelist and blacklist filters on every destination."""
        for cloner in self.cloners:
            if self.removelist:
                cloner.remove_removelist(self.removelist)
            if self.blacklist:
                cloner.remove_blacklisted(self.blacklist)


    def report_depsolve_results(self):
        """Print per-channel counts of RPMs/errata added by dep-solving and
        reset the counters afterwards."""
        reported = 0
        for cloner in self.cloners:
            if cloner.total_added_nevras > 0:
                reported = 1
                print('%s RPM(s) added to %s to resolve dependencies.' \
                       % (cloner.total_added_nevras, cloner.dest_label()))
                cloner.total_added_nevras = 0
            if cloner.total_added_errata > 0:
                reported = 1
                print('%s errata added to %s to resolve dependencies.' \
                       % (cloner.total_added_errata, cloner.dest_label()))
                cloner.total_added_errata = 0

        if reported:
            print('Please see %s for details.' % LOG_LOCATION)

class ChannelCloner:

    """Clones applicable errata (and the packages they bring along) from one
    source channel into one destination (cloned) channel."""
    # pylint: disable=R0902

    def __init__(self, from_label, to_label, to_date, remote_api, db_api,
                 security_only, use_update_date, no_errata_sync, errata,
                 skip_errata_depsolve):
        # counters read and reset by ChannelTreeCloner.report_depsolve_results()
        self.total_added_nevras = 0
        self.total_added_errata = 0
        self.remote_api = remote_api
        self.db_api = db_api
        self.from_label = from_label
        self.to_label = to_label
        self.to_date = to_date
        self.from_pkg_hash = None
        self.errata_to_clone = None
        self.available_errata = None
        # nvrea -> package dict snapshots of the destination channel, taken
        # before (old) and after (new) the clone, used for pkg_diff()
        self.new_pkg_hash = {}
        self.old_pkg_hash = {}
        self.security_only = security_only
        self.use_update_date = use_update_date
        self.no_errata_sync = no_errata_sync
        self.errata = errata
        self.skip_errata_depsolve = skip_errata_depsolve
        # construct a set of every erratum name in the original channel
        self.original_errata = set(self.remote_api.list_errata(self.from_label))
        # package id -> advisory of the source-channel erratum providing it
        # (None when none exists); filled lazily by process_deps()
        self.original_pid_errata_map = {}
        # number of errata per XML-RPC clone call
        self.bunch_size = 10

    def dest_label(self):
        """Label of the destination (clone) channel."""
        return self.to_label

    def src_label(self):
        """Label of the source channel."""
        return self.from_label

    def pkg_diff(self):
        """Packages present in the destination now but not before cloning."""
        return diff_packages(list(self.old_pkg_hash.values()),
                             list(self.new_pkg_hash.values()))

    def reset_original_pkgs(self):
        """Snapshot the destination channel's packages as the 'before' state."""
        self.old_pkg_hash = dict((pkg['nvrea'], pkg)
                                 for pkg in self.remote_api.list_packages(self.to_label))
        return self.old_pkg_hash

    def reset_new_pkgs(self):
        """Snapshot the destination channel's packages as the 'after' state."""
        self.new_pkg_hash = dict((pkg['nvrea'], pkg)
                                 for pkg in self.remote_api.list_packages(self.to_label))
        return self.new_pkg_hash

    def reset_from_pkgs(self):
        """Snapshot the source channel's packages, keyed by nvrea."""
        self.from_pkg_hash = dict((pkg['nvrea'], pkg)
                                  for pkg in self.remote_api.list_packages(self.from_label))

    def prepare(self):
        """Take the 'before' snapshot and compute the errata selection."""
        self.reset_original_pkgs()
        self.errata_to_clone, self.available_errata = self.get_errata()

    def pending(self):
        """Number of errata selected for cloning."""
        return len(self.errata_to_clone)

    def get_errata_to_clone(self):
        """Errata selected for cloning (computed by prepare())."""
        return self.errata_to_clone

    def pre_summary(self):
        """Print 'src -> dest (selected/available Errata)' for this pair."""
        print("  %s -> %s  (%i/%i Errata)" % (self.from_label, self.to_label,
                                              len(self.errata_to_clone), len(self.available_errata)))

    def process(self):
        """Run the clone for this channel pair."""
        self.clone()
        #print "New packages added: %i" % (len(self.new_pkg_hash)
        #        - len(self.old_pkg_hash))

    def process_deps(self, needed_pkgs):
        """Add dependency packages to the destination channel.

        Packages provided by an erratum of the source channel are brought in
        by cloning that erratum; the rest are added as raw packages.  Returns
        the set of nvrea strings that were added (directly or via errata).
        """
        needed_ids = []
        needed_names = set()
        for pkg in needed_pkgs:
            found = self.src_pkg_exist([pkg])
            if found:
                needed_ids.append(found['id'])
                needed_names.add(found['nvrea'])

        # map each needed package id to a providing erratum of the source
        # channel; ids with no such erratum stay in needed_ids and are
        # added to the channel directly
        needed_errata = set() # list, [0] = advisory, [1] = synopsis
        still_needed_pids = []
        for pid in needed_ids:
            if pid not in self.original_pid_errata_map:
                errata_list = self.remote_api.list_providing_errata(pid)
                for erratum in errata_list:
                    if erratum['advisory'] in self.original_errata:
                        self.original_pid_errata_map[pid] = \
                            erratum['advisory']
                        needed_errata.add((self.original_pid_errata_map[pid], erratum['synopsis']))
                        break
                else:  # no match found, store so we don't repeat search
                    self.original_pid_errata_map[pid] = None
                    still_needed_pids.append(pid)
        needed_ids = still_needed_pids

        # Log the RPMs we're adding due to dep-solving
        needed_name_set = sorted(set(needed_names))
        if needed_name_set:
            log_clean(0, "")
            log_clean(0, "Adding %i RPM(s) needed for dependencies to %s" % (len(needed_name_set), self.to_label))
            for name in needed_name_set:
                log_clean(0, name)

        # Clone (and log) the errata we are adding for same
        if needed_errata:
            self.total_added_errata += len(needed_errata)
            log_clean(0, "")
            log_clean(0, "Cloning %i errata for dependencies to %s :" % (len(needed_errata), self.to_label))
            needed_errata_list = sorted(list(needed_errata))
            while(needed_errata_list):
                # clone in bunches of self.bunch_size errata per RPC call
                errata_set = needed_errata_list[:self.bunch_size]
                del needed_errata_list[:self.bunch_size]
                for e in errata_set:
                    log_clean(0, "%s - %s" % e)
                    # unless told to skip, record the erratum's own packages
                    # so the recursive depsolve considers them as well
                    if not self.skip_errata_depsolve:
                        e_pkgs = self.remote_api.get_erratum_packages(e[0])
                    else:
                        e_pkgs = []

                    for pkg in e_pkgs:
                        if self.from_label in pkg['providing_channels']:
                            pkg['nvrea'] = "%s-%s-%s.%s" % (pkg['name'],
                                                            pkg['version'],
                                                            pkg['release'],
                                                            pkg['arch_label'])
                            needed_names.add(pkg['nvrea'] )
                self.remote_api.clone_errata(self.to_label, [e[0] for e in errata_set])

        if needed_ids:
            self.remote_api.add_packages(self.to_label, needed_ids)

        self.reset_new_pkgs()
        return needed_names

    def src_pkg_exist(self, needed_list):
        """True-ish (the package dict) when any of needed_list is in the
        source channel; loads the source snapshot lazily."""
        if not self.from_pkg_hash:
            self.reset_from_pkgs()
        return self.pkg_exists(needed_list, self.from_pkg_hash)

    def dest_pkg_exist(self, needed_list):
        """True-ish when any of needed_list is in the destination snapshot."""
        return self.pkg_exists(needed_list, self.new_pkg_hash)

    @staticmethod
    def pkg_exists(needed_list, pkg_list):
        """Given a list of packages in [N, EVR, A] format, do any of them
            exist in the pkg_hash with key of N-V-R.A  format"""
        for i in needed_list:
            # NOTE(review): this produces "N-EVR-.A" (note the '-.'), which
            # does not obviously match the documented N-V-R.A key scheme --
            # confirm against the DepSolver's result tuple format
            key = "%s-%s-.%s" % (i[0], i[1], i[2])
            if key in pkg_list:
                return pkg_list[key]
        return False

    def clone(self):
        """Clone the selected errata into the destination channel in bunches,
        align modular metadata, and (unless disabled) sync the clones with
        their originals."""
        errata_ids = [e["advisory_name"] for e in self.errata_to_clone]
        if not errata_ids:
            return

        msg = 'Cloning Errata into %s (%i):' % (self.to_label, len(errata_ids))
        print(msg)
        log_clean(0, "")
        log_clean(0, msg)
        for e in sorted(self.errata_to_clone, key=lambda x: x['advisory_name']):
            log_clean(0, "%s - %s" % (e['advisory_name'], e['synopsis']))

        pb = ProgressBar(prompt="", endTag=' - complete',
                         finalSize=len(errata_ids), finalBarLength=40,
                         stream=sys.stdout)
        pb.printAll(1)
        while(errata_ids):
            # consume errata_ids in bunches of self.bunch_size per RPC call
            errata_set = errata_ids[:self.bunch_size]
            del errata_ids[:self.bunch_size]
            self.remote_api.clone_errata(self.to_label, errata_set)
            pb.addTo(self.bunch_size)
            pb.printIncrement()

        # align modular metadata
        md_aligned = self.remote_api.align_modular_metadata(
            self.from_label, self.to_label)
        if md_aligned == 1:
            print("\nModular metadata aligned")

        self.reset_new_pkgs()
        pb.printComplete()

        if not self.no_errata_sync:
            log_clean(0, "")
            log_clean(0, "Synchronizing Errata in %s with originals"
                      % self.to_label)
            self.remote_api.sync_errata(self.to_label)

    def get_errata(self):
        """ Returns tuple of all available for cloning and what falls in
        the date range or is in the errata list"""
        available_errata = self.db_api.applicable_errata(self.from_label,
                                                         self.to_label)
        to_clone = []
        for err in available_errata:
            if self.errata:
                # explicit errata list given: take only the named advisories
                if err['advisory_name'] in self.errata:
                    to_clone.append(err)
            else:
                # otherwise filter by date (issue or update date, depending
                # on use_update_date) and optionally security-only
                if (self.to_date and err[self.use_update_date].date()
                        <= self.to_date.date()):
                    if self.security_only:
                        if err['advisory_type'] == 'Security Advisory':
                            to_clone.append(err)
                    else:
                        to_clone.append(err)

        return (to_clone, available_errata)

    def __remove_packages(self, names_dict, pkg_list, name):
        """Base removal of packages
            names_dict  - dict containing  list of package names, with channel
                          labels as keys
            pkg_list  -  list of package dicts to consider
            name   - name of removal  'blacklist' or 'removelist', for display
        """
        found_ids = []
        found_names = []
        if not names_dict:
            return

        # patterns under "ALL" apply to every channel; per-channel patterns
        # are looked up under this cloner's destination label
        full_pkgs = []
        if "ALL" in names_dict:
            full_pkgs += names_dict["ALL"]
        if self.dest_label() in names_dict:
            full_pkgs += names_dict[self.dest_label()]

        # removes all empty string pattern from the list
        # e.g.: ["", ".*apache.*"] ==> [".*apache.*"], see bsc#1089396
        full_pkgs = [x for x in full_pkgs if x != '']

        # all patterns are combined into one alternation regex
        reg_ex = re.compile("|".join(full_pkgs))

        # do the matching only if there is a reg_ex criteria
        if reg_ex.pattern:
            for pkg in pkg_list:
                if reg_ex.match(pkg['name']):
                    found_ids.append(pkg['id'])
                    found_names.append(pkg['nvrea'])

        log_clean(0, "")
        log_clean(0, "%s: Removing %i packages from %s." %
                  (name, len(found_ids), self.to_label))
        log_clean(0, "\n".join(found_names))

        if found_ids:
            print("%s: Removing %i packages from %s" % (name, len(found_ids),
                                                        self.to_label))
            self.remote_api.remove_packages(self.to_label, found_ids)

    def remove_removelist(self, pkg_names):
        """Remove removelist matches from the full destination package set."""
        self.__remove_packages(pkg_names, list(self.reset_new_pkgs().values()),
                               "Removelist")

    def remove_blacklisted(self, pkg_names):
        """Remove blacklist matches, but only among packages the clone added."""
        self.reset_new_pkgs()
        self.__remove_packages(pkg_names, self.pkg_diff(), "Blacklist")


class RemoteApi:

    """ Class for connecting to the XMLRPC spacewalk interface"""

    # NOTE: class attribute -- this cache is shared by every RemoteApi
    # instance in the process
    cache = {}

    def __init__(self, server_url, username, password):
        """Open an XML-RPC connection to server_url and log in.

        :raises UserError: when the server rejects the login
        """
        self.client = xmlrpclib.Server(server_url)
        self.auth_time = None
        self.auth_token = None
        try:
            self.username = username
            self.password = password
            self.__login()
        except xmlrpclib.Fault as e:
            raise UserError(e.faultString)

    def auth_check(self):
        """ makes sure that more than 15 minutes haven't passed since we
             logged in and will relogin if it has
        """
        if not self.auth_time or (datetime.datetime.now()
                                  - self.auth_time).seconds > 60 * 15:  # 15 minutes
            self.__login()

    def __login(self):
        """Log in and remember the session token and the login time."""
        self.auth_token = self.client.auth.login(self.username, self.password)
        self.auth_time = datetime.datetime.now()

    def list_channel_labels(self):
        """Return the labels of all channels; cached after the first call."""
        self.auth_check()
        key = "chan_labels"
        if key in self.cache:
            return self.cache[key]

        chan_list = self.client.channel.listAllChannels(self.auth_token)
        to_ret = []
        for item in chan_list:
            to_ret.append(item["label"])
        self.cache[key] = to_ret
        return to_ret

    def channel_details(self, label_hash, keys=True, values=True):
        """Return channel details keyed by label.

        label_hash maps source label -> destination spec list; details are
        fetched for the sources (keys=True) and/or the destinations'
        first elements (values=True).
        """
        self.auth_check()
        to_ret = {}
        for src, dst in list(label_hash.items()):
            if keys:
                to_ret[src] = self.get_details(src)
            if values:
                to_ret[dst[0]] = self.get_details(dst[0])
        return to_ret

    def list_packages(self, label):
        """List all packages of a channel, adding an 'nvrea' key
        (name-version-release.arch) to each package dict."""
        self.auth_check()
        pkg_list = self.client.channel.software.listAllPackages(
            self.auth_token, label)
        #name-ver-rel.arch,
        for pkg in pkg_list:
            pkg['nvrea'] = "%s-%s-%s.%s" % (pkg['name'], pkg['version'],
                                            pkg['release'], pkg['arch_label'])
        return pkg_list

    def clone_errata(self, to_label, errata_list):
        """Clone the given errata into to_label with original packages."""
        self.auth_check()
        self.client.errata.cloneAsOriginal(self.auth_token, to_label,
                                           errata_list)

    def sync_errata(self, to_label):
        """Synchronize the cloned errata in to_label with their originals."""
        self.auth_check()
        self.client.channel.software.syncErrata(self.auth_token, to_label)

    def align_modular_metadata(self, from_label, to_label):
        """Align the 'modules' metadata of to_label with from_label.

        Returns 1 when the metadata was aligned.
        """
        # re-authenticate first like every other RPC wrapper in this class;
        # this call previously skipped auth_check(), so a session older than
        # 15 minutes could fault here
        self.auth_check()
        return self.client.channel.software.alignMetadata(
            self.auth_token, from_label, to_label, 'modules')

    def get_details(self, label):
        """Return the details of the channel with the given label.

        :raises UserError: when the lookup faults (e.g. unknown channel)
        """
        self.auth_check()
        try:
            return self.client.channel.software.getDetails(self.auth_token,
                                                           label)
        except xmlrpclib.Fault as e:
            raise UserError(e.faultString + ": " + label)

    def add_packages(self, label, package_ids):
        """Add packages to a channel in batches of 20.

        NOTE: consumes (empties) the caller's package_ids list.
        """
        self.auth_check()
        while(package_ids):
            pkg_set = package_ids[:20]
            del package_ids[:20]
            self.client.channel.software.addPackages(self.auth_token, label,
                                                     pkg_set)

    def remove_packages(self, label, package_ids):
        """Remove packages from a channel in batches of 20.

        NOTE: consumes (empties) the caller's package_ids list.
        """
        self.auth_check()
        while(package_ids):
            pkg_set = package_ids[:20]
            del package_ids[:20]
            self.client.channel.software.removePackages(self.auth_token,
                                                        label, pkg_set)

    def clone_channel(self, original_label, channel, parent):
        """Clone original_label with its original package set.

        channel is a list [label, name, summary, description]; everything
        after the label is optional and defaults to the label.  A non-empty
        parent becomes the clone's parent channel.
        """
        self.auth_check()
        details = {'name': channel[0], 'label': channel[0],
                   'summary': channel[0]}
        if len(channel) > 1:
            details['name'] = channel[1]
        if len(channel) > 2:
            details['summary'] = channel[2]
        if len(channel) > 3:
            details['description'] = channel[3]
        if parent and parent != '':
            details['parent_label'] = parent

        msg = "Cloning %s to %s with original package set." % (original_label,
                                                               details['label'])
        log_clean(0, "")
        log_clean(0, msg)
        print(msg)
        self.client.channel.software.clone(self.auth_token, original_label,
                                           details, True)

    def list_errata(self, channel_label):
        """Return the advisory names of all errata in channel_label."""
        self.auth_check()
        errata = self.client.channel.software.listErrata(self.auth_token,
                                                         channel_label)
        return [erratum['advisory_name'] for erratum in errata]

    def get_original(self, clone_label):
        """Return the label of the channel clone_label was cloned from."""
        self.auth_check()
        return self.client.channel.software.getDetails(self.auth_token,
                                                       clone_label)['clone_original']

    def list_providing_errata(self, pid):
        """Return the errata providing the package with id pid."""
        self.auth_check()
        return self.client.packages.listProvidingErrata(self.auth_token, pid)

    def get_erratum_packages(self, advisory_name):
        """Return the packages associated with the given advisory."""
        self.auth_check()
        return self.client.errata.listPackages(self.auth_token, advisory_name)

class DBApi:

    """Thin wrapper around the spacewalk database connection."""

    def __init__(self):
        # load the server configuration, then open the database connection
        initCFG('server')
        rhnSQL.initDB()

    @staticmethod
    def applicable_errata(from_label, to_label):
        """Return errata from *from_label* that may still be cloned into
        *to_label*.  Queried via the DB directly because errata already in
        the target channel -- including clones of them -- must be excluded."""
        query = rhnSQL.prepare("""
        select e.id, e.advisory_name, e.advisory_type, e.issue_date,
               e.synopsis, e.update_date
        from rhnErrata e  inner join
             rhnChannelErrata ce on e.id = ce.errata_id inner join
             rhnChannel c on c.id = ce.channel_id
        where C.label = :from_label and
              e.id not in
              (select e2.id
                 from rhnErrata e2 inner join
                      rhnChannelErrata ce2 on ce2.errata_id = e2.id inner join
                      rhnChannel c2 on c2.id = ce2.channel_id
                where c2.label = :to_label
                UNION
               select cloned.original_id
                 from rhnErrata e2 inner join
                      rhnErrataCloned cloned on cloned.id = e2.id inner join
                      rhnChannelErrata ce2 on ce2.errata_id = e2.id inner join
                      rhnChannel c2 on c2.id = ce2.channel_id
                where c2.label = :to_label)
        """)
        query.execute(from_label=from_label, to_label=to_label)
        # fetchall_dict() may return None; normalize to an empty list
        return query.fetchall_dict() or []


class UserError(Exception):

    """Exception for user-facing errors; str() yields the plain message."""

    def __init__(self, msg):
        # forward msg to Exception so args, repr() and pickling carry the
        # message as well (previously Exception was initialized empty)
        Exception.__init__(self, msg)
        self.msg = msg

    def __str__(self):
        return self.msg


class UserRepoError(UserError):

    """Raised when the repodata for a channel label cannot be read."""

    def __init__(self, label, error=None):
        # assemble the user-facing message line by line; the optional
        # underlying error is appended on its own line
        parts = [
            "Unable to read repository information.",
            "Please verify repodata has been generated in "
            "/var/cache/rhn/repodata/%s." % label,
        ]
        if error:
            parts.append("Error: %s" % error)
        UserError.__init__(self, "\n".join(parts))
07070100000027000041FD00000000000000000000000262C3F37D00000000000000000000000000000000000000000000001600000000spacewalk-utils/tests07070100000028000081B400000000000000000000000162C3F37D0000089F000000000000000000000000000000000000004100000000spacewalk-utils/tests/test_spacewalk_manage_channel_lifecycle.py# coding: utf-8
"""
Tests for spacewalk-manager-channel-lifecycle script.
"""
import os
import pytest
import tempfile
from mock import MagicMock, patch
from . import helpers

# The script under test has no .py extension, so temporarily expose it as a
# "smcl" module inside the tests package, import it, then drop the symlink.
helpers.symlink_source("spacewalk-manage-channel-lifecycle", "smcl")
from . import smcl
helpers.unsymlink_source("smcl")


class TestSMCL:
    """
    Integration/unit tests fusion for spacewalk-manage-channel-lifecycle script.
    """
    def test_get_current_phase(self):
        """
        Check that the lifecycle phase is extracted from a channel label.

        :return:
        """
        smcl.phases = ["dev", "test", "prod"]
        # minimal stand-in for the parsed CLI options object
        class DummyOptions:
            delimiter = '-'

        smcl.options = DummyOptions
        # a label that does not start with "<phase><delimiter>" yields None
        assert smcl.get_current_phase("develop") is None
        assert smcl.get_current_phase("dev-develop") == "dev"

    @pytest.mark.skip(reason="TBD")
    def test_argparse_port(self):
        """
        Dummy stub test for porting deprecated optparser to argparse.

        :return:
        """

    def test_configuration_saved_read(self):
        """
        Test configuration file is saved to the disk and can be read.

        :return:
        """
        with tempfile.TemporaryDirectory(prefix="smcl-", dir="/tmp") as tmpdir:
            # point the script's config locations into the throw-away dir
            smcl.CONF_DIR = os.path.join(tmpdir, ".spacewalk-manage-channel-lifecycle")
            smcl.USER_CONF_FILE = os.path.join(smcl.CONF_DIR, "settings.conf")
            smcl.SESSION_CACHE = os.path.join(smcl.CONF_DIR, "session")

            config = smcl.Config(smcl.USER_CONF_FILE)
            config.set("Millenium Falcon", "space speed", "75 MGLT")
            config.set("Millenium Falcon", "atmospheric speed", "1050 km/h")
            smcl.setup_config(config)

            # Save
            assert os.path.exists(os.path.join(tmpdir, ".spacewalk-manage-channel-lifecycle/settings.conf"))

            # read the file back through a fresh Config to prove round-tripping
            r_cfg = smcl.Config(smcl.USER_CONF_FILE)
            smcl.setup_config(r_cfg)

            assert r_cfg.get("Millenium Falcon", "space speed") == "75 MGLT"
            assert r_cfg.get("Millenium Falcon", "atmospheric speed") == "1050 km/h"
            # the default "general" settings are present after setup_config
            assert r_cfg.get("general", "phases") == "dev, test, prod"
            assert r_cfg.get("general", "exclude channels") == ""
07070100000029000081B400000000000000000000000162C3F37D00000463000000000000000000000000000000000000002100000000spacewalk-utils/tests/helpers.py# coding: utf-8
"""
Helpers for the test suite.
"""
import os


def symlink_source(script_name: str, mod_name: str, path: str = None) -> None:
    """
    Create a symlink for Python source that contains no .py extension.

    The link is created as ``<path>/tests/<mod_name>.py`` and points at
    ``<path>/<script_name>``; nothing happens when the link already exists.

    :param script_name: script name
    :param mod_name: name of the module
    :param path: path to that script.
    :return: None
    """
    base = path
    if base is None:
        # Default to the parent directory of this test package
        base = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))

    link_path = os.path.join(base, "tests", "{}.py".format(mod_name))
    if not os.path.exists(link_path):
        os.symlink(os.path.join(base, script_name), link_path)


def unsymlink_source(mod_name: str, path: str = None) -> None:
    """
    Remove symlink for Python source that contains no .py extension.

    Removing a link that does not exist is a silent no-op.

    :param mod_name: name of the symlink without an extension
    :param path:
    :return:
    """
    base = path
    if base is None:
        # Default to the parent directory of this test package
        base = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))

    link_path = os.path.join(base, "tests", "{}.py".format(mod_name))
    if os.path.exists(link_path):
        os.unlink(link_path)
0707010000002A000081B400000000000000000000000162C3F37D00000000000000000000000000000000000000000000002200000000spacewalk-utils/tests/__init__.py0707010000002B000081B400000000000000000000000162C3F37D00000EDF000000000000000000000000000000000000001900000000spacewalk-utils/pylintrc# utils package pylint configuration

[MASTER]

# Profiled execution.
profile=no

# Pickle collected data for later comparisons.
persistent=no


[MESSAGES CONTROL]

# Disable the message(s) with the given id(s).

disable=C0103

;disable=I0011,
;		C0103,
;		C0302,
;		C0111,
;		R0801,
;		R0902,
;		R0903,
;		R0904,
;		R0912,
;		R0913,
;		R0914,
;		R0915,
;		R0921,
;		R0922,
;		W0142,
;		W0403,
;		W0603,
;		C1001,
;		W0121,
;		useless-else-on-loop,
;		bad-whitespace,
;		unpacking-non-sequence,
;		superfluous-parens,
;		cyclic-import,
;		redefined-variable-type,
;		no-else-return,
;
;		# Uyuni disabled
;		E0203,
;		E0611,
;		E1101,
;		E1102

[REPORTS]

# Set the output format. Available formats are text, parseable, colorized, msvs
# (visual studio) and html
output-format=parseable

# Include message's id in output
include-ids=yes

# Tells whether to display a full report or only the messages
reports=yes

# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details
msg-template="{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}"

[VARIABLES]

# A regular expression matching names used for dummy variables (i.e. not used).
dummy-variables-rgx=_|dummy


[BASIC]

# Regular expression which should only match correct module names
#module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
module-rgx=([a-zA-Z_][a-zA-Z0-9_]+)$

# Regular expression which should only match correct module level names
const-rgx=(([a-zA-Z_][a-zA-Z0-9_]*)|(__.*__))$

# Regular expression which should only match correct class names
class-rgx=[a-zA-Z_][a-zA-Z0-9_]+$

# Regular expression which should only match correct function names
function-rgx=[a-z_][a-zA-Z0-9_]{,42}$

# Regular expression which should only match correct method names
method-rgx=[a-z_][a-zA-Z0-9_]{,42}$

# Regular expression which should only match correct instance attribute names
attr-rgx=[a-z_][a-zA-Z0-9_]{,30}$

# Regular expression which should only match correct argument names
argument-rgx=[a-z_][a-zA-Z0-9_]{,30}$

# Regular expression which should only match correct variable names
variable-rgx=[a-z_][a-zA-Z0-9_]{,30}$

# Regular expression which should only match correct list comprehension /
# generator expression variable names
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$

# Regular expression which should only match correct class attribute names
class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,42}|(__.*__))$

# Good variable names which should always be accepted, separated by a comma
good-names=i,j,k,ex,Run,_

# Bad variable names which should always be refused, separated by a comma
bad-names=foo,bar,baz,toto,tutu,tata

# List of builtins function names that should not be used, separated by a comma
bad-functions=apply,input


[DESIGN]

# Maximum number of arguments for function / method
max-args=10

# Maximum number of locals for function / method body
max-locals=20

# Maximum number of return / yield for function / method body
max-returns=6

# Maximum number of branch for function / method body
max-branchs=20

# Maximum number of statements in function / method body
max-statements=50

# Maximum number of parents for a class (see R0901).
max-parents=7

# Maximum number of attributes for a class (see R0902).
max-attributes=7

# Minimum number of public methods for a class (see R0903).
min-public-methods=1

# Maximum number of public methods for a class (see R0904).
max-public-methods=20


[CLASSES]


[FORMAT]

# Maximum number of characters on a single line.
max-line-length=120

# Maximum number of lines in a module
max-module-lines=1000

# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string='    '


[MISCELLANEOUS]

# List of note tags to take in consideration, separated by a comma.
notes=
0707010000002C000081FD00000000000000000000000162C3F37D000058BA000000000000000000000000000000000000003300000000spacewalk-utils/spacewalk-manage-channel-lifecycle#!/usr/bin/python3
#
# Licensed under the GNU General Public License Version 3
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright 2012 Aron Parsons <aronparsons@gmail.com>
#
# coding: utf-8
"""
Manage channels lifecycle.
"""

import os
import getpass
import logging
import re
import sys
import time
import typing
from optparse import Option, OptionParser  # pylint: disable=W0402
import socket
import xmlrpc.client as xmlrpclib
import configparser as ConfigParser


def get_localhost_fqdn():
    """
    Get FQDN of the current machine.

    :return:
    """
    fqdn = None
    try:
        addr_info = socket.getaddrinfo(
            socket.gethostname(), 0, 0, 0, 0, socket.AI_CANONNAME)
        for _family, _socktype, _proto, canonname, _sockaddr in addr_info:
            if canonname:
                fqdn = canonname
                break
    except socket.gaierror:
        pass  # Silence here
    except Exception as exc:
        print("Unhandled exception occurred while getting FQDN:", exc)

    return fqdn or socket.getfqdn()  # Fall-back to the /etc/hosts's FQDN


class Config:

    """
    Configuration parser with defaults handling.
    """
    SECTION_GENERAL = "general"
    OPT_PHASES = "phases"
    OPT_EXCLUDE_CHNL = "exclude channels"

    def __init__(self, path):
        self.path = path
        self.default_section = Config.SECTION_GENERAL
        self._cfg = ConfigParser.RawConfigParser()

    def get(self, section: str, option: str, default: typing.Any = None) -> typing.Any:
        """
        Get data from the configuration.

        When *section* is absent, the lookup falls back to the default
        section; *default* is returned whenever the option is not found.

        :param section:
        :param option:
        :param default:
        :return:
        """
        # Fall back to the default section only if the requested one is missing
        lookup = section if self._cfg.has_section(section) else self.default_section
        if self._cfg.has_section(lookup) and self._cfg.has_option(lookup, option):
            return self._cfg.get(lookup, option)
        return default

    def set(self, section: str, option: str, value: typing.Any) -> None:
        """
        Set section to the configuration structure.

        :param section: name of the section
        :param option: option name
        :param value: value
        :return: None
        """
        if not self._cfg.has_section(section):
            self._cfg.add_section(section)
        self._cfg.set(section, option, value)

    def write(self) -> None:
        """
        Write configuration to the INI file.

        :return: None
        """
        with open(self.path, "w") as ini_file:
            self._cfg.write(ini_file)

    def read(self) -> None:
        """
        Read configuration from the INI file.

        :return: None
        """
        self._cfg.read(self.path)

    def sections(self) -> typing.List[typing.AnyStr]:
        """
        Returns sections of the configuration INI.

        The default section is excluded from the result.

        :return: list of the sections
        """
        default = self.default_section
        return [name for name in self._cfg.sections() if name != default]


# Per-user configuration layout; the directory is created on demand by
# setup_config().
CONF_DIR = os.path.expanduser("~/.spacewalk-manage-channel-lifecycle")
# INI file holding workflow settings (phases, excluded channels, credentials)
USER_CONF_FILE = os.path.join(CONF_DIR, "settings.conf")
# Cached XML-RPC session token, reused between invocations
SESSION_CACHE = os.path.join(CONF_DIR, "session")


def setup_config(cfg):
    """
    Setup configuration.

    Ensures CONF_DIR exists as a directory, then either seeds *cfg*
    with defaults and writes it (first run) or loads the existing file.
    """
    # A stale regular file in place of the config directory is removed
    if os.path.isfile(CONF_DIR):
        os.unlink(CONF_DIR)

    if not os.path.exists(CONF_DIR):
        logging.debug("Creating directory: %s", CONF_DIR)
        try:
            os.mkdir(CONF_DIR, 0o700)
        except IOError:
            logging.error("Unable to create %s", CONF_DIR)
            sys.exit(1)

    if os.path.exists(USER_CONF_FILE):
        cfg.read()
    else:
        logging.debug("Creating configuration file: %s", USER_CONF_FILE)
        cfg.set(Config.SECTION_GENERAL, Config.OPT_PHASES, "dev, test, prod")
        cfg.set(Config.SECTION_GENERAL, Config.OPT_EXCLUDE_CHNL, "")
        cfg.write()


def ask(msg, password=False):
    """
    Ask input from the console. Hide the echo, in case of password or sensitive information.
    """
    prompt = "{}: ".format(msg)
    if password:
        return getpass.getpass(prompt)
    return input(prompt)


def parse_enumerated(data):
    """
    Parse comma-separated elements.

    Surrounding whitespace is stripped and empty chunks are discarded;
    a falsy input yields an empty list.
    """
    if not data:
        return []
    return [chunk.strip() for chunk in data.split(",") if chunk.strip()]


def get_next_phase(current_name: str) -> str:
    """
    Gets the name of the next phase of the workflow.

    Exits the program when *current_name* is unknown or already the
    last phase.

    :param current_name:
    :return:
    """
    try:
        current_num = phases.index(current_name)
    except ValueError:
        logging.error('Invalid phase name: %s', current_name)
        sys.exit(1)

    # return the next phase name
    if current_num + 1 < len(phases):
        return phases[current_num + 1]

    logging.error("Maximum phase exceeded!  You can't move past '%s'.", phases[-1])
    sys.exit(1)


def print_channel_tree() -> None:
    """
    Prints the tree of existing channels to STDOUT.

    :return: None
    """
    tree = {}

    # Group child labels under their parent; stand-alone channels get an
    # empty child list keyed by their own label.
    for channel in all_channels:
        parent = channel.get('parent_label')
        if parent:
            tree.setdefault(parent, []).append(channel.get('label'))
        elif channel.get('label') not in tree:
            tree[channel.get('label')] = []

    # print the channels in a tree format
    print("Channel tree:")
    width = len(str(len(tree)))
    for index, parent in enumerate(sorted(tree), start=1):
        children = tree[parent]
        if children:
            print()
        print(" %s. %s" % (str(index).rjust(width), parent))
        if children:
            for child in sorted(children):
                print((' ' * width) + '     \\__ %s' % child)
            print()
    print()


def channel_exists(ch_label, quiet=False) -> bool:
    """
    Check if named channel exists.

    :param ch_label: channel label
    :param quiet: suppresses error log if True
    :return: returns True if channel exists.
    """
    try:
        # Any successful details call proves the channel exists
        client.channel.software.getDetails(session, ch_label)
        return True
    except xmlrpclib.Fault as e:
        if options.debug:
            logging.exception(e)
        if not quiet:
            logging.error('Channel %s does not exist', ch_label)
        return False


# pylint: disable=R0912
def merge_channels(source_label, dest_label) -> None:
    """
    Merge channels data.

    Merges the packages (and, unless --no-errata was given, the errata)
    from *source_label* into the existing *dest_label* channel. Honors
    --dry-run, --tolerant, --clear-channel and the exclude filters.

    :param source_label: label of the channel to take content from
    :param dest_label: label of the channel to merge content into
    :return: None
    """
    # Safety net: refuse a 'rhel*' destination unless --tolerant was given
    if dest_label.startswith('rhel') and not options.tolerant:
        logging.error("Destination label starts with 'rhel'.  Aborting!")
        logging.error("Pass --tolerant to override this")
        sys.exit(1)

    if options.exclude_channel:
        for pattern in options.exclude_channel:
            if source_label == pattern:
                logging.info('Skipping %s due to an exclude filter', source_label)
                return

    # remove all the packages from the channel if requested
    if options.clear_channel or options.rollback:
        clear_channel(dest_label)

    logging.info('Merging packages from %s into %s', source_label, dest_label)

    if not options.dry_run:
        # merge the packages from one channel into another
        try:
            packages = client.channel.software.mergePackages(session, source_label, dest_label, True)
            logging.info('Added %i packages', len(packages))
        except xmlrpclib.Fault as e:
            if options.debug:
                logging.exception(e)
            logging.error('Failed to merge packages')

            # --tolerant continues with the errata merge despite the failure
            if not options.tolerant:
                sys.exit(1)

    if not options.no_errata:
        logging.info('Merging errata into %s', dest_label)

        if not options.dry_run:
            # merge the errata from one channel into another
            try:
                errata = client.channel.software.mergeErrata(session,
                                                             source_label,
                                                             dest_label)

                logging.info('Added %i errata', len(errata))
            except xmlrpclib.Fault as e:
                if options.debug:
                    logging.exception(e)
                logging.error('Failed to merge errata')

                if not options.tolerant:
                    sys.exit(1)
    print()
# pylint: enable=R0912


def clone_channel(source_label, source_details) -> None:
    """
    Clone channel.

    :param source_label:
    :param source_details:
    :return: None
    """
    # Exclude filters win over everything else
    for pattern in (options.exclude_channel or []):
        if source_label == pattern:
            logging.info('Skipping %s due to an exclude filter', source_label)
            return

    # channel doesn't exist, clone it from the original
    logging.info('Cloning %s from %s', source_details['label'], source_label)

    if options.dry_run:
        return

    try:
        client.channel.software.clone(session, source_label, source_details, False)
    except xmlrpclib.Fault as e:
        if options.debug:
            logging.exception(e)
        logging.error('Failed to clone channel')

        if not options.tolerant:
            sys.exit(1)


def clear_channel(lbl) -> None:
    """
    Clears all the errata and packages in the channel.

    Failures in either step are logged as warnings (previously a fault
    while removing packages crashed the run, while an errata fault only
    warned). Nothing is changed in dry-run mode.

    :param lbl: channel label
    :return: None
    """
    logging.info('Clearing all errata from %s', lbl)

    if not options.dry_run:
        # attempt to clear the errata from the channel
        try:
            all_errata = client.channel.software.listErrata(session, lbl)
            errata_names = [e.get('advisory_name') for e in all_errata]
            client.channel.software.removeErrata(session, lbl, errata_names, False)
        except xmlrpclib.Fault as e:
            if options.debug:
                logging.exception(e)
            logging.warning('Failed to clear errata from %s', lbl)

    # Log outside the dry-run guard so --dry-run reports the intended
    # action, consistent with the errata message above.
    logging.info('Clearing all packages from %s', lbl)

    if not options.dry_run:
        # Mirror the errata branch: warn on failure instead of letting an
        # uncaught Fault abort the whole lifecycle run.
        try:
            all_packages = client.channel.software.listAllPackages(session, lbl)
            package_ids = [p.get('id') for p in all_packages]
            client.channel.software.removePackages(session, lbl, package_ids)
        except xmlrpclib.Fault as e:
            if options.debug:
                logging.exception(e)
            logging.warning('Failed to clear packages from %s', lbl)


def get_current_phase(source):
    """
    Get current phase from the source channel label.

    Returns None when the label carries no known phase prefix.
    """
    for phase in phases:
        prefix = "{phase}{dlm}".format(phase=phase, dlm=options.delimiter)
        if source.startswith(prefix):
            return phase
    return None


def build_channel_labels(source):
    """
    Build channel labels.

    Derives the destination channel label from *source* according to the
    selected action (--archive, --init, --promote or --rollback).

    :param source: source channel label
    :return: (source, destination); destination is None when no action matched
    """
    destination = None
    if options.archive:
        # prefix the existing channel with 'archive-YYYYMMDD-'
        date_string = time.strftime('%Y%m%d', time.gmtime())
        destination = 'archive{dlm}{date}{dlm}{src}'.format(dlm=options.delimiter, date=date_string, src=source)
    elif options.init:
        destination = '{phase}{dlm}{src}'.format(phase=phases[0], dlm=options.delimiter, src=source)

        if channel_exists(destination, quiet=True):
            logging.error('%s already exists.  Use --promote instead.', destination)
            sys.exit(1)
    elif options.promote:
        # get the phase label from the channel label
        current_phase = get_current_phase(source)
        if current_phase:
            next_phase = get_next_phase(current_phase)

            # replace the name of the phase in the destination label;
            # escape the phase name in case it contains regex metacharacters
            destination = re.sub('^%s' % re.escape(current_phase), next_phase, source)
        else:
            destination = '{phase}{dlm}{src}'.format(phase=phases[0], dlm=options.delimiter, src=source)
    elif options.rollback:
        # strip off the archive prefix when rolling back; the delimiter may
        # be '.' (a regex metacharacter, see the -D option), so escape it
        destination = re.sub(r'archive{dlm}\d{{8}}{dlm}'.format(dlm=re.escape(options.delimiter)), '', source)

    return source, destination


def get_config_credentials(conf, opts):
    '''
    Look into configuration for credentials for the admin user.

    Both username and password must be present in the configuration;
    otherwise *opts* is left untouched.
    '''
    credentials = (conf.get('general', 'username'),
                   conf.get('general', 'password'))
    if all(credentials):
        opts.username, opts.password = credentials


if __name__ == "__main__":
    usage = '''usage: %prog [options]

    Create a 'dev' channel based on the latest packages:
    spacewalk-manage-channel-lifecycle -c sles11-sp3-pool-x86_64 --init

    Promote the packages from 'dev' to 'test':
    spacewalk-manage-channel-lifecycle -c dev-sles11-sp3-pool-x86_64 --promote

    Promote the packages from 'test' to 'prod':
    spacewalk-manage-channel-lifecycle -c test-sles11-sp3-pool-x86_64 --promote

    Archive a production channel:
    spacewalk-manage-channel-lifecycle -c prod-sles11-sp3-pool-x86_64 --archive

    Rollback the production channel to an archived version:
    spacewalk-manage-channel-lifecycle \\
        -c archive-20110520-prod-sles11-sp3-pool-x86_64 --rollback'''

    # Command-line interface definition (optparse; an argparse port is
    # stubbed out in the test suite).
    option_list = [
        Option('-l', '--list-channels', help='list existing channels',
               action='store_true'),
        Option('', '--init', help='initialize a development channel',
               action='store_true'),
        Option('-w', '--workflow', help='use configured workflow', default=""),
        Option('-D', '--delimiter', type='choice', choices=['-', '_', '.'],
               help='delimiter used between workflow and channel name', default="-"),
        Option('-f', '--list-workflows', help='list configured workflows', default=False,
               action='store_true'),
        Option('', '--promote', help='promote a channel to the next phase',
               action='store_true'),
        Option('', '--archive', help='archive a channel', action='store_true'),
        Option('', '--rollback', help='rollback', action='store_true'),
        Option('-c', '--channel', help='channel to init/promote/archive/rollback'),
        Option('-C', '--clear-channel',
               help='clear all packages/errata from the channel before merging',
               action='store_true'),
        Option('-x', '--exclude-channel', help='skip these channels',
               action='append'),
        Option('', '--no-errata', help="don't merge errata data with --promote",
               action='store_true'),
        Option('', '--no-children', help="don't operate on child channels",
               action='store_true'),
        Option('-P', '--phases', default='',
               help='comma-separated list of phases [default: dev,test,prod]'),
        Option('-u', '--username', help='Spacewalk username'),
        Option('-p', '--password', help='Spacewalk password'),
        Option('-s', '--server',
               help='Spacewalk server [default: %default]', default=get_localhost_fqdn()),
        Option('-n', '--dry-run', help="don't perform any operations",
               action='store_true'),
        Option('-t', '--tolerant', help='be tolerant of errors',
               action='store_true'),
        Option('-d', '--debug', help='enable debug logging', action='count')
    ]

    parser = OptionParser(option_list=option_list, usage=usage)
    (options, args) = parser.parse_args()

    # -d is counted: one occurrence enables debug logging, two or more also
    # enable XML-RPC wire tracing (xmlrpc_debug below)
    if options.debug:
        level = logging.DEBUG
    else:
        level = logging.INFO

    logging.basicConfig(level=level, format='%(levelname)s: %(message)s')

    # An empty --workflow means the [general] section of settings.conf
    options.workflow = options.workflow or Config.SECTION_GENERAL
    config = Config(USER_CONF_FILE)
    try:
        setup_config(config)
    except ConfigParser.ParsingError as ex:
        logging.error("Unable to process configuration:\n%s", str(ex))
        sys.exit(1)

    if options.list_workflows:
        workflows = config.sections()
        if not workflows:
            # NOTE(review): typo "additinal" in this user-facing message;
            # left unchanged here, fix separately.
            print("There are no additinal configured workflows except default.")
            sys.exit(0)

        print("Configured additional workflows:")
        idx = 1
        for workflow in workflows:
            print("  %s. %s" % (idx, workflow))
            idx += 1
        print()
        sys.exit(0)

    # sanity check
    if not (options.init or options.promote or options.archive or options.rollback
            or options.list_channels):
        logging.error("You must provide an action: %s", "(--init/--promote/--archive/--rollback/--list-channels)")
        sys.exit(1)
    elif not options.list_channels and not options.channel:
        logging.error('--channel is required')
        sys.exit(1)

    # parse the list of phases (command line overrides settings.conf)
    phases = parse_enumerated(options.phases or config.get(options.workflow, Config.OPT_PHASES, "dev,test,prod"))

    # update exclude channel option
    options.exclude_channel = options.exclude_channel or parse_enumerated(config.get(options.workflow,
                                                                                     Config.OPT_EXCLUDE_CHNL, ""))
    if len(phases) < 2:
        logging.error('You must define at least 2 phases')
        sys.exit(1)

    # determine if you want to enable XMLRPC debugging
    xmlrpc_debug = options.debug and options.debug > 1

    # connect to the server
    client = xmlrpclib.Server('https://%s/rpc/api' % options.server,
                              verbose=xmlrpc_debug)

    session = None

    # check for an existing session cached from a previous run
    if os.path.exists(SESSION_CACHE):
        try:
            fh = open(SESSION_CACHE, 'r')
            session = fh.readline()
            fh.close()
        except IOError as e:
            if options.debug:
                logging.exception(e)
            logging.debug('Failed to read session cache')

        # validate the session by issuing a cheap authenticated call
        try:
            client.channel.listMyChannels(session)
        except xmlrpclib.Fault as e:
            if options.debug:
                logging.exception(e)
            logging.warning('Existing session is invalid')
            session = None

    # Look for credentials in settings.conf
    get_config_credentials(config, options)

    if not session:
        # prompt for the username
        if not options.username:
            while not options.username:
                options.username = ask('Spacewalk Username')

        # prompt for the password
        if not options.password:
            options.password = ask('Spacewalk Password', password=True)
        if not options.password:
            logging.warning("Empty password is not a good practice!")

        # login to the server
        try:
            session = client.auth.login(options.username, options.password)
        except xmlrpclib.Fault as e:
            if options.debug:
                logging.exception(e)
            logging.error('Failed to log into %s', options.server)
            sys.exit(1)

        # save the session for subsequent runs
        try:
            fh = open(SESSION_CACHE, 'w')
            fh.write(session)
            fh.close()
        except IOError as e:
            if options.debug:
                logging.exception(e)
            logging.warning('Failed to write session cache')

    # list all of the channels once
    try:
        all_channels = client.channel.listSoftwareChannels(session)
        all_channel_labels = [c.get('label') for c in all_channels]
    except xmlrpclib.Fault as e:
        if options.debug:
            logging.exception(e)
        logging.error('Could not retrieve the list of software channels')
        sys.exit(1)

    # list the available custom channels and exit
    if options.list_channels:
        print_channel_tree()
        sys.exit(0)

    # ensure the source channel exists
    if not channel_exists(options.channel):
        sys.exit(1)

    # determine the channel labels for the parent channel
    (parent_source, parent_dest) = build_channel_labels(options.channel)

    # inform the user that no changes will take place
    if options.dry_run:
        logging.info('DRY RUN - No changes are being made to the channels')
        time.sleep(2)
        print()

    # merge packages/errata if the destination channel label already exists,
    # otherwise clone it from the source channel
    if parent_dest in all_channel_labels:
        merge_channels(parent_source, parent_dest)
    else:
        # channel doesn't exist, clone it from the original
        # let's check if there's a parent for the original channel and if so, clone it in the right place
        new_parent_source = client.channel.software.getDetails(session, parent_source)['parent_channel_label']
        new_parent_dest = ""
        if new_parent_source:
            (new_parent_source, new_parent_dest) = build_channel_labels(new_parent_source)

        details = {'label': parent_dest,
                   'name': parent_dest,
                   'summary': parent_dest,
                   'parent_label': new_parent_dest}

        clone_channel(parent_source, details)

    if not options.no_children:
        children = []

        # get the child channels for the source parent channel
        for channel in all_channels:
            if channel.get('parent_label') == parent_source:
                children.append(channel.get('label'))

        for label in children:
            # determine the child channels for the source parent channel
            (child_source, child_dest) = build_channel_labels(label)

            # merge packages/errata if the destination channel label already exists,
            # otherwise clone it from the source channel
            if child_dest in all_channel_labels:
                merge_channels(child_source, child_dest)
            else:
                details = {'label': child_dest,
                           'name': child_dest,
                           'summary': child_dest,
                           'parent_label': parent_dest}

                clone_channel(child_source, details)
0707010000002D000081B400000000000000000000000162C3F37D00001B38000000000000000000000000000000000000002500000000spacewalk-utils/spacewalk-utils.spec#
# spec file for package spacewalk-utils
#
# Copyright (c) 2021 SUSE LLC
# Copyright (c) 2008-2018 Red Hat, Inc.
#
# All modifications and additions to the file contributed by third parties
# remain the property of their copyright owners, unless otherwise agreed
# upon. The license for this file, and modifications and additions to the
# file, is the same license as for the pristine package itself (unless the
# license for the pristine package is not an Open Source License, in which
# case the license is the MIT License). An "Open Source License" is a
# license that conforms to the Open Source Definition (Version 1.9)
# published by the Open Source Initiative.

# Please submit bugfixes or comments via https://bugs.opensuse.org/
#

Name:           spacewalk-utils
Version:        4.3.12
Release:        1
Summary:        Utilities that may be run against a SUSE Manager/Uyuni server
License:        GPL-2.0-only AND GPL-3.0-or-later
Group:          Productivity/Other
URL:            https://github.com/uyuni-project/uyuni
Source0:        https://github.com/spacewalkproject/spacewalk/archive/%{name}-%{version}.tar.gz
BuildRoot:      %{_tmppath}/%{name}-%{version}-build
BuildArch:      noarch
BuildRequires:  docbook-utils
BuildRequires:  fdupes
BuildRequires:  python3
BuildRequires:  python3-rpm-macros
BuildRequires:  uyuni-base-common

# Required by spacewalk-hostname-rename
Requires:       bash
# Required by spacewalk-hostname-rename
Requires:       cobbler
# Required by spacewalk-hostname-rename
Requires:       iproute
# Required by spacewalk-hostname-rename
%if 0%{?suse_version}
Requires:       perl = %{perl_version}
%else
Requires:       perl(:MODULE_COMPAT_%(eval "`%{__perl} -V:version`"; echo $version))
%endif
# Required by spacewalk-hostname-rename
Requires:       perl-Satcon
# Required by depsolver.py
Requires:       (python3-PyYAML or python3-pyyaml)
# Required by depsolver.py
Requires:       python3-solv
# Required by depsolver.py, cloneByDate.py, spacewalk-common-channels
Requires:       python3-uyuni-common-libs
# Required by spacewalk-clone-by-date, spacewalk-sync-setup
Requires:       python3-salt
# Required by spacewalk-hostname-rename
Requires:       rpm
# Required by spacewalk-hostname-rename
Requires:       spacewalk-admin
# Required by cloneByDate.py, spacewalk-clone-by-date, spacewalk-common-channels
Requires:       spacewalk-backend
# Required by cloneByDate.py
Requires:       spacewalk-backend-sql
# Required by cloneByDate.py, depsolver.py
Requires:       spacewalk-backend-tools >= 2.2.27
# Required by spacewalk-hostname-rename
Requires:       spacewalk-certs-tools
# Required by spacewalk-hostname-rename
Requires:       spacewalk-config
# Required by spacewalk-export
Requires:       spacewalk-reports
# Required by spacewalk-hostname-rename
Requires:       spacewalk-setup
# Required by spacewalk-hostname-rename (provides /usr/bin/spacewalk-sql)
Requires:       susemanager-schema
# Required by cloneByDate.py, depsolver.py,spacewalk-clone-by-date
Requires(pre):  uyuni-base-common

%description
Utilities that may be run against a SUSE Manager server (supported) or an Uyuni server

%package extras
Summary:        Extra utilities that may run against a SUSE Manager/Uyuni server
# Required by spacewalk-watch-channel-sync.sh
Group:          Productivity/Other
Requires:       bash
# Required by taskotop
Requires:       python3-curses
# Required by sw-ldap-user-sync
Requires:       python3-ldap
# Required by sw-ldap-user-sync
Requires:       python3-PyYAML
# Required by migrate-system-profile
Requires:       python3-rhnlib >= 2.5.20
# Required by migrateSystemProfile.py, systemSnapshot.py
Requires:       python3-uyuni-common-libs
# Required by spacewalk-manage-snapshots, systemSnapshot.py
Requires:       spacewalk-backend
# Required by taskotop
Requires:       spacewalk-backend-sql
# Required by spacewalk-final-archive, spacewalk-watch-channel-sync.sh
Requires:       spacewalk-backend-tools >= 2.2.27
# As spacewalk-utils owns {python3_sitelib}/utils
Requires:       spacewalk-utils
# Required by migrate-system-profile, migrateSystemProfile.py, spacewalk-export-channels, spacewalk-manage-snapshots, sw-system-snapshot, systemSnapshot.py
Requires(pre):  uyuni-base-common

%description extras
Extra utilities that may be run against a SUSE Manager server (unsupported) or an Uyuni server

%prep
%setup -q

%build
make all

%install
make install PREFIX=$RPM_BUILD_ROOT ROOT=%{python3_sitelib} \
    MANDIR=%{_mandir}
pushd %{buildroot}
%if 0%{?suse_version}
%py3_compile -O %{buildroot}%{python3_sitelib}
%fdupes %{buildroot}%{python3_sitelib}
%else
%py_byte_compile %{__python3} %{buildroot}%{python3_sitelib}
%endif
popd

%check

%files
%defattr(-,root,root)
%license COPYING.GPLv2 COPYING.GPLv3
%attr(755,root,root) %{_bindir}/spacewalk-common-channels
%attr(755,root,root) %{_bindir}/spacewalk-clone-by-date
%attr(755,root,root) %{_bindir}/spacewalk-hostname-rename
%attr(755,root,root) %{_bindir}/spacewalk-manage-channel-lifecycle
%attr(755,root,root) %{_bindir}/spacewalk-sync-setup
%config %{_sysconfdir}/rhn/spacewalk-common-channels.ini
%dir %{python3_sitelib}/utils
%{python3_sitelib}/utils/__init__.py*
%{python3_sitelib}/utils/systemSnapshot.py*
%{python3_sitelib}/utils/cloneByDate.py*
%{python3_sitelib}/utils/depsolver.py*
%dir %{python3_sitelib}/utils/__pycache__
%{python3_sitelib}/utils/__pycache__/__init__.*
%{python3_sitelib}/utils/__pycache__/systemSnapshot.*
%{python3_sitelib}/utils/__pycache__/cloneByDate.*
%{python3_sitelib}/utils/__pycache__/depsolver.*
%{_mandir}/man8/spacewalk-clone-by-date.8.gz
%{_mandir}/man8/spacewalk-hostname-rename.8.gz
%{_mandir}/man8/spacewalk-sync-setup.8.gz

%files extras
%defattr(-,root,root)
%license COPYING.GPLv2 COPYING.GPLv3
%attr(755,root,root) %{_bindir}/apply_errata
%attr(755,root,root) %{_bindir}/delete-old-systems-interactive
%attr(755,root,root) %{_bindir}/migrate-system-profile
%attr(755,root,root) %{_bindir}/spacewalk-api
%attr(755,root,root) %{_bindir}/spacewalk-export
%attr(755,root,root) %{_bindir}/spacewalk-export-channels
%attr(755,root,root) %{_bindir}/spacewalk-final-archive
%attr(755,root,root) %{_bindir}/spacewalk-manage-snapshots
%attr(755,root,root) %{_bindir}/spacewalk-watch-channel-sync.sh
%attr(755,root,root) %{_bindir}/sw-ldap-user-sync
%attr(755,root,root) %{_bindir}/sw-system-snapshot
%attr(755,root,root) %{_bindir}/taskotop
%{python3_sitelib}/utils/migrateSystemProfile.py*
%{python3_sitelib}/utils/__pycache__/migrateSystemProfile.*
%config(noreplace) %{_sysconfdir}/rhn/sw-ldap-user-sync.conf
%{_mandir}/man8/delete-old-systems-interactive.8.gz
%{_mandir}/man8/migrate-system-profile.8.gz
%{_mandir}/man8/spacewalk-api.8.gz
%{_mandir}/man8/spacewalk-export-channels.8.gz
%{_mandir}/man8/spacewalk-export.8.gz
%{_mandir}/man8/spacewalk-final-archive.8.gz
%{_mandir}/man8/spacewalk-manage-snapshots.8.gz
%{_mandir}/man8/sw-system-snapshot.8.gz
%{_mandir}/man8/taskotop.8.gz

%changelog
0707010000002E000081B400000000000000000000000162C3F37D00000D52000000000000000000000000000000000000003400000000spacewalk-utils/delete-old-systems-interactive.sgml<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN">
<refentry>

<RefMeta>
<RefEntryTitle>delete-old-systems-interactive</RefEntryTitle><manvolnum>8</manvolnum>
<RefMiscInfo>Version 1.0</RefMiscInfo>
</RefMeta>

<RefNameDiv>
<RefName><command>delete-old-systems-interactive</command></RefName>
<RefPurpose>
delete inactive systems from a Spacewalk server.
</RefPurpose>
</RefNameDiv>

<RefSynopsisDiv>
<Synopsis>
    <cmdsynopsis>
        <command>delete-old-systems-interactive [OPTIONS] --idle time_delta</command>
    </cmdsynopsis>
</Synopsis>
</RefSynopsisDiv>

<RefSect1><Title>Description</Title>
    <para><emphasis>delete-old-systems-interactive</emphasis> - will use the Spacewalk API to determine when each registered system last checked in, and optionally will delete systems which have been inactive for time_delta.</para>
    <para> This script will list all registered systems and their last check-in. If no system has been idle for more than time_delta, it will write a summary:</para>
    <para>Total systems [127], none idle</para>
    <para>If some systems have been idle for more than time_delta, you will get a summary like:</para>
    <para>Total systems [127], would delete [51]</para>
    <para>No systems are deleted by default, unless you specify the --force option.</para>
</RefSect1>

<RefSect1><Title>Options</Title>
<variablelist>
    <varlistentry>
        <term>-h, --help</term>
        <listitem>
            <para>Display the help screen with a list of options.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--idle time_delta</term>
        <listitem>
            <para>Search for systems which have been inactive for more than time_delta. If only a number is specified, it is interpreted as days. You can use suffixes m (for minutes), h (hours), d (days) and w (weeks). E.g. "--idle 10" and "--idle 10d" are the same and mean ten days, "--idle 2w" means two weeks.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--host host</term>
        <listitem>
            <para>Hostname of your Spacewalk server. If not set, localhost is used by default.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--username login</term>
        <listitem>
            <para>Your username. By default "admin".</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--password pass</term>
        <listitem>
            <para>Your password. If not specified on command line, then it is read from /etc/rhn/$username-password</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--force</term>
        <listitem>
            <para>If specified, then idle systems are deleted.</para>
        </listitem>
    </varlistentry>
</variablelist>
</RefSect1>

<RefSect1><Title>EXAMPLES</Title>
    <para>delete-old-systems-interactive --host spacewalk.com --idle 30d</para>
</RefSect1>

<RefSect1><Title>Authors</Title>
<simplelist>
    <member>Miroslav Suchy<email>msuchy@redhat.com</email></member>
    <member>Jan Pazdziora<email>jpazdziora@redhat.com</email></member>
    <member>Tomas Kasparek<email>tkasparek@redhat.com</email></member>
</simplelist>
</RefSect1>

<RefSect1><Title>COPYRIGHT AND LICENSE</Title>
    <para>Copyright (c) 2009--2015 Red Hat, Inc.</para>
    <para>Released under GNU General Public License, version 2 (GPLv2).</para>
</RefSect1>

</RefEntry>
0707010000002F000081B400000000000000000000000162C3F37D00000030000000000000000000000000000000000000001C00000000spacewalk-utils/__init__.py#Copyright (c) 2005, Red Hat Inc.

__all__ = []
07070100000030000081FD00000000000000000000000162C3F37D00002B91000000000000000000000000000000000000002B00000000spacewalk-utils/spacewalk-manage-snapshots#!/usr/bin/python3
#
# Utility for purging old snapshots
#
# Copyright (c) 2018 Red Hat, Inc.
#
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#

"""spacewalk-manage-snapshots - a tool allowing the Spacewalk Admin to clean old entries out of the snapshot tables in the SW database"""

import os
import sys
import time

from optparse import OptionParser
from datetime import datetime

try:
    from spacewalk.common import rhnLog
    from spacewalk.common.rhnConfig import CFG, initCFG
    from spacewalk.common.rhnLog import log_clean
    from spacewalk.server import rhnSQL
except ImportError:
    # pylint: disable=F0401
    _LIBPATH = '/usr/share/rhn'
    if _LIBPATH not in sys.path:
        sys.path.append(_LIBPATH)
    from server import rhnSQL
    from common import rhnLog
    from common.rhnLog import log_clean
    from common.rhnConfig import CFG, initCFG

LOG_LOCATION = '/var/log/rhn/spacewalk-manage-snapshots.log'

SQL_PIECES = {'oracle': {
    'COUNT': 'select count(*) from %(table)s',
    'BUCKETS': """
select case
  when ss.created >= (current_timestamp - numtodsinterval(:interval,'day')) then '1'
  when ss.created < (current_timestamp - numtodsinterval(:interval,'day'))
   and ss.created >= (current_timestamp - numtodsinterval(:interval*2,'day')) then '2'
  when ss.created < (current_timestamp - numtodsinterval(:interval*2,'day'))
   and ss.created >= (current_timestamp - numtodsinterval(:interval*3,'day')) then '3'
  when ss.created < (current_timestamp - numtodsinterval(:interval*3,'day'))
   and ss.created >= (current_timestamp - numtodsinterval(:interval*4,'day')) then '4'
  else '5'
end as AGE,
count(ss.id) as SNAPSHOTS,
count(distinct ss.server_id) as SERVERS_AFFECTED
  from rhnsnapshot ss
 group by case
  when ss.created >= (current_timestamp - numtodsinterval(:interval,'day')) then '1'
  when ss.created < (current_timestamp - numtodsinterval(:interval,'day'))
   and ss.created >= (current_timestamp - numtodsinterval(:interval*2,'day')) then '2'
  when ss.created < (current_timestamp - numtodsinterval(:interval*2,'day'))
   and ss.created >= (current_timestamp - numtodsinterval(:interval*3,'day')) then '3'
  when ss.created < (current_timestamp - numtodsinterval(:interval*3,'day'))
   and ss.created >= (current_timestamp - numtodsinterval(:interval*4,'day')) then '4'
  else '5'
end
 order by AGE
    """,
    'FIND_TABLE': "select table_name from user_tables where table_name like 'RHNSNAPSHOT%' escape '|' order by table_name",
    'PURGE_DATA': "delete from rhnsnapshot ss where ROWNUM <= :batchsize and ss.created < to_timestamp(:current_timestamp) - numtodsinterval(:num_days,'day')",
    'PURGE_COUNT': "select count(ss.id) from rhnsnapshot ss where ss.created < to_timestamp(:current_timestamp) - numtodsinterval(:num_days,'day')",
    'GET_NOW' : "select current_timestamp from dual",
    'LOCK_GROUPS': "lock table rhnSnapshotServerGroup in row exclusive mode"
},
    'postgresql': {
    'COUNT': 'select count(*) from %(table)s;',
    'BUCKETS': """
select case
  when ss.created >= (current_timestamp - numtodsinterval(:interval,'day'))
    then '1'
  when ss.created < (current_timestamp - numtodsinterval(:interval,'day'))
   and ss.created >= (current_timestamp - numtodsinterval(:interval*2,'day'))
    then '2'
  when ss.created < (current_timestamp - numtodsinterval(:interval*2,'day'))
   and ss.created >= (current_timestamp - numtodsinterval(:interval*3,'day'))
    then '3'
  when ss.created < (current_timestamp - numtodsinterval(:interval*3,'day'))
   and ss.created >= (current_timestamp - numtodsinterval(:interval*4,'day'))
    then '4'
  else '5'
end as AGE,
count(ss.id) as SNAPSHOTS,
count(distinct ss.server_id) as SERVERS_AFFECTED
  from rhnsnapshot ss
 group by AGE
 order by AGE
    """,
    'FIND_TABLE': "select table_name from information_schema.tables where table_name like 'rhnsnapshot%' escape '!' order by table_name",
    'PURGE_DATA': "delete from rhnsnapshot ss where ss.id = any (array(select id from rhnsnapshot where created < to_timestamp(:current_timestamp, 'YYYY-MM-DD HH24:MI:SS:MS') - numtodsinterval(:num_days, 'day') limit :batchsize))",
    'PURGE_COUNT': "select count(ss.id) from rhnsnapshot ss where ss.created < to_timestamp(:current_timestamp, 'YYYY-MM-DD HH24:MI:SS:MS') - numtodsinterval(:num_days, 'day')",
    'GET_NOW' : "select current_timestamp",
    'LOCK_GROUPS': "lock table rhnSnapshotServerGroup in access exclusive mode"
}
}


def setup_options():
    usage = 'usage: %prog [--reports [--interval <INTERVAL>] | --delete-older-than <DAYS>]'
    parser = OptionParser(usage=usage)

    parser.add_option('-b', '--batch-size', action='store', dest='batch_size',
                      metavar='ROWS-PER-COMMIT', type='int', default=1000,
                      help='Number of rows to delete per commit (default is 1000)')
    parser.add_option('-d', '--delete-older-than', action='store', dest='num_days', metavar='DAYS', type='int',
                      help='Snapshots DAYS old or older will be purged from the database')
    parser.add_option('-i', '--interval-older-than', action='store', dest='report_interval',
                      metavar='INTERVAL', type='int', default='90',
                      help='INTERVAL-in-days period to use for --reports (default is 90 days)')
    parser.add_option('-r', '--reports', action='store_true', dest='reports',
                      help='Report current table-sizes (in rows) for all snapshot-related tables and report on the last four --interval periods for snapshots')
    return parser


def validate_options():
    parser = setup_options()
    (options, args) = parser.parse_args()

    # You have to ask for a report or a delete
    if not (options.reports or (options.num_days is not None)):
        parser.error("Either --reports or --delete-older-than is required")

    if options.batch_size < 1:
        parser.error("--batch-size requires a positive integer argument")

    if options.report_interval < 1:
        parser.error("--interval-older-than requires a positive number of days")

    if options.num_days and options.num_days < 1:
        parser.error("--delete-older-than requires a positive number of days")

    return (options, args)


def _log(stmt):
    logger = os.getenv("SUDO_USER")
    if logger == None:
        logger = 'root'
    log_clean(0, '{0}|{1}|{2}'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), logger, stmt))


def _get_sql(key):
    return SQL_PIECES.get(CFG.DB_BACKEND).get(key)


def tables():
    """What are the tables whose names fit the pattern rhnsnapshot*?"""
    stmt = rhnSQL.execute(_get_sql('FIND_TABLE'))
    table_names = stmt.fetchall() or []
    rc = []
    for name in table_names:
        rc.append(name[0])
    return rc


def count(tbl):
    """How many rows are in table <tbl>?"""
    tbl_str = _get_sql('COUNT') % {"table": tbl}
    stmt = rhnSQL.execute(tbl_str)
    num_rows = stmt.fetchone()
    return num_rows[0]


def purge_count(tbl, opt, since):
    """How many rows are we planning to remove from <tbl>?"""
    tbl_str = _get_sql('PURGE_COUNT') % {"table": tbl}
    stmt = rhnSQL.execute(tbl_str, current_timestamp=since, num_days=opt.num_days )
    num_rows = stmt.fetchone()
    return num_rows[0]

def _generate_age_string(val, opt):
    """age is returned as a bucket-number, 1-5. Convert it to days-old based on report_interval"""
    bucket = int(val)
    if bucket == 5:
        return '>{0}'.format((bucket-1)*opt.report_interval)
    else:
        return '{0:4d}-{1:<4d}'.format(((bucket-1)*opt.report_interval)+1, bucket*opt.report_interval)

def buckets(opt):
    """Show number of snapshots per-interval"""
    tbl_str = _get_sql('BUCKETS')
    stmt = rhnSQL.execute(tbl_str, interval=opt.report_interval)
    rs = stmt.fetchall_dict() or []
    # strip the chars we use to force ordering
    for row in rs:
      row['age'] = _generate_age_string(row['age'], opt)
    return rs

def report(opt):
    """List size of all rhnsnapshot* tables found"""
    names = tables()
    print()
    print('{0:>26} : {1:>12}'.format('Table name', 'rows'))
    for name in names:
        print('{0:>26} : {1:>12d}'.format(name, count(name)))
    by_quarter = buckets(opt)
    if by_quarter:
        print()
        print(': {0:^36} :'.format('Snapshot info, ' + str(opt.report_interval) + '-day interval'))
        print(': {0:^9} : {1:>9} : {2:>12} :'.format('age(days)', 'systems', 'snapshots'))
        for a_row in by_quarter:
            print(': {age:^9} : {servers_affected:9d} : {snapshots:12d} :'.format(**a_row))
    else:
        print('No snapshots remain.')


def purge(opt):
    """Remove from the rhnsnapshot* tables anything that happened more than <num_days> old"""
    print('Deleting snapshots older than ' + str(opt.num_days) + ' days')
    _log('Deleting snapshots older than ' + str(opt.num_days) + ' days')


    now_sql = _get_sql('GET_NOW')
    now_ts = rhnSQL.execute(now_sql).fetchone()[0]

    name = 'rhnsnapshot'

    print('{0:12d} snapshots currently'.format(count(name)))
    _log('{0:12d} snapshots currently'.format(count(name)))

    to_be_purged = purge_count(name, opt, now_ts)
    print('{0:12d} snapshots to be deleted, {1:d} per commit'.format(to_be_purged, opt.batch_size))
    _log('{0:12d} snapshots to be deleted, {1:d} per commit'.format(to_be_purged, opt.batch_size))

    lock_str = _get_sql('LOCK_GROUPS')
    del_str = _get_sql('PURGE_DATA')
    while to_be_purged > 0:
        print('...{0:12d} snapshots left to purge'.format(to_be_purged))
        _log('...{0:12d} snapshots left to purge'.format(to_be_purged))
        # Commit after each batch - if something blows us up, we don't want to lose the
        # work that's already been done.
        # Also limits size of redo-log and friends
        rhnSQL.transaction('PURGE_BATCH')
        stmt = rhnSQL.execute(lock_str)
        stmt = rhnSQL.execute(del_str, current_timestamp=now_ts, batchsize=opt.batch_size, num_days=opt.num_days)
        rhnSQL.commit()
        to_be_purged = purge_count(name, opt, now_ts)

    print('{0:12d} snapshots remain'.format(count(name)))
    _log('{0:12d} snapshots remain'.format(count(name)))
    return

if __name__ == '__main__':
    (options, args) = validate_options()
    initCFG('server')
    rhnSQL.initDB()

    if options.reports:
        report(options)
        sys.exit(0)

    rhnLog.initLOG(LOG_LOCATION)
    if rhnLog.LOG.real == 0:
        print('Failed to access logfile ' + LOG_LOCATION + ' - exiting...')
        print('(Try running as sudo)')
        sys.exit(1)

    if options.num_days is not None:
        purge(options)
07070100000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000B00000000TRAILER!!!
openSUSE Build Service is sponsored by