File spacewalk-proxy-git-945.3e3d1b6.obscpio of Package spacewalk-proxy

07070100000000000041FD00000000000000000000000B68DD3ED300000000000000000000000000000000000000000000001000000000spacewalk-proxy07070100000001000081B400000000000000000000000168DD3ED300000728000000000000000000000000000000000000001E00000000spacewalk-proxy/Makefile.defs# Common pathnames and programs for the Spacewalk project
#

# If TOP is not defined by the including Makefile, define it as a no-op (".")
TOP		?= .

# global defines which control this build and where we deploy files
ROOT		?= /usr/share/rhn
export ROOT

# Staging root for packaged installs (DESTDIR-style); empty by default.
PREFIX		?=
export PREFIX

# Compilation stuff
CC		= gcc
PYTHON_INCLUDE	= -I/usr/include/python$(PythonVersion)
CFLAGS		= -Wall -O2 -fomit-frame-pointer $(PYTHON_INCLUDE) -fPIC
SOFLAGS		= -shared -fPIC

# Installation stuff
INSTALL		= /usr/bin/install -c --verbose
INSTALL_BIN	= $(INSTALL) -m 755
INSTALL_DATA	= $(INSTALL) -m 644
INSTALL_DIR	= $(INSTALL) -m 755 -d

# This is for the subdir part; FILES is provided by the including Makefile.
PYFILES		= $(addsuffix .py,$(FILES))

# what do we need to install and where
INSTALL_FILES	+= $(PYFILES)
INSTALL_DEST	?= $(ROOT)/$(SUBDIR)

# All directories that must exist before installing; $(sort) also
# de-duplicates the EXTRA_DIRS list.
DIRS		+= $(addprefix $(PREFIX), \
			$(sort $(EXTRA_DIRS)) $(INSTALL_DEST))

all :: $(INSTALL_FILES)

install :: all $(DIRS) $(INSTALL_FILES)
	@$(foreach f,$(INSTALL_FILES), \
		$(INSTALL_DATA) $(f) $(PREFIX)$(INSTALL_DEST)/$(f) ; )

# Create every directory listed in DIRS.
$(DIRS):
	$(INSTALL_DIR) $@

clean ::
	@rm -fv *~ *.pyc *.pyo .??*~
	@rm -fv .\#*
	@rm -fv core

# useful macro: recurse into each of SUBDIRS, aborting on the first failure
descend-subdirs = @$(foreach d,$(SUBDIRS), $(MAKE) -C $(d) $@ || exit 1; )

# subdirs are treated at the end
all install clean:: $(SUBDIRS)
	$(descend-subdirs)


# extra toy targets
# Python checker support
PYTHONPATH      = $(TOP)
PYCHECKER       = pychecker
PYCHECKEROPTS   = --maxreturns 10 --maxbranches 15
DBCHECKER       = db-checker.py
DBCHECKEROPTS   =
DB              = user/pass@instance

# "|| :" makes the checker targets best-effort: lint findings never fail make.
pychecker :: $(PYFILES)
	@PYTHONPATH=$(PYTHONPATH) $(PYCHECKER) $(PYCHECKEROPTS) $(PYFILES) || :
	$(descend-subdirs)

db-checker :: $(PYFILES)
	@PYTHONPATH=$(PYTHONPATH) $(TOP)/$(DBCHECKER) $(DBCHECKEROPTS) $(PYFILES) || :
	$(descend-subdirs)

graphviz :: 
	@PYTHONPATH=$(PYTHONPATH) $(PYCHECKER) -Z $(PYCHECKEROPTS) $(PYFILES) || exit 0

07070100000002000081B400000000000000000000000168DD3ED300000739000000000000000000000000000000000000001F00000000spacewalk-proxy/Makefile.proxy# Makefile for building Spacewalk Proxy snapshots
#

.DEFAULT_GOAL  :=  all

TOP	= .

SUBDIR  = proxy

# check if we can build man pages (docbook2man available, *.sgml sources present)
DOCBOOK = $(wildcard /usr/bin/docbook2man)
SGMLS   = $(wildcard *.sgml)
MANS    = $(patsubst %.sgml,%.8,$(SGMLS))
MANDIR  ?= /usr/share/man

CODE_DIRS = broker redirect pm wsgi
CONF_DIRS = httpd-conf rhn-conf logrotate
PACKAGES_DIR        = $(PREFIX)/var/up2date/packages
PACKAGES_LIST_DIR   = $(PREFIX)/var/up2date/list

# Python modules (without .py) installed via Makefile.defs' PYFILES machinery.
FILES	= __init__ apacheHandler apacheServer responseContext \
        rhnAuthCacheClient rhnAuthProtocol rhnConstants \
        rhnProxyAuth rhnShared
TAR_EXCLUDE = install

SERVICE_SCRIPTS = rhn-proxy

# We look for config files in "well known" locations (rhn-conf,
# httpd-conf, logrotate)
EXTRA_DIRS = $(MANDIR)/man8 /var/log/rhn /var/cache/rhn /usr/sbin

all :: all-code all-conf
	echo $(MANS)

# Fan a goal (all/install/clean) out to the code subdirectories.
%-code : Makefile.proxy
	@$(foreach d,$(CODE_DIRS), $(MAKE) -C $(d) $* || exit 1; ) 

# Fan a goal out to the configuration subdirectories.
%-conf : Makefile.proxy
	@$(foreach d,$(CONF_DIRS), $(MAKE) -C $(d) $* || exit 1; ) 

# now include some Macros
include $(TOP)/Makefile.defs

# install service scripts
all :: $(SERVICE_SCRIPTS)
# NOTE(review): INITDIR is not defined anywhere in this file or in
# Makefile.defs — presumably inherited from the environment; verify.
install :: $(SERVICE_SCRIPTS) $(PREFIX)/$(INITDIR)
	$(INSTALL_BIN) $(SERVICE_SCRIPTS) $(PREFIX)/usr/sbin

ifneq ($(DOCBOOK),)
# install man pages
all     :: $(MANS)
install :: $(MANS) $(PREFIX)/$(MANDIR)
	$(INSTALL_DATA) $(MANS) $(PREFIX)/$(MANDIR)/man8
endif

install :: install-code install-conf install-var

install-var: $(PACKAGES_DIR) $(PACKAGES_LIST_DIR)

%.8 : %.sgml
	/usr/bin/docbook2man $<

$(PACKAGES_DIR) $(PACKAGES_LIST_DIR):
	$(INSTALL_DIR) $@

clean :: clean-code clean-conf
	@rm -fv $(MANS) manpage.*

# Run pylint with the legacy option spellings first and fall back to the
# modern ones, so both old and new pylint versions work.
pylint ::
# :E1101: *%s %r has no %r member*
	pylint --errors-only --disable-msg=E1101 --enable-msg-cat=imports *py broker/ || pylint --errors-only --disable=E1101 --enable=imports *py broker/
07070100000003000081B400000000000000000000000168DD3ED300000334000000000000000000000000000000000000002000000000spacewalk-proxy/Makefile.pythonTHIS_MAKEFILE := $(realpath $(lastword $(MAKEFILE_LIST)))
CURRENT_DIR := $(dir $(THIS_MAKEFILE))
include $(CURRENT_DIR)../../rel-eng/Makefile.python

# Docker tests variables
DOCKER_CONTAINER_BASE = uyuni-master
DOCKER_REGISTRY       = registry.mgr.suse.de
# "$$PYTHONPATH" escapes the dollar so the *shell* expands PYTHONPATH when
# the recipe runs; the previous single "$P" was expanded by make as the
# (undefined) variable P, yielding the literal string "YTHONPATH".
DOCKER_RUN_EXPORT     = "PYTHONPATH=$$PYTHONPATH"
DOCKER_VOLUMES        = -v "$(CURDIR)/../../:/manager"

# Run pylint inside the prepared pip environment; "|| true" keeps the target
# green so the report file is produced even when lint issues are found.
__pylint ::
	$(call update_pip_env)
	pylint --rcfile=pylintrc $(shell find -name '*.py') > reports/pylint.log || true

# Same pylint run, executed inside the project's Docker test container.
docker_pylint ::
	docker run --rm -e $(DOCKER_RUN_EXPORT) $(DOCKER_VOLUMES) $(DOCKER_REGISTRY)/$(DOCKER_CONTAINER_BASE)-pgsql /bin/sh -c "cd /manager/proxy/proxy; make -f Makefile.python __pylint"

# Interactive shell inside the Docker test container, sources mounted.
docker_shell ::
	docker run -t -i --rm -e $(DOCKER_RUN_EXPORT) $(DOCKER_VOLUMES) $(DOCKER_REGISTRY)/$(DOCKER_CONTAINER_BASE)-pgsql /bin/bash
07070100000004000081B400000000000000000000000168DD3ED30000029E000000000000000000000000000000000000001C00000000spacewalk-proxy/__init__.py# pylint: disable=missing-module-docstring
#
# Copyright (c) 2008--2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#

# Export nothing; this module only marks the directory as a Python package.
__all__ = []
07070100000005000081B400000000000000000000000168DD3ED30000669F000000000000000000000000000000000000002100000000spacewalk-proxy/apacheHandler.py# pylint: disable=missing-module-docstring,invalid-name
# Main entry point for apacheServer.py for the Spacewalk Proxy
# and/or SSL Redirect Server.
#
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# -----------------------------------------------------------------------------

# language imports
import os
import base64

try:
    #  python 2
    import xmlrpclib
except ImportError:
    #  python 3
    import xmlrpc.client as xmlrpclib
import re

# common imports
from spacewalk.common.rhnConfig import CFG
from spacewalk.common.rhnLog import log_debug, log_error
from spacewalk.common.rhnApache import rhnApache
from spacewalk.common.rhnTB import Traceback
from spacewalk.common.rhnException import rhnFault, rhnException
from spacewalk.common import rhnFlags, apache
from spacewalk.common import byterange
from rhn import rpclib, connections
from rhn.UserDictCase import UserDictCase
from uyuni.common.rhnLib import setHeaderValue
from proxy.rhnProxyAuth import get_proxy_auth


from .rhnConstants import (
    HEADER_ACTUAL_URI,
    HEADER_EFFECTIVE_URI,
    HEADER_CHECKSUM,
    SCHEME_HTTP,
    SCHEME_HTTPS,
    URI_PREFIX_KS,
    URI_PREFIX_KS_CHECKSUM,
    COMPONENT_BROKER,
    COMPONENT_REDIRECT,
)


# pylint: disable-next=invalid-name
def getComponentType(req):
    """
    Are we a 'proxy.broker' or a 'proxy.redirect'.

    Checks to see if the last visited Spacewalk Proxy was itself. If so, we
    are a 'proxy.redirect'. If not, then we must be a 'proxy.broker'.

    :param req: incoming request object; only req.headers_in is consulted
    :return: COMPONENT_BROKER or COMPONENT_REDIRECT
    """

    # NOTE: X-RHN-Proxy-Auth described in broker/rhnProxyAuth.py
    if "X-RHN-Proxy-Auth" not in req.headers_in:
        # Request comes from a client, Must be the broker
        return COMPONENT_BROKER

    # Might be obsolete if proxy is traditionally registered
    if "X-Suse-Auth-Token" in req.headers_in:
        return COMPONENT_REDIRECT

    # pull server id out of "t:o:k:e:n:hostname1,t:o:k:e:n:hostname2,..."
    proxy_auth = req.headers_in["X-RHN-Proxy-Auth"]
    last_auth = proxy_auth.split(",")[-1]
    last_visited = last_auth.split(":")[0]
    proxy_server_id = get_proxy_auth().getProxyServerId()
    # is it the same box?
    try:
        log_debug(4, "last_visited", last_visited, "; proxy server id", proxy_server_id)
    # pylint: disable=W0702
    except:
        # pylint: disable=W0702
        # in case this runs before the log files are initialized; logging is
        # best-effort here, so deliberately swallow any error
        pass
    if last_visited == proxy_server_id:
        # XXX this assumes redirect runs on the same box as the broker
        return COMPONENT_REDIRECT

    return COMPONENT_BROKER


# pylint: disable-next=invalid-name
class apacheHandler(rhnApache):
    """Main apache entry point for the proxy.

    Dispatches each request to the broker or the SSL-redirect handler,
    depending on the component type set via set_component().
    """

    _lang_catalog = "proxy"

    def __init__(self):
        rhnApache.__init__(self)
        # Parsed transport headers of the current request; set per request
        # in headerParserHandler().
        self.input = None
        # COMPONENT_BROKER or COMPONENT_REDIRECT; set via set_component().
        self._component = None

    def set_component(self, component):
        """Record which proxy component (broker/redirect) this handler serves."""
        self._component = component
    @staticmethod
    def _setSessionToken(headers):
        """Like rhnApache._setSessionToken(), but always returns a token:
        when verification fails, an empty UserDictCase is stored under the
        AUTH_SESSION_TOKEN flag and returned instead of a false value.
        """
        # extended to always return a token, even if an empty one
        ret = rhnApache._setSessionToken(headers)
        if ret:
            log_debug(4, "Returning", ret)
            return ret

        # Session token did not verify, we have an empty auth token
        token = UserDictCase()
        rhnFlags.set("AUTH_SESSION_TOKEN", token)
        return token

    def headerParserHandler(self, req):
        """Name-munging if request came from anaconda in response to a
        kickstart.

        Delegates to the base class first; on success, captures the request
        transport headers and applies any kickstart URI transformation.
        """
        ret = rhnApache.headerParserHandler(self, req)
        if ret != apache.OK:
            return ret

        # Remember the client's transport settings for use in response().
        self.input = rpclib.transports.Input(req.headers_in)

        # Before we allow the main handler code to commence, we'll first check
        # to see if this request came from anaconda in response to a kickstart.
        # If so, we'll need to do some special name-munging before we continue.

        ret = self._transformKickstartRequest(req)
        return ret

    # pylint: disable-next=invalid-name
    def _transformKickstartRequest(self, req):
        """If necessary, this routine will transform a "tinified" anaconda-
        generated kickstart request into a normalized form capable of being
        cached effectively by squid.

        This is done by first making a HEAD request
        to the satellite for the purpose of updating the kickstart progress and
        retrieving an MD5 sum for the requested file.  We then replace the
        tinyURL part of the URI with the retrieved MD5 sum.  This effectively
        removes session-specific information while allowing us to still cache
        based on the uniqueness of the file.

        Returns an apache status code; apache.OK when no transformation is
        needed.
        """
        # Kickstart requests only come in the form of a GET, so short-circuit
        # if that is not the case.

        if req.method != "GET":
            return apache.OK

        log_debug(6, "URI", req.uri)
        log_debug(6, "COMPONENT", self._component)

        # If we're a broker, we know that this is a kickstart request from
        # anaconda by checking if the URI begins with /ty/*, otherwise just
        # return.  If we're an SSL redirect, we check that the URI begins with
        # /ty-cksm/*, otherwise return.

        if self._component == COMPONENT_BROKER:
            if req.uri.startswith(URI_PREFIX_KS):
                # pylint: disable-next=consider-using-f-string
                log_debug(3, "Found a kickstart URI: %s" % req.uri)
                return self._transformKsRequestForBroker(req)
        elif self._component == COMPONENT_REDIRECT:
            if req.uri.startswith(URI_PREFIX_KS_CHECKSUM):
                # pylint: disable-next=consider-using-f-string
                log_debug(3, "Found a kickstart checksum URI: %s" % req.uri)
                return self._transformKsRequestForRedirect(req)

        return apache.OK

    # pylint: disable-next=invalid-name
    def _transformKsRequestForBroker(self, req):
        """Broker side of the kickstart transformation: fetch the resource
        checksum from the satellite and rewrite the session-specific URI
        into a cacheable /ty-cksm/<checksum>/... form, recording both the
        original and effective URIs in the request headers.
        """
        # Get the checksum for the requested resource from the satellite.

        (status, checksum) = self._querySatelliteForChecksum(req)
        if status != apache.OK or not checksum:
            return status

        # If we got this far, we have the checksum.  Create a new URI based on
        # the checksum.

        # pylint: disable-next=invalid-name
        newURI = self._generateCacheableKickstartURI(req.uri, checksum)
        if not newURI:
            # Couldn't create a cacheable URI, log an error and revert to
            # BZ 158236 behavior.

            # pylint: disable-next=consider-using-f-string
            log_error('Could not create cacheable ks URI from "%s"' % req.uri)
            return apache.OK

        # Now we must embed the old URI into a header in the original request
        # so that the SSL Redirect has it available if the resource has not
        # been cached yet.  We will also embed a header that holds the new URI,
        # so that the content handler can use it later.

        # pylint: disable-next=consider-using-f-string
        log_debug(3, "Generated new kickstart URI: %s" % newURI)
        req.headers_in[HEADER_ACTUAL_URI] = req.uri
        req.headers_in[HEADER_EFFECTIVE_URI] = newURI

        return apache.OK

    @staticmethod
    # pylint: disable-next=invalid-name
    def _transformKsRequestForRedirect(req):
        """Redirect side of the kickstart transformation.

        The broker stored the client's real URI under X-RHN-ActualURI;
        copy it into X-RHN-EffectiveURI so the content handler fetches the
        right resource. Decline the request when the header is missing.
        """
        headers = req.headers_in
        if not headers or HEADER_ACTUAL_URI not in headers:
            log_error(
                # pylint: disable-next=consider-using-f-string
                "Kickstart request header did not include '%s'"
                % HEADER_ACTUAL_URI
            )
            return apache.DECLINED

        actual_uri = headers[HEADER_ACTUAL_URI]
        headers[HEADER_EFFECTIVE_URI] = actual_uri
        # pylint: disable-next=consider-using-f-string
        log_debug(3, "Reverting to old URI: %s" % actual_uri)

        return apache.OK

    # pylint: disable-next=invalid-name
    def _querySatelliteForChecksum(self, req):
        """Sends a HEAD request to the satellite for the purpose of obtaining
        the checksum for the requested resource.  A (status, checksum)
        tuple is returned.  If status is not apache.OK, checksum will be
        None.  If status is OK, and a checksum is not returned, the old
        BZ 158236 behavior will be used.
        """
        scheme = SCHEME_HTTP
        if req.server.port == 443:
            scheme = SCHEME_HTTPS
        # pylint: disable-next=consider-using-f-string
        log_debug(6, "Using scheme: %s" % scheme)

        # Initiate a HEAD request to the satellite to retrieve the MD5 sum.
        # Actually, we make the request through our own proxy first, so
        # that we don't accidentally bypass necessary authentication
        # routines.  Since it's a HEAD request, the proxy will forward it
        # directly to the satellite like it would a POST request.

        host = "127.0.0.1"
        port = req.connection.local_addr[1]

        connection = self._createConnection(host, port, scheme)
        if not connection:
            # Couldn't form the connection.  Log an error and revert to the
            # old BZ 158236 behavior.  In order to be as robust as possible,
            # we won't fail here.

            log_error(
                # pylint: disable-next=consider-using-f-string
                "HEAD req - Could not create connection to %s://%s:%s"
                % (scheme, host, str(port))
            )
            return (apache.OK, None)

        # We obtained the connection successfully.  Construct the URL that
        # we'll connect to.

        # pylint: disable-next=invalid-name,consider-using-f-string
        pingURL = "%s://%s:%s%s" % (scheme, host, str(port), req.uri)
        # pylint: disable-next=consider-using-f-string
        log_debug(6, "Ping URI: %s" % pingURL)

        # Forward the incoming headers, minus Range (we want the checksum of
        # the whole file), stripping bare CR/LF that are not legal header
        # continuations to avoid header injection.
        hdrs = UserDictCase()
        for k in list(req.headers_in.keys()):
            if k.lower() != "range":  # we want checksum of whole file
                hdrs[k] = re.sub(
                    r"\n(?![ \t])|\r(?![ \t\n])", "", str(req.headers_in[k])
                )

        log_debug(9, "Using existing headers_in", hdrs)
        connection.request("HEAD", pingURL, None, hdrs)
        log_debug(6, "Connection made, awaiting response.")

        # Get the response.

        response = connection.getresponse()
        # pylint: disable-next=consider-using-f-string
        log_debug(6, "Received response status: %s" % response.status)
        connection.close()

        if (response.status != apache.HTTP_OK) and (
            response.status != apache.HTTP_PARTIAL_CONTENT
        ):
            # Something bad happened.  Return back back to the client.

            log_debug(
                1,
                # pylint: disable-next=consider-using-f-string
                "HEAD req - Received error code in reponse: %s"
                % (str(response.status)),
            )
            return (response.status, None)

        # The request was successful.  Dig the MD5 checksum out of the headers.

        # pylint: disable-next=invalid-name
        responseHdrs = response.msg
        if not responseHdrs:
            # No headers?!  This shouldn't happen at all.  But if it does,
            # revert to the old # BZ 158236 behavior.

            log_error("HEAD response - No HTTP headers!")
            return (apache.OK, None)

        if HEADER_CHECKSUM not in responseHdrs:
            # No checksum was provided.  This could happen if a newer
            # proxy is talking to an older satellite.  To keep things
            # running smoothly, we'll just revert to the BZ 158236
            # behavior.

            log_debug(1, "HEAD response - No X-RHN-Checksum field provided!")
            return (apache.OK, None)

        checksum = responseHdrs[HEADER_CHECKSUM]

        return (apache.OK, checksum)

    @staticmethod
    # pylint: disable-next=invalid-name,invalid-name
    def _generateCacheableKickstartURI(oldURI, checksum):
        """
        This routine computes a new cacheable URI based on the old URI and the
        checksum. For example, if the checksum is 1234ABCD and the oldURI was:

            /ty/AljAmCEt/RedHat/base/comps.xml

        Then, the new URI will be:

            /ty-cksm/1234ABCD/RedHat/base/comps.xml

        If for some reason the new URI could not be generated, return None.
        """
        # Split on "/" and drop empty pieces so double slashes are tolerated.
        path_pieces = [piece for piece in oldURI.split("/") if piece]

        # The first two pieces form the session-specific /ty/<token> prefix
        # being replaced; with nothing after them there is no resource path.
        if len(path_pieces) <= 2:
            return None

        tail = "/".join(path_pieces[2:])
        return URI_PREFIX_KS_CHECKSUM + checksum + "/" + tail

    @staticmethod
    # pylint: disable-next=invalid-name
    def _createConnection(host, port, scheme):
        """Build an rhn.connections HTTP(S)Connection to host:port,
        honoring the optional "timeout" configuration value when set.
        """
        params = {"host": host, "port": port}

        # NOTE(review): CFG.has_key is assumed to be an API of the config
        # object (not a plain dict, where has_key no longer exists on
        # Python 3) — verify against spacewalk.common.rhnConfig.
        if CFG.has_key("timeout"):
            params["timeout"] = CFG.TIMEOUT

        if scheme == SCHEME_HTTPS:
            conn_class = connections.HTTPSConnection
        else:
            conn_class = connections.HTTPConnection

        return conn_class(**params)

    def handler(self, req):
        """Main handler to handle all requests pumped through this server.

        Dispatches to the broker or redirect handler based on the component
        type, converts rhnFaults and encodable results into responses, and
        otherwise passes through the handler's HTTP status code.
        """

        ret = rhnApache.handler(self, req)
        if ret != apache.OK:
            return ret

        log_debug(4, "METHOD", req.method)
        log_debug(4, "PATH_INFO", req.path_info)
        log_debug(4, "URI (full path info)", req.uri)
        log_debug(4, "Component", self._component)

        # Imports are deferred so only the needed component module is loaded.
        if self._component == COMPONENT_BROKER:
            # pylint: disable-next=import-outside-toplevel
            from .broker import rhnBroker

            # pylint: disable-next=invalid-name
            handlerObj = rhnBroker.BrokerHandler(req)
        else:
            # Redirect
            # pylint: disable-next=import-outside-toplevel
            from .redirect import rhnRedirect

            # pylint: disable-next=invalid-name
            handlerObj = rhnRedirect.RedirectHandler(req)

        try:
            ret = handlerObj.handler()
        except rhnFault as e:
            return self.response(req, e)

        if rhnFlags.test("NeedEncoding"):
            return self.response(req, ret)

        # All good; we expect ret to be an HTTP return code
        if not isinstance(ret, type(1)):
            # pylint: disable-next=consider-using-f-string
            raise rhnException("Invalid status code type %s" % type(ret))
        # pylint: disable-next=consider-using-f-string
        log_debug(2, "Leaving with status code %s" % ret)
        return ret

    @staticmethod
    def normalize(response):
        """convert a response to the right type for passing back to
        rpclib.xmlrpclib.dumps
        """
        # Faults pass through untouched; any other value is wrapped in the
        # 1-tuple that xmlrpclib.dumps() expects for a method response.
        return response if isinstance(response, xmlrpclib.Fault) else (response,)

    @staticmethod
    def response_file(req, response):
        """send a file out

        Streams a file-like response to the client, honoring a Range
        header when present and delegating to wsgi.file_wrapper or a
        chunked iterator otherwise. Returns apache.OK, HTTP_PARTIAL_CONTENT
        for ranged replies, or HTTP_BAD_REQUEST on a broken pipe.
        """
        log_debug(3, response.name)
        # We may set the content type remotely
        if rhnFlags.test("Content-Type"):
            req.content_type = rhnFlags.get("Content-Type")
        else:
            # Safe default
            req.content_type = "application/octet-stream"

        # find out the size of the file (seek to end / tell / rewind when
        # the response does not carry an explicit length)
        if response.length == 0:
            response.file_obj.seek(0, 2)
            file_size = response.file_obj.tell()
            response.file_obj.seek(0, 0)
        else:
            file_size = response.length

        success_response = apache.OK
        response_size = file_size

        # Serve up the requested byte range
        if "Range" in req.headers_in:
            try:
                range_start, range_end = byterange.parse_byteranges(
                    req.headers_in["Range"], file_size
                )
                response_size = range_end - range_start
                req.headers_out["Content-Range"] = byterange.get_content_range(
                    range_start, range_end, file_size
                )
                req.headers_out["Accept-Ranges"] = "bytes"

                response.file_obj.seek(range_start)

                # We'll want to send back a partial content rather than ok
                # if this works
                req.status = apache.HTTP_PARTIAL_CONTENT
                success_response = apache.HTTP_PARTIAL_CONTENT

            # For now we will just return the file file on the following exceptions
            except byterange.InvalidByteRangeException:
                pass
            except byterange.UnsatisfyableByteRangeException:
                pass

        req.headers_out["Content-Length"] = str(response_size)

        # if we loaded this from a real fd, set it as the X-Replace-Content
        # check for "name" since sometimes we get xmlrpclib.transports.File's that have
        # a stringIO as the file_obj, and they dont have a .name (ie,
        # fileLists...)
        if response.name:
            req.headers_out["X-Package-FileName"] = response.name

        xrepcon = "X-Replace-Content-Active" in req.headers_in and rhnFlags.test(
            "Download-Accelerator-Path"
        )
        if xrepcon:
            fpath = rhnFlags.get("Download-Accelerator-Path")
            # pylint: disable-next=consider-using-f-string
            log_debug(1, "Serving file %s" % fpath)
            req.headers_out["X-Replace-Content"] = fpath
            # Only set a byte rate if xrepcon is active
            byte_rate = rhnFlags.get("QOS-Max-Bandwidth")
            if byte_rate:
                req.headers_out["X-Replace-Content-Throttle"] = str(byte_rate)

        # send the headers
        req.send_http_header()

        if "Range" in req.headers_in:
            # and the file
            read = 0
            while read < response_size:
                # We check the size here in case we're not asked for the entire file.
                buf = response.read(CFG.BUFFER_SIZE)
                if not buf:
                    break
                try:
                    req.write(buf)
                    read = read + CFG.BUFFER_SIZE
                except IOError:
                    if xrepcon:
                        # We're talking to a proxy, so don't bother to report
                        # a SIGPIPE
                        break
                    return apache.HTTP_BAD_REQUEST
            response.close()
        else:
            if "wsgi.file_wrapper" in req.headers_in:
                req.output = req.headers_in["wsgi.file_wrapper"](
                    response, CFG.BUFFER_SIZE
                )
            else:
                # BUGFIX: the old sentinel "" never matched the b"" that
                # response.read() returns at EOF for binary content under
                # Python 3, so the iterator never terminated.  Mapping any
                # falsy chunk ("" or b"") to None stops the iteration for
                # both text and binary transports.
                req.output = iter(
                    lambda: response.read(CFG.BUFFER_SIZE) or None, None
                )
        return success_response

    def response(self, req, response):
        """send the response (common code)

        Routes File responses to response_file(), encodes rhnFaults (or
        reports them via headers for GET), and dumps everything else as an
        xmlrpc method response using the client's transfer/encoding flags,
        optionally compressed.
        """

        # Send the xml-rpc response back
        log_debug(5, "Response type", type(response))

        needs_xmlrpc_encoding = rhnFlags.test("NeedEncoding")
        compress_response = rhnFlags.test("compress_response")
        # Init an output object; we'll use it for sending data in various
        # formats
        if isinstance(response, rpclib.transports.File):
            if not hasattr(response.file_obj, "fileno") and compress_response:
                # This is a StringIO that has to be compressed, so read it in
                # memory; mark that we don't have to do any xmlrpc encoding
                response = response.file_obj.read()
                needs_xmlrpc_encoding = 0
            else:
                # Just treat is as a file
                return self.response_file(req, response)

        is_fault = 0
        if isinstance(response, rhnFault):
            if req.method == "GET":
                return self._response_fault_get(req, response.getxml())
            # Need to encode the response as xmlrpc
            response = response.getxml()
            is_fault = 1
            # No compression
            compress_response = 0
            # This is an xmlrpc Fault, so we have to encode it
            needs_xmlrpc_encoding = 1

        output = rpclib.transports.Output()

        if not is_fault:
            # First, use the same encoding/transfer that the client used
            output.set_transport_flags(
                transfer=rpclib.transports.lookupTransfer(self.input.transfer),
                encoding=rpclib.transports.lookupEncoding(self.input.encoding),
            )

        if compress_response:
            # check if we have to compress this result
            log_debug(4, "Compression on for client version", self.clientVersion)
            if self.clientVersion > 0:
                output.set_transport_flags(output.TRANSFER_BINARY, output.ENCODE_ZLIB)
            else:  # original clients had the binary transport support broken
                output.set_transport_flags(output.TRANSFER_BASE64, output.ENCODE_ZLIB)

        # We simply add the transport options to the output headers
        output.headers.update(rhnFlags.get("outputTransportOptions").dict())

        if needs_xmlrpc_encoding:
            # Normalize the response
            response = self.normalize(response)
            try:
                response = rpclib.xmlrpclib.dumps(response, methodresponse=1)
            except TypeError as e:
                # pylint: disable-next=consider-using-f-string
                log_debug(-1, 'Error "%s" encoding response = %s' % (e, response))
                Traceback(
                    "apacheHandler.response",
                    req,
                    # pylint: disable-next=consider-using-f-string
                    extra='Error "%s" encoding response = %s' % (e, response),
                    severity="notification",
                )
                return apache.HTTP_INTERNAL_SERVER_ERROR
            except Exception:  # pylint: disable=E0012, W0703
                # Uncaught exception; signal the error
                Traceback("apacheHandler.response", req, severity="unhandled")
                return apache.HTTP_INTERNAL_SERVER_ERROR

        # we're about done here, patch up the headers
        output.process(response)
        # Copy the rest of the fields
        for k, v in list(output.headers.items()):
            if k.lower() == "content-type":
                # Content-type
                req.content_type = v
            else:
                setHeaderValue(req.headers_out, k, v)

        if CFG.DEBUG == 4:
            # I wrap this in an "if" so we don't parse a large file for no reason.
            log_debug(
                4,
                # pylint: disable-next=consider-using-f-string
                "The response: %s[...SNIP (for sanity) SNIP...]%s"
                % (response[:100], response[-100:]),
            )
        elif CFG.DEBUG >= 5:
            # if you absolutely must have that whole response in the log file
            # pylint: disable-next=consider-using-f-string
            log_debug(5, "The response: %s" % response)

        # send the headers
        req.send_http_header()
        try:
            # XXX: in case data is really large maybe we should split
            # it in smaller chunks instead of blasting everything at
            # once. Not yet a problem...
            req.write(output.data)
        except IOError:
            # send_http_header is already sent, so it doesn't make a lot of
            # sense to return a non-200 error; but there is no better solution
            return apache.HTTP_BAD_REQUEST
        del output
        return apache.OK

    @staticmethod
    def _response_fault_get(req, response):
        """Report an rhnFault to a plain GET client via response headers.

        GET clients cannot parse an xmlrpc fault body, so the fault code
        and the base64-encoded, line-split fault string are delivered as
        X-RHN-Fault-* headers, followed by the usual transport options.
        Always answers HTTP 404.
        """
        req.headers_out["X-RHN-Fault-Code"] = str(response.faultCode)
        # base64.encodestring() was removed in Python 3.9; encodebytes()
        # (available since Python 3.1) is the supported equivalent.
        # pylint: disable-next=invalid-name
        faultString = base64.encodebytes(response.faultString.encode()).decode().strip()
        # Split the faultString into multiple lines
        for line in faultString.split("\n"):
            req.headers_out.add("X-RHN-Fault-String", line.strip())
        # And then send all the other things
        for k, v in list(rhnFlags.get("outputTransportOptions").items()):
            setHeaderValue(req.headers_out, k, v)
        return apache.HTTP_NOT_FOUND

    # pylint: disable-next=arguments-renamed
    def cleanupHandler(self, req):
        """Clean up stuff before we close down the session when we are
        called from apacheServer.Cleanup()
        """

        log_debug(2)
        self.input = None
        # Reap every terminated child process; os.waitpid(-1, 0) raises
        # OSError once no children remain, which terminates the loop.
        while True:
            pid = status = -1
            try:
                pid, status = os.waitpid(-1, 0)
            except OSError:
                break
            # pylint: disable-next=consider-using-f-string
            log_error("Reaped child process %d with status %d" % (pid, status))
        return rhnApache.cleanupHandler(self, req)


# =============================================================================
07070100000006000081B400000000000000000000000168DD3ED300000E36000000000000000000000000000000000000002000000000spacewalk-proxy/apacheServer.py# pylint: disable=missing-module-docstring,invalid-name
# apacheServer.py      - Apache XML-RPC server for mod_python (Spacewalk).
#
# Copyright (c) 2008--2017 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#

# common module imports
from spacewalk.common.rhnConfig import CFG, initCFG
from spacewalk.common.rhnLog import initLOG, log_setreq, log_debug
from spacewalk.common.rhnTB import Traceback
from spacewalk.common import apache


class HandlerWrap:
    """Callable wrapper around the real request handlers.

    Any exception raised by the wrapped handler is trapped, logged with
    a traceback, and converted into an HTTP 500 so Apache never sees an
    unhandled Python error.
    """

    # Shared handler instance, created lazily on the first request.
    # pylint: disable-next=invalid-name
    svrHandlers = None

    def __init__(self, name, init=0):
        # Name of the method to invoke on the handler instance.
        self.__name = name
        # Flag: should we initialize the config and logging components?
        self.__init = init

    def __call__(self, req):
        # NOTE: imports happen inside this method because the
        #       configuration module must be initialized (driven by the
        #       RHNComponentType found in the req object) before any of
        #       the other modules are pulled in.
        # pylint: disable-next=invalid-name
        componentType = None
        if self.__init:
            # pylint: disable-next=import-outside-toplevel
            from .apacheHandler import getComponentType

            # The config files cannot tell us whether we run as broker
            # or redirect, because we try to always pass all requests
            # upstream; derive the component type from the request.
            componentType = getComponentType(req)
            initCFG(componentType)
            initLOG(CFG.LOG_FILE, CFG.DEBUG, f"wsgi_{componentType}")
            # pylint: disable-next=consider-using-f-string
            log_debug(2, "New request, component %s" % (componentType,))

        # Instantiate the shared handlers on first use.
        if HandlerWrap.svrHandlers is None:
            HandlerWrap.svrHandlers = self.get_handler_factory(req)()

        if self.__init:
            # Propagate the component type detected above.
            HandlerWrap.svrHandlers.set_component(componentType)

        # pylint: disable=W0702
        try:
            log_setreq(req)
            target = getattr(HandlerWrap.svrHandlers, self.__name, None)
            if target is None:
                # pylint: disable-next=broad-exception-raised,consider-using-f-string
                raise Exception("Class has no attribute %s" % self.__name)
            return target(req)
        except:
            Traceback(
                self.__name, req, extra="Unhandled exception type", severity="unhandled"
            )
            return apache.HTTP_INTERNAL_SERVER_ERROR

    @staticmethod
    # pylint: disable-next=invalid-name
    def get_handler_factory(_req):
        """Return the handler class to instantiate; redefine in subclasses."""
        # pylint: disable-next=import-outside-toplevel
        from .apacheHandler import apacheHandler

        return apacheHandler


# Instantiate external entry points:
# These module-level callables are the hooks Apache (mod_python/mod_wsgi,
# per the file header) dispatches into.  Only the header parser performs
# config/logging initialization (init=1); the others reuse that state.
HeaderParserHandler = HandlerWrap("headerParserHandler", init=1)
Handler = HandlerWrap("handler")
CleanupHandler = HandlerWrap("cleanupHandler")
LogHandler = HandlerWrap("logHandler")
07070100000007000041FD00000000000000000000000268DD3ED300000000000000000000000000000000000000000000001700000000spacewalk-proxy/broker07070100000008000081B400000000000000000000000168DD3ED3000000A7000000000000000000000000000000000000002000000000spacewalk-proxy/broker/Makefile# Makefile for the apacheServer.py for Spacewalk Proxy Server.
#

# Relative path to the directory holding the shared Makefile.defs.
TOP	= ..
# Install subdirectory below $(ROOT) (consumed by Makefile.defs' rules).
SUBDIR	= proxy/broker
# Python modules (without the .py suffix) installed from this directory.
FILES	= __init__ rhnRepository rhnBroker
include $(TOP)/Makefile.defs
07070100000009000081B400000000000000000000000168DD3ED300000265000000000000000000000000000000000000002300000000spacewalk-proxy/broker/__init__.py#
# Copyright (c) 2008--2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
0707010000000A000081B400000000000000000000000168DD3ED300008AEC000000000000000000000000000000000000002400000000spacewalk-proxy/broker/rhnBroker.py# pylint: disable=missing-module-docstring,invalid-name
# Spacewalk Proxy Server Broker handler code.
#
# Copyright (c) 2008--2017 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#

# system module imports
import time
import socket
import re
import os
import base64

try:
    # python 3
    from urllib.parse import urlparse, urlunparse
except ImportError:
    # python 2
    from urlparse import urlparse, urlunparse

# common module imports
from rhn.UserDictCase import UserDictCase
from rhn.stringutils import ustr
from spacewalk.common.rhnConfig import CFG
from spacewalk.common.rhnLog import log_debug, log_error
from spacewalk.common.rhnException import rhnFault
from spacewalk.common import rhnFlags, apache
from spacewalk.common.rhnTranslate import _
from spacewalk.common import suseLib
from uyuni.common.rhnLib import parseUrl

# local module imports
from proxy.rhnShared import SharedHandler
from proxy.rhnConstants import URI_PREFIX_KS_CHECKSUM
import proxy.rhnProxyAuth
from . import rhnRepository


# Proxy protocol version advertised upstream; it must never be decreased
# (note that Spacewalk itself uses a different versioning scheme).
_PROXY_VERSION = "5.5.0"
# HISTORY: '0.9.7', '3.2.0', '3.5.0', '3.6.0', '4.1.0',
#          '4.2.0', '5.0.0', '5.1.0', '5.2.0', '0.1',
#          '5.3.0', '5.3.1', '5.4.0', '5.5.0'


class BrokerHandler(SharedHandler):
    """Spacewalk Proxy broker specific handler code called by rhnApache.

    Workflow is:
    Client -> Apache:Broker -> Squid -> Apache:Redirect -> Satellite

    Broker handler get request from clients from outside. Some request
    (POST and HEAD) bypass cache so, it is passed directly to parent.
    For everything else we transform destination to localhost:80 (which
    is handled by Redirect handler) and set proxy as local squid.
    This way we got all request cached localy by squid.
    """

    # pylint: disable=R0902
    def __init__(self, req):
        """Set up broker state and resolve this proxy's own hostname.

        Hostname resolution order (first non-empty result wins):
          1. the client-supplied ``Host`` header, accepted only when it
             resolves back to our own IP address;
          2. the ``PROXY_FQDN`` config option (container deployments);
          3. a reverse-DNS lookup of our own IP address;
          4. socket.gethostname() as a last-resort punt.
        The resolved hostname selects the proxy auth object used for
        upstream authentication.
        """
        SharedHandler.__init__(self, req)

        # Initialize variables
        # pylint: disable-next=invalid-name
        self.componentType = "proxy.broker"
        # pylint: disable-next=invalid-name
        self.cachedClientInfo = None  # headers - session token
        # pylint: disable-next=invalid-name
        self.authChannels = None
        # pylint: disable-next=invalid-name
        self.clientServerId = None
        # pylint: disable-next=invalid-name
        self.rhnParentXMLRPC = None
        # pylint: disable-next=invalid-name
        self.authToken = None
        # pylint: disable-next=invalid-name
        self.fullRequestURL = None
        hostname = ""
        # should *always* exist and be my ip address
        my_ip_addr = req.headers_in["SERVER_ADDR"]
        if "Host" in req.headers_in:
            # the client has provided a host header
            try:
                # When a client with python 2.4 (RHEL 5) uses SSL
                # the host header is in the 'hostname:port' form
                # (In python 2.6 RFE #1472176 changed this and 'hostname'
                # is used). We need to use the 'hostname' part in any case
                # or we create bogus 'hostname:port' DNS queries
                host_header = req.headers_in["Host"].split(":")[0]
                if (
                    host_header != my_ip_addr
                    and socket.gethostbyname(host_header) == my_ip_addr
                ):
                    # if host header is valid (i.e. not just an /etc/hosts
                    # entry on the client or the hostname of some other
                    # machine (say a load balancer)) then use it
                    hostname = host_header
            except (socket.gaierror, socket.error, socket.herror, socket.timeout):
                # hostname probably didn't exist, fine
                pass
        if not hostname and CFG.has_key("PROXY_FQDN"):
            # Not resolvable hostname, check container config
            # pylint: disable-next=consider-using-f-string
            log_debug(2, "Using PROXY_FQDN config %s" % CFG.PROXY_FQDN)
            hostname = CFG.PROXY_FQDN
        if not hostname:
            # okay, that didn't work, let's do a reverse dns lookup on my
            # ip address
            try:
                hostname = socket.gethostbyaddr(my_ip_addr)[0]
            except (socket.gaierror, socket.error, socket.herror, socket.timeout):
                # unknown host, we don't have a hostname?
                pass
        if not hostname:
            # this shouldn't happen
            # socket.gethostname is a punt. Shouldn't need to do it.
            hostname = socket.gethostname()
            log_debug(
                -1,
                # pylint: disable-next=consider-using-f-string
                "WARNING: no hostname in the incoming headers; "
                "punting: %s" % hostname,
            )
        # Strip any scheme/port decoration the resolved name may carry.
        hostname = parseUrl(hostname)[1].split(":")[0]
        # pylint: disable-next=invalid-name
        self.proxyAuth = proxy.rhnProxyAuth.get_proxy_auth(hostname)

        self._initConnectionVariables(req)

    # pylint: disable-next=invalid-name
    def _initConnectionVariables(self, req):
        """set connection variables
        NOTE: self.{caChain,rhnParent,httpProxy*} are initialized
              in SharedHandler

        rules:
            - GET requests:
                  . are non-SSLed (potentially SSLed by the redirect)
                  . use the local cache
                  . use the SSL Redirect
                    (i.e., parent is now 127.0.0.1)
                    . NOTE: the reason we use the SSL Redirect if we
                            are going through an outside HTTP_PROXY:
                            o CFG.HTTP_PROXY is ONLY used by an SSL
                            redirect - maybe should rethink that.
            - POST and HEAD requests (not GET) bypass both the local cache
                   and SSL redirect (we SSL it directly)

        Also extracts the client's auth token (see the comment block
        below for the per-client-type sources) and builds
        self.rhnParent / self.rhnParentXMLRPC accordingly.
        """

        scheme = "http"
        # self.{caChain,httpProxy*,rhnParent} initialized in rhnShared.py
        # pylint: disable-next=invalid-name
        effectiveURI = self._getEffectiveURI()
        # pylint: disable-next=invalid-name
        effectiveURI_parts = urlparse(effectiveURI)
        # Fixup effectiveURI_parts, if effectiveURI is dirty.
        # We are doing this because the ubuntu clients request uris like
        # 'http://hostname//XMLRPC...'. See bug 1220399 for details.
        if (
            not effectiveURI_parts.scheme
            and effectiveURI_parts.netloc
            and effectiveURI_parts.netloc == "XMLRPC"
        ):
            # pylint: disable-next=invalid-name
            effectiveURI_parts = urlparse(
                urlunparse(
                    [
                        "",
                        "",
                        "/" + effectiveURI_parts.netloc + effectiveURI_parts.path,
                        effectiveURI_parts.params,
                        effectiveURI_parts.query,
                        effectiveURI_parts.fragment,
                    ]
                )
            )

        # The auth token is sent in either a header or in the query part of the URI:
        # SLE minions -> query part of the URI.
        # RHEL minions -> 'X-Mgr-Auth' header.
        # Debian -> Authorization (Basic Auth)
        #
        # Traditional SLE and RHEL clients uses 'X-RHN-Auth' header, but
        # no auth token is used in order to authenticate.
        if "X-Mgr-Auth" in self.req.headers_in:
            self.authToken = self.req.headers_in["X-Mgr-Auth"]
            del self.req.headers_in["X-Mgr-Auth"]
        elif (
            "Authorization" in self.req.headers_in
            and effectiveURI_parts.path.startswith("/rhn/manager/download/")
        ):
            # we need to remove Basic Auth, otherwise squid does not cache the package
            # so we convert it into token auth
            # The token is the login. So it is not secret
            try:
                lpw = ustr(
                    base64.b64decode(self.req.headers_in["Authorization"][6:])
                )  # "Basic " == 6 characters
                self.authToken = lpw[: lpw.find(":")]
                del self.req.headers_in["Authorization"]
            # pylint: disable-next=broad-exception-caught
            except Exception as e:
                log_error("Unable to decode Authorization header.", e)
        elif "X-RHN-Auth" not in self.req.headers_in:
            self.authToken = effectiveURI_parts.query

        if req.method == "GET":
            # Remember the full upstream URL (used later for the
            # accessibility check), then strip the query part so squid
            # caches the object independently of the token.
            # pylint: disable-next=consider-using-f-string
            self.fullRequestURL = "%s://%s%s" % (
                self.req.headers_in["REQUEST_SCHEME"],
                self.rhnParent,
                effectiveURI,
            )
            # pylint: disable-next=invalid-name
            effectiveURI_parts = urlparse(
                urlunparse(
                    [
                        effectiveURI_parts.scheme,
                        effectiveURI_parts.netloc,
                        effectiveURI_parts.path,
                        effectiveURI_parts.params,
                        "",
                        effectiveURI_parts.fragment,
                    ]
                )
            )
            scheme = "http"
            self.httpProxy = CFG.SQUID
            self.caChain = self.httpProxyUsername = self.httpProxyPassword = ""
            self.rhnParent = self.proxyAuth.hostname
        else:
            scheme = "https"

        self.rhnParentXMLRPC = urlunparse(
            (scheme, self.rhnParent, "/XMLRPC", "", "", "")
        )
        self.rhnParent = urlunparse((scheme, self.rhnParent) + effectiveURI_parts[2:])

        # pylint: disable-next=consider-using-f-string
        log_debug(2, "set self.rhnParent:       %s" % self.rhnParent)
        # pylint: disable-next=consider-using-f-string
        log_debug(2, "set self.rhnParentXMLRPC: %s" % self.rhnParentXMLRPC)
        if self.httpProxy:
            if self.httpProxyUsername and self.httpProxyPassword:
                log_debug(
                    2,
                    # pylint: disable-next=consider-using-f-string
                    "using self.httpProxy:     %s (authenticating)" % self.httpProxy,
                )
            else:
                log_debug(
                    2,
                    # pylint: disable-next=consider-using-f-string
                    "using self.httpProxy:     %s (non-authenticating)"
                    % self.httpProxy,
                )
        else:
            log_debug(2, "*not* using an http proxy")

    def handler(self):
        """Main handler to handle all requests pumped through this server.

        Flow: prepare transport headers -> serve local GETs (with client
        auth) -> attach this proxy's auth token -> forward to the parent,
        retrying once if the parent reports an expired/invalid proxy
        token -> process any login action -> relay the response back.
        """

        # pylint: disable=R0915
        log_debug(2)
        self._prepHandler()

        # pylint: disable-next=invalid-name
        _oto = rhnFlags.get("outputTransportOptions")

        # tell parent that we can follow redirects, even if client is not able to
        _oto["X-RHN-Transport-Capability"] = "follow-redirects=3"

        # No reason to put Host: in the header, the connection object will
        # do that for us

        # Add/modify the X-RHN-IP-Path header: append the client IP to
        # the comma-separated hop list (creating it if absent).
        ip_path = None
        if "X-RHN-IP-Path" in _oto:
            ip_path = _oto["X-RHN-IP-Path"]
        # pylint: disable-next=consider-using-f-string
        log_debug(4, "X-RHN-IP-Path is: %s" % repr(ip_path))
        client_ip = self.req.connection.remote_ip
        if ip_path is None:
            ip_path = client_ip
        else:
            ip_path += "," + client_ip
        _oto["X-RHN-IP-Path"] = ip_path

        # NOTE: X-RHN-Proxy-Auth described in broker/rhnProxyAuth.py
        if "X-RHN-Proxy-Auth" in _oto:
            log_debug(
                5,
                # pylint: disable-next=consider-using-f-string
                "X-RHN-Proxy-Auth currently set to: %s"
                % repr(_oto["X-RHN-Proxy-Auth"]),
            )
        else:
            log_debug(5, "X-RHN-Proxy-Auth is not set")

        # Debug-only: log any proxy auth tokens already present on the
        # incoming request; `tokens` is recomputed below before use.
        if "X-RHN-Proxy-Auth" in self.req.headers_in:
            tokens = []
            if "X-RHN-Proxy-Auth" in _oto:
                tokens = _oto["X-RHN-Proxy-Auth"].split(",")
            # pylint: disable-next=consider-using-f-string
            log_debug(5, "Tokens: %s" % tokens)

        # GETs: authenticate user, and service local GETs.
        # pylint: disable-next=invalid-name
        getResult = self.__local_GET_handler(self.req)
        if getResult is not None:
            # it's a GET request
            return getResult

        # 1. check cached version of the proxy login,
        #    snag token if there...
        #    if not... login...
        #    if good token, cache it.
        # 2. push into headers.
        # pylint: disable-next=invalid-name
        authToken = self.proxyAuth.check_cached_token()
        # pylint: disable-next=consider-using-f-string
        log_debug(5, "Auth token for this machine only! %s" % authToken)
        tokens = []

        # pylint: disable-next=invalid-name
        _oto = rhnFlags.get("outputTransportOptions")
        if "X-RHN-Proxy-Auth" in _oto:
            # pylint: disable-next=consider-using-f-string
            log_debug(5, "    (auth token prior): %s" % repr(_oto["X-RHN-Proxy-Auth"]))
            tokens = _oto["X-RHN-Proxy-Auth"].split(",")

        # list of tokens to be pushed into the headers.
        tokens.append(authToken)
        tokens = [t for t in tokens if t]

        _oto["X-RHN-Proxy-Auth"] = ",".join(tokens)
        # pylint: disable-next=consider-using-f-string
        log_debug(5, "    (auth token after): %s" % repr(_oto["X-RHN-Proxy-Auth"]))

        if self.fullRequestURL and self.authToken:
            # For RHEL Minions the auth token is not included in the fullRequestURL
            # because it was provided as 'X-Mgr-Auth' header.
            # In this case We need to append it to the URL to check if accessible
            # with the given auth token.
            # pylint: disable-next=invalid-name
            checkURL = self.fullRequestURL
            if not self.authToken in checkURL:
                # pylint: disable-next=invalid-name
                checkURL += "?" + self.authToken
            if not suseLib.accessible(checkURL):
                return apache.HTTP_FORBIDDEN
        if self.authToken:
            _oto["X-Suse-Auth-Token"] = self.authToken

        log_debug(3, "Trying to connect to parent")

        # Loops twice? Here's why:
        #   o If no errors, the loop is broken and we move on.
        #   o If an error, either we get a new token and try again,
        #     or we get a critical error and we fault.
        # pylint: disable-next=invalid-name,unused-variable
        for _i in range(2):
            self._connectToParent()  # part 1

            log_debug(4, "after _connectToParent")
            # Add the proxy version
            rhnFlags.get("outputTransportOptions")["X-RHN-Proxy-Version"] = str(
                _PROXY_VERSION
            )

            status = self._serverCommo()  # part 2

            # check for proxy authentication blowup.
            # pylint: disable-next=invalid-name
            respHeaders = self.responseContext.getHeaders()
            if not respHeaders or "X-RHN-Proxy-Auth-Error" not in respHeaders:
                # No proxy auth errors
                # XXX: need to verify that with respHeaders ==
                #      None that is is correct logic. It should be -taw
                break

            # pylint: disable-next=use-maxsplit-arg
            error = str(respHeaders["X-RHN-Proxy-Auth-Error"]).split(":")[0]

            # If a proxy other than this one needs to update its auth token
            # pass the error on up to it
            if (
                "X-RHN-Proxy-Auth-Origin" in respHeaders
                and respHeaders["X-RHN-Proxy-Auth-Origin"] != self.proxyAuth.hostname
            ):
                break

            # Expired/invalid auth token; go through the loop once again
            if error == "1003":  # invalid token
                msg = "SUSE Multi-Linux Manager Proxy Session Token INVALID -- bad!"
                log_error(msg)
                log_debug(0, msg)
            elif error == "1004":
                log_debug(
                    2,
                    "SUSE Multi-Linux Manager Proxy Session Token expired, acquiring new one.",
                )
            else:  # this should never happen.
                msg = (
                    # pylint: disable-next=consider-using-f-string
                    "SUSE Multi-Linux Manager Proxy login failed, error code is %s"
                    % error
                )
                log_error(msg)
                log_debug(0, msg)
                raise rhnFault(
                    1000,
                    _(
                        "SUSE Multi-Linux Manager Proxy error (issues with proxy login). "
                        "Please contact your system administrator."
                    ),
                )

            # Forced refresh of the proxy token
            rhnFlags.get("outputTransportOptions")["X-RHN-Proxy-Auth"] = (
                self.proxyAuth.check_cached_token(1)
            )
        else:  # for
            # The token could not be aquired
            log_debug(0, "Unable to acquire proxy authentication token")
            raise rhnFault(
                1000,
                _(
                    "SUSE Multi-Linux Manager Proxy error (unable to acquire proxy auth token). "
                    "Please contact your system administrator."
                ),
            )

        # Support for yum byte-range
        if status not in (apache.OK, apache.HTTP_PARTIAL_CONTENT):
            # pylint: disable-next=consider-using-f-string
            log_debug(1, "Leaving handler with status code %s" % status)
            return status

        self.__handleAction(self.responseContext.getHeaders())

        return self._clientCommo()

    def _prepHandler(self):
        """Delegate preparation (including the PROXY_AUTH expiration
        check) to the shared handler implementation."""
        SharedHandler._prepHandler(self)

    @staticmethod
    def _split_ks_url(req):
        """Parse kickstart options out of an incoming /ks/dist/... URI.
        URIs we care about look something like:
        /ks/dist/session/2xfe7113bc89f359001280dee1f4a020bc/
            ks-rhel-x86_64-server-6-6.5/Packages/rhnsd-4.9.3-2.el6.x86_64.rpm
        /ks/dist/ks-rhel-x86_64-server-6-6.5/Packages/
            rhnsd-4.9.3-2.el6.x86_64.rpm
        /ks/dist/org/1/ks-rhel-x86_64-server-6-6.5/Packages/
            rhnsd-4.9.3-2.el6.x86_64.rpm
        /ks/dist/ks-rhel-x86_64-server-6-6.5/child/sherr-child-1/Packages/
            rhnsd-4.9.3-2.el6.x86_64.rpm

        Returns (kickstart_label, action, params); action is "getPackage"
        for package fetches, None otherwise.
        """
        segments = req.path_info.split("/")
        params = {
            "child": None,
            "session": None,
            "orgId": None,
            "file": segments[-1],
        }

        if segments[2] == "org":
            params["orgId"] = segments[3]
            kickstart, marker = segments[4], segments[5]
        elif segments[2] == "session":
            params["session"] = segments[3]
            kickstart, marker = segments[4], segments[5]
        elif segments[3] == "child":
            params["child"] = segments[4]
            kickstart, marker = segments[2], segments[5]
        else:
            kickstart, marker = segments[2], segments[3]

        action = "getPackage" if marker == "Packages" else None
        return kickstart, action, params

    @staticmethod
    def _split_url(req):
        """Split an incoming URI into (req_type, channel, action, params).
        URI should look something like:
        /GET-REQ/rhel-i386-server-5/getPackage/autofs-5.0.1-0.rc2.143.el5_5.6.i386.rpm
        Returns (None, None, None, None) when the URI has too few parts.
        """
        parts = req.path_info.split("/")
        if len(parts) < 5:
            return (None, None, None, None)

        req_type, channel, action = parts[1], parts[2], parts[3]
        return (req_type, channel, action, parts[4:])

    # --- PRIVATE METHODS ---

    # pylint: disable-next=invalid-name
    def __handleAction(self, headers):
        """Inspect the upstream response's X-RHN-Action header and cache
        the client session token when the action is a login."""
        log_debug(2)
        if "X-RHN-Action" not in headers:
            # No action advertised; nothing for the proxy to do.
            return

        # pylint: disable-next=consider-using-f-string
        log_debug(2, "Action is %s" % headers["X-RHN-Action"])
        if headers["X-RHN-Action"] == "login":
            # A login: cache the session token for later local GETs.
            self.__cacheClientSessionToken(headers)

    # pylint: disable-next=invalid-name
    def __local_GET_handler(self, req):
        """GETs: authenticate user, and service local GETs.
        if not a local fetch, return None

        Recognized request types: tinyurl kickstart fetches, "ks-dist"
        kickstart fetches, and traditional "$RHN"/"GET-REQ" channel
        fetches (only the latter two require client authentication).
        """

        # pylint: disable-next=consider-using-f-string
        log_debug(2, "request method: %s" % req.method)
        # Early test to check if this is a request the proxy can handle
        # Can we serve this request?
        if req.method != "GET" or not CFG.PKG_DIR:
            # Don't know how to handle this
            return None

        # Tiny-url kickstart requests (for server kickstarts, aka not profiles)
        # have been name munged and we've already sent a HEAD request to the
        # Satellite to get a checksum for the rpm so we can find it in the
        # squid cache.
        # Original url looks like /ty/bSWE7qIq/Packages/policycoreutils-2.0.83
        #  -19.39.el6.x86_64.rpm which gets munged to be /ty-cksm/ddb43838ad58
        #  d74dc95badef543cd96459b8bb37ff559339de58ec8dbbd1f18b/Packages/polic
        #  ycoreutils-2.0.83-19.39.el6.x86_64.rpm
        args = req.path_info.split("/")
        # urlparse returns a ParseResult, index 2 is the path
        if re.search("^" + URI_PREFIX_KS_CHECKSUM, urlparse(self.rhnParent)[2]):
            # We *ONLY* locally cache RPMs for kickstarts
            if len(args) < 3 or args[2] != "Packages":
                return None
            req_type = "tinyurl"
            reqident = args[1]
            reqaction = "getPackage"
            reqparams = [args[-1]]
            self.cachedClientInfo = UserDictCase()
        elif len(args) > 3 and args[1] == "dist":
            # This is a kickstart request
            req_type = "ks-dist"
            reqident, reqaction, reqparams = self._split_ks_url(req)
            self.cachedClientInfo = UserDictCase()
        else:
            # Some other type of request
            (req_type, reqident, reqaction, reqparams) = self._split_url(req)
            if reqaction == "getPackage":
                # Re-join the remaining path segments into one file path.
                reqparams = tuple([os.path.join(*reqparams)])

        if req_type is None or (
            req_type not in ["$RHN", "GET-REQ", "tinyurl", "ks-dist"]
        ):
            # not a traditional RHN GET (i.e., it is an arbitrary get)
            # XXX: there has to be a more elegant way to do this
            return None

        # kickstarts don't auth...
        if req_type in ["$RHN", "GET-REQ"]:
            # --- AUTH. CHECK:
            # Check client authentication. If not authenticated, throw
            # an exception.
            token = self.__getSessionToken()
            self.__checkAuthSessionTokenCache(token, reqident)

            # Is this channel local?
            for ch in self.authChannels:
                # pylint: disable-next=invalid-name,invalid-name,invalid-name,unused-variable,unused-variable
                channel, _version, _isBaseChannel, isLocalChannel = ch[:4]
                if channel == reqident and str(isLocalChannel) == "1":
                    # Local channel
                    break
            else:
                # Not a local channel
                return None

        # --- LOCAL GET:
        # pylint: disable-next=invalid-name
        localFlist = CFG.PROXY_LOCAL_FLIST or []

        if reqaction not in localFlist:
            # Not an action we know how to handle
            return None

        # We have a match; we'll try to serve packages from the local
        # repository
        log_debug(3, "Retrieve from local repository.")
        log_debug(3, req_type, reqident, reqaction, reqparams)
        result = self.__callLocalRepository(req_type, reqident, reqaction, reqparams)
        if result is None:
            log_debug(3, "Not available locally; will try higher up the chain.")
        else:
            # Signal that we have to XMLRPC encode the response in apacheHandler
            rhnFlags.set("NeedEncoding", 1)

        return result

    @staticmethod
    # pylint: disable-next=invalid-name
    def __getSessionToken():
        """Return the session token stashed in rhnFlags; raise fault 33
        when the client supplied none."""
        log_debug(2)
        if rhnFlags.test("AUTH_SESSION_TOKEN"):
            return rhnFlags.get("AUTH_SESSION_TOKEN")
        raise rhnFault(33, "Missing session token")

    # pylint: disable-next=invalid-name
    def __cacheClientSessionToken(self, headers):
        """Extract the client session token from the response headers and
        push it to the proxy auth cache.  Returns the token dict, or None
        when the headers carry no client server ID."""
        log_debug(2)
        # Without a server ID we cannot associate the token with a client.
        if "X-RHN-Server-ID" not in headers:
            log_debug(3, "Client server ID not found in headers")
            # XXX: no client server ID in headers, should we care?
            # raise rhnFault(1000, _("Client Server ID not found in headers!"))
            return None
        self.clientServerId = headers["X-RHN-Server-ID"]

        # The session token is made up of every header whose name begins
        # with "x-rhn-auth" (case-insensitive).
        prefix = "x-rhn-auth"
        token = UserDictCase()
        for key in list(headers.keys()):
            if key[: len(prefix)].lower() != prefix:
                continue
            if key.lower() == "x-rhn-auth-channels":
                # Multivalued header
                values = self._get_header(key)
                token[key] = [entry.split(":") for entry in values]
            else:
                # Single-valued header
                token[key] = headers[key]

        # Record this proxy's clock skew relative to the server time
        # reported in the token.
        token["X-RHN-Auth-Proxy-Clock-Skew"] = time.time() - float(
            token["X-RHN-Auth-Server-Time"]
        )

        # Save the token
        self.proxyAuth.set_client_token(self.clientServerId, token)
        return token

    # pylint: disable-next=invalid-name
    def __callLocalRepository(self, req_type, identifier, funct, params):
        """Contacts the local repository and retrieves files.

        Dispatches on req_type:
          * "tinyurl" -> TinyUrlRepository (identifier is the tiny-url token)
          * "ks-dist" -> KickstartRepository (identifier is the kickstart
            label; params supplies orgId/child/session/file)
          * anything else -> plain channel Repository (identifier is the
            channel label; its version is resolved from self.authChannels)

        Returns the result of calling repository function `funct` with
        `params`, or None when the requested data is not held locally
        (rhnRepository.NotLocalError) so the caller can forward the request
        to the parent server.  Raises rhnFault(1000) when `funct` is not a
        valid repository function.
        """

        log_debug(2, req_type, identifier, funct, params)

        # Propagate the proxy auth and Host headers from the current
        # transport options into the client info handed to the repository.
        # NOTE: X-RHN-Proxy-Auth described in broker/rhnProxyAuth.py
        if "X-RHN-Proxy-Auth" in rhnFlags.get("outputTransportOptions"):
            self.cachedClientInfo["X-RHN-Proxy-Auth"] = rhnFlags.get(
                "outputTransportOptions"
            )["X-RHN-Proxy-Auth"]
        if "Host" in rhnFlags.get("outputTransportOptions"):
            self.cachedClientInfo["Host"] = rhnFlags.get("outputTransportOptions")[
                "Host"
            ]

        if req_type == "tinyurl":
            try:
                rep = rhnRepository.TinyUrlRepository(
                    identifier,
                    self.cachedClientInfo,
                    rhnParent=self.rhnParent,
                    rhnParentXMLRPC=self.rhnParentXMLRPC,
                    httpProxy=self.httpProxy,
                    httpProxyUsername=self.httpProxyUsername,
                    httpProxyPassword=self.httpProxyPassword,
                    caChain=self.caChain,
                    systemId=self.proxyAuth.get_system_id(),
                )
            except rhnRepository.NotLocalError:
                return None
        elif req_type == "ks-dist":
            try:
                rep = rhnRepository.KickstartRepository(
                    identifier,
                    self.cachedClientInfo,
                    rhnParent=self.rhnParent,
                    rhnParentXMLRPC=self.rhnParentXMLRPC,
                    httpProxy=self.httpProxy,
                    httpProxyUsername=self.httpProxyUsername,
                    httpProxyPassword=self.httpProxyPassword,
                    caChain=self.caChain,
                    orgId=params["orgId"],
                    child=params["child"],
                    session=params["session"],
                    systemId=self.proxyAuth.get_system_id(),
                )
            except rhnRepository.NotLocalError:
                return None
            # Only the requested file name is passed on to `funct`; the
            # remaining params were consumed by the constructor above.
            params = [params["file"]]
        else:
            # Find the channel version
            version = None
            for c in self.authChannels:
                ch, ver = c[:2]
                if ch == identifier:
                    version = ver
                    break

            # We already know he's subscribed to this channel
            # channel, so the version is non-null
            rep = rhnRepository.Repository(
                identifier,
                version,
                self.cachedClientInfo,
                rhnParent=self.rhnParent,
                rhnParentXMLRPC=self.rhnParentXMLRPC,
                httpProxy=self.httpProxy,
                httpProxyUsername=self.httpProxyUsername,
                httpProxyPassword=self.httpProxyPassword,
                caChain=self.caChain,
            )

        f = rep.get_function(funct)
        if not f:
            raise rhnFault(
                1000,
                _(
                    "SUSE Multi-Linux Manager Proxy configuration error: invalid function %s"
                )
                % funct,
            )

        # pylint: disable-next=consider-using-f-string
        log_debug(3, "Calling %s(%s)" % (funct, params))
        # params may still be None for zero-argument repository functions
        if params is None:
            params = ()
        try:
            ret = f(*params)
        except rhnRepository.NotLocalError:
            # The package is not local
            return None

        return ret

    # pylint: disable-next=invalid-name
    def __checkAuthSessionTokenCache(self, token, channel):
        """Authentication / authorize the channel.

        Validates the client's session `token` against the proxy's cached
        copy for that server ID, refreshing the cache through the parent
        when the cache misses or the tokens differ (the client may have
        logged in through a different load-balanced proxy).

        Side effects: sets self.clientServerId, self.cachedClientInfo and
        self.authChannels.  Raises rhnFault(33) for an invalid token,
        rhnFault(34) for an expired one and rhnFault(35) when `channel` is
        not among the channels the token authorizes.
        """

        log_debug(2, token, channel)
        self.clientServerId = token["X-RHN-Server-ID"]

        # pylint: disable-next=invalid-name
        cachedToken = self.proxyAuth.get_client_token(self.clientServerId)
        if not cachedToken:
            # maybe client logged in through different load-balanced proxy
            # try to update the cache and try again
            # pylint: disable-next=invalid-name
            cachedToken = self.proxyAuth.update_client_token_if_valid(
                self.clientServerId, token
            )

            if not cachedToken:
                msg = (
                    _("Invalid session key - server ID not found in cache: %s")
                    % self.clientServerId
                )
                log_error(msg)
                raise rhnFault(33, msg)

        self.cachedClientInfo = UserDictCase(cachedToken)

        # The clock skew was stored when the token was cached; pop it (and
        # the channel list) out before comparing against the client token.
        # pylint: disable-next=invalid-name
        clockSkew = self.cachedClientInfo["X-RHN-Auth-Proxy-Clock-Skew"]
        del self.cachedClientInfo["X-RHN-Auth-Proxy-Clock-Skew"]

        # Add the server id
        self.authChannels = self.cachedClientInfo["X-RHN-Auth-Channels"]
        del self.cachedClientInfo["X-RHN-Auth-Channels"]
        self.cachedClientInfo["X-RHN-Server-ID"] = self.clientServerId
        # pylint: disable-next=consider-using-f-string
        log_debug(4, "Retrieved token from cache: %s" % self.cachedClientInfo)

        # First element of each authChannels entry is the channel label.
        # pylint: disable-next=invalid-name
        authChannels = [x[0] for x in self.authChannels]
        # pylint: disable-next=consider-using-f-string
        log_debug(4, "Auth channels: '%s'" % authChannels)

        # Compare the two things
        if (
            not _dictEquals(token, self.cachedClientInfo, ["X-RHN-Auth-Channels"])
            or channel not in authChannels
        ):
            # Maybe the client logged in through a different load-balanced
            # proxy? Check validity of the token the client passed us.
            # pylint: disable-next=invalid-name
            updatedToken = self.proxyAuth.update_client_token_if_valid(
                self.clientServerId, token
            )
            # fix up the updated token the same way we did above
            if updatedToken:
                self.cachedClientInfo = UserDictCase(updatedToken)
                # pylint: disable-next=invalid-name
                clockSkew = self.cachedClientInfo["X-RHN-Auth-Proxy-Clock-Skew"]
                del self.cachedClientInfo["X-RHN-Auth-Proxy-Clock-Skew"]
                self.authChannels = self.cachedClientInfo["X-RHN-Auth-Channels"]
                del self.cachedClientInfo["X-RHN-Auth-Channels"]
                self.cachedClientInfo["X-RHN-Server-ID"] = self.clientServerId
                # pylint: disable-next=consider-using-f-string
                log_debug(4, "Retrieved token from cache: %s" % self.cachedClientInfo)

            if not updatedToken or not _dictEquals(
                token, self.cachedClientInfo, ["X-RHN-Auth-Channels"]
            ):
                log_debug(3, "Session tokens different")
                raise rhnFault(33)  # Invalid session key

        # Check the expiration, correcting for this proxy's clock skew
        # relative to the server that issued the token.
        # pylint: disable-next=invalid-name
        serverTime = float(token["X-RHN-Auth-Server-Time"])
        offset = float(token["X-RHN-Auth-Expire-Offset"])
        if time.time() > serverTime + offset + clockSkew:
            log_debug(3, "Session token has expired")
            raise rhnFault(34)  # Session key has expired

        # Only authorized channels are the ones stored in the cache.
        # pylint: disable-next=invalid-name
        authChannels = [x[0] for x in self.authChannels]
        # pylint: disable-next=consider-using-f-string
        log_debug(4, "Auth channels: '%s'" % authChannels)
        # Check the authorization
        if channel not in authChannels:
            # pylint: disable-next=consider-using-f-string
            log_debug(4, "Not subscribed to channel %s; unauthorized" % channel)
            raise rhnFault(35, _("Unauthorized channel access requested."))


# pylint: disable-next=invalid-name
def _dictEquals(d1, d2, exceptions=None):
    """Function that compare two dictionaries, ignoring certain keys"""
    exceptions = [x.lower() for x in (exceptions or [])]
    for k, v in list(d1.items()):
        if k.lower() in exceptions:
            continue
        if k not in d2 or d2[k] != v:
            return 0
    for k, v in list(d2.items()):
        if k.lower() in exceptions:
            continue
        if k not in d1 or d1[k] != v:
            return 0
    return 1


# ===============================================================================
0707010000000B000081B400000000000000000000000168DD3ED3000060CA000000000000000000000000000000000000002800000000spacewalk-proxy/broker/rhnRepository.py# pylint: disable=missing-module-docstring,invalid-name
# rhnRepository.py                         - Perform local repository functions.
# -------------------------------------------------------------------------------
# This module contains the functionality for providing local packages.
#
# Copyright (c) 2008--2017 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# -------------------------------------------------------------------------------

## language imports
import os
import time
import glob

try:
    # python 3
    import pickle as cPickle
except ImportError:
    # python 2
    import cPickle
import sys
from operator import truth

try:
    #  python 2
    import xmlrpclib
except ImportError:
    #  python3
    import xmlrpc.client as xmlrpclib

## local imports
from rhn import rpclib

## common imports
from spacewalk.common.rhnLog import log_debug
from spacewalk.common.rhnException import rhnFault
from spacewalk.common.rhnConfig import CFG
from spacewalk.common import rhnRepository
from spacewalk.common.rhnTranslate import _
from uyuni.common.rhnLib import parseRPMName
from uyuni.common.usix import raise_with_tb


PKG_LIST_DIR = os.path.join(CFG.PKG_DIR, "list")
PREFIX = "rhn"


class NotLocalError(Exception):
    """Raised when a requested file is not present (or not readable) in
    the proxy's local cache; callers typically catch this and return
    None so the request can be served by the parent server instead."""

    pass


class Repository(rhnRepository.Repository):
    # pylint: disable=R0902

    """Proxy local package repository lookup and manipulation code.

    Serves files for a single channel out of CFG.PKG_DIR.  Package
    lookups go through a pickled package-name -> candidate-paths mapping
    that is cached on disk (see _cacheObj / _getPkgListDir) and fetched
    from the parent server over XML-RPC when the cache misses or is
    corrupt.
    """

    def __init__(
        self,
        channelName,
        channelVersion,
        clientInfo,
        rhnParent=None,
        rhnParentXMLRPC=None,
        httpProxy=None,
        httpProxyUsername=None,
        httpProxyPassword=None,
        caChain=None,
    ):
        """Remembers the channel identity plus the connection details
        needed to reach the parent server when the local cache cannot
        answer a request."""

        log_debug(3, channelName)
        rhnRepository.Repository.__init__(self, channelName)
        self.functions = CFG.PROXY_LOCAL_FLIST
        self.channelName = channelName
        # pylint: disable-next=invalid-name
        self.channelVersion = channelVersion
        # pylint: disable-next=invalid-name
        self.clientInfo = clientInfo
        # pylint: disable-next=invalid-name
        self.rhnParent = rhnParent
        # pylint: disable-next=invalid-name
        self.rhnParentXMLRPC = rhnParentXMLRPC
        # pylint: disable-next=invalid-name
        self.httpProxy = httpProxy
        # pylint: disable-next=invalid-name
        self.httpProxyUsername = httpProxyUsername
        # pylint: disable-next=invalid-name
        self.httpProxyPassword = httpProxyPassword
        # pylint: disable-next=invalid-name
        self.caChain = caChain

    def getPackagePath(
        self, pkgFilename, redirect=0
    ):  # pylint: disable=unused-argument
        """OVERLOADS getPackagePath in common/rhnRepository.
        Returns complete path to an RPM file.
        Raises NotLocalError when the package is not in this channel's
        mapping or none of its candidate paths is readable.
        """

        log_debug(3, pkgFilename)
        # pylint: disable-next=invalid-name,consider-using-f-string
        mappingName = "package_mapping:%s:" % self.channelName
        mapping = self._cacheObj(
            mappingName, self.channelVersion, self.__channelPackageMapping, ()
        )

        # If the file name has parameters, it's a different kind of package.
        # Determine the architecture requested so we can construct an
        # appropriate filename.
        if isinstance(pkgFilename, list):
            # list form is treated as (name, version, release, arch)
            arch = pkgFilename[3]
            # Not certain if anything is needed here for Debian, but since what I've tested
            # works.   Leave it alone.
            if isSolarisArch(arch):
                # pylint: disable-next=consider-using-f-string
                pkgFilename = "%s-%s-%s.%s.pkg" % (
                    pkgFilename[0],
                    pkgFilename[1],
                    pkgFilename[2],
                    pkgFilename[3],
                )

        if pkgFilename not in mapping:
            # pylint: disable-next=consider-using-f-string
            log_debug(3, "Package not in mapping: %s" % pkgFilename)
            raise NotLocalError
        # A list of possible file paths. Always a list, channel mappings are
        # cleared on package upgrade so we don't have to worry about the old
        # behavior of returning a string
        # pylint: disable-next=invalid-name
        filePaths = mapping[pkgFilename]
        # Can we see a file at any of the possible filepaths?
        # pylint: disable-next=invalid-name
        for filePath in filePaths:
            # pylint: disable-next=invalid-name,consider-using-f-string
            filePath = "%s/%s" % (CFG.PKG_DIR, filePath)
            log_debug(4, "File path", filePath)
            if os.access(filePath, os.R_OK):
                return filePath
        # pylint: disable-next=consider-using-f-string
        log_debug(4, "Package not found locally: %s" % pkgFilename)
        raise NotLocalError(filePaths[0], pkgFilename)

    # pylint: disable-next=arguments-renamed
    def getSourcePackagePath(self, pkgFilename):
        """OVERLOADS getSourcePackagePath in common/rhnRepository.
        snag src.rpm and nosrc.rpm from local repo, after ensuring
        we are authorized to fetch it.
        """

        log_debug(3, pkgFilename)
        if not (pkgFilename.endswith(".src.rpm") or pkgFilename.endswith(".nosrc.rpm")):
            raise rhnFault(17, _("Invalid SRPM package requested: %s") % pkgFilename)

        # Connect to the server to get an authorization for downloading this
        # package
        server = rpclib.Server(
            self.rhnParentXMLRPC,
            proxy=self.httpProxy,
            username=self.httpProxyUsername,
            password=self.httpProxyPassword,
        )
        if self.caChain:
            server.add_trusted_cert(self.caChain)

        try:
            retval = server.proxy.package_source_in_channel(
                pkgFilename, self.channelName, self.clientInfo
            )
        except xmlrpclib.Fault as e:
            raise_with_tb(
                rhnFault(1000, _("Error retrieving source package: %s") % str(e)),
                sys.exc_info()[2],
            )

        if not retval:
            raise rhnFault(17, _("Invalid SRPM package requested: %s") % pkgFilename)

        # BUGFIX: these two branches were previously swapped — ".src.rpm"
        # names were stripped of 10 characters and tagged "nosrc" (and
        # vice versa).  Strip the extension that is actually present.
        if pkgFilename.endswith(".src.rpm"):
            # Strip the 8-character ".src.rpm" suffix
            nvrea = list(parseRPMName(pkgFilename[:-8]))
            nvrea.append("src")
        else:
            # The first check in this function guarantees the name ends in
            # ".nosrc.rpm"; strip its 10 characters
            nvrea = list(parseRPMName(pkgFilename[:-10]))
            nvrea.append("nosrc")

        # pylint: disable-next=invalid-name
        filePaths = computePackagePaths(nvrea, source=1, prepend=PREFIX)
        # pylint: disable-next=invalid-name
        for filePath in filePaths:
            # pylint: disable-next=invalid-name,consider-using-f-string
            filePath = "%s/%s" % (CFG.PKG_DIR, filePath)
            log_debug(4, "File path", filePath)
            if os.access(filePath, os.R_OK):
                return filePath
        # pylint: disable-next=consider-using-f-string
        log_debug(4, "Source package not found locally: %s" % pkgFilename)
        raise NotLocalError(filePaths[0], pkgFilename)

    # pylint: disable-next=invalid-name,invalid-name,invalid-name
    def _cacheObj(self, fileName, version, dataProducer, params=None):
        """The real workhorse for all flavors of listall
        It tries to pull data out of a file; if it doesn't work,
        it calls the data producer with the specified params to generate
        the data, which is also cached.

        Returns a string from a cache file or, if the cache file is not
        there, calls dataProducer to generate the object and caches the
        results
        """

        log_debug(4, fileName, version, params)
        # pylint: disable-next=invalid-name
        fileDir = self._getPkgListDir()
        # pylint: disable-next=invalid-name,consider-using-f-string
        filePath = "%s/%s-%s" % (fileDir, fileName, version)
        if os.access(filePath, os.R_OK):
            try:
                # Slurp the file
                f = open(filePath, "rb")
                data = f.read()
                f.close()
                # pylint: disable-next=invalid-name
                stringObject = cPickle.loads(data)
                return stringObject
            except (IOError, cPickle.UnpicklingError):  # corrupted cache file
                pass  # do nothing, we'll fetch / write it again

        # The file's not there; query the DB or whatever dataproducer used.
        if params is None:
            params = ()
        # pylint: disable-next=invalid-name
        stringObject = dataProducer(*params)
        # Cache the thing
        cache(cPickle.dumps(stringObject, 1), fileDir, fileName, version)
        # Return the string
        return stringObject

    @staticmethod
    # pylint: disable-next=invalid-name
    def _getPkgListDir():
        """Creates and returns the directory for cached lists of packages.
        Used by _cacheObj.

        XXX: Problem exists here. If PKG_LIST_DIR can't be created
        due to ownership... this is bad... need to fix.
        """

        log_debug(3, PKG_LIST_DIR)
        if not os.access(PKG_LIST_DIR, os.R_OK | os.X_OK):
            os.makedirs(PKG_LIST_DIR)
        return PKG_LIST_DIR

    # pylint: disable-next=invalid-name
    def _listPackages(self):
        """Generates a list of objects by calling the function.
        Uses a GETServer with the client's own headers so the request is
        made on the client's behalf."""
        server = rpclib.GETServer(
            self.rhnParentXMLRPC,
            proxy=self.httpProxy,
            username=self.httpProxyUsername,
            password=self.httpProxyPassword,
            headers=self.clientInfo,
        )
        if self.caChain:
            server.add_trusted_cert(self.caChain)
        return server.listAllPackagesChecksum(self.channelName, self.channelVersion)

    # pylint: disable-next=invalid-name
    def __channelPackageMapping(self):
        """fetch package list on behalf of the client"""

        log_debug(
            6,
            self.rhnParentXMLRPC,
            self.httpProxy,
            self.httpProxyUsername,
            self.httpProxyPassword,
        )
        log_debug(6, self.clientInfo)

        try:
            # pylint: disable-next=invalid-name
            packageList = self._listPackages()
        except xmlrpclib.ProtocolError as e:
            errcode, errmsg = rpclib.reportError(e.headers)
            raise_with_tb(
                rhnFault(
                    1000,
                    # pylint: disable-next=consider-using-f-string
                    "SpacewalkProxy error (xmlrpclib.ProtocolError): "
                    "errode=%s; errmsg=%s" % (errcode, errmsg),
                ),
                sys.exc_info()[2],
            )

        # Hash the list: package file name -> list of candidate paths
        # pylint: disable-next=invalid-name
        _hash = {}
        for package in packageList:
            arch = package[4]

            extension = "rpm"
            if isSolarisArch(arch):
                extension = "pkg"
            if isDebianArch(arch):
                extension = "deb"

            # pylint: disable-next=consider-using-f-string
            filename = "%s-%s-%s.%s.%s" % (
                package[0],
                package[1],
                package[2],
                package[4],
                extension,
            )
            # if the package contains checksum info
            # NOTE(review): the checksum is read from index 7 while the
            # guard only requires more than 6 fields; presumably the server
            # always sends either 6 or 8+ fields per package -- confirm.
            if len(package) > 6:
                # pylint: disable-next=invalid-name
                filePaths = computePackagePaths(
                    package, source=0, prepend=PREFIX, checksum=package[7]
                )
            else:
                # pylint: disable-next=invalid-name
                filePaths = computePackagePaths(package, source=0, prepend=PREFIX)
            _hash[filename] = filePaths

        if CFG.DEBUG > 4:
            log_debug(
                5,
                # pylint: disable-next=consider-using-f-string
                "Mapping: %s[...snip snip...]%s" % (str(_hash)[:40], str(_hash)[-40:]),
            )
        return _hash


class KickstartRepository(Repository):
    """Kickstarts always end up pointing to a channel that they're getting
    rpms from. Lookup what channel that is and then just use the regular
    repository"""

    def __init__(
        self,
        kickstart,
        clientInfo,
        rhnParent=None,
        rhnParentXMLRPC=None,
        httpProxy=None,
        httpProxyUsername=None,
        httpProxyPassword=None,
        caChain=None,
        orgId=None,
        child=None,
        session=None,
        systemId=None,
    ):
        """Resolves the kickstart to a (channel, version) mapping, then
        initializes the plain channel Repository with it."""
        log_debug(3, kickstart)

        # pylint: disable-next=invalid-name
        self.systemId = systemId
        self.kickstart = kickstart
        # pylint: disable-next=invalid-name
        self.ks_orgId = orgId
        self.ks_child = child
        self.ks_session = session

        # have to look up channel name and version for this kickstart
        # we have no equivalent to the channel version for kickstarts,
        # expire the cache after an hour
        # pylint: disable-next=invalid-name,consider-using-f-string
        fileName = "kickstart_mapping:%s-%s-%s-%s:" % (
            str(kickstart),
            str(orgId),
            str(child),
            str(session),
        )

        mapping = self._lookupKickstart(
            fileName,
            rhnParentXMLRPC,
            httpProxy,
            httpProxyUsername,
            httpProxyPassword,
            caChain,
        )
        # Everything else is delegated to the channel the kickstart maps to.
        Repository.__init__(
            self,
            mapping["channel"],
            mapping["version"],
            clientInfo,
            rhnParent,
            rhnParentXMLRPC,
            httpProxy,
            httpProxyUsername,
            httpProxyPassword,
            caChain,
        )

    # pylint: disable-next=invalid-name
    def _lookupKickstart(
        self,
        # pylint: disable-next=invalid-name
        fileName,
        # pylint: disable-next=invalid-name
        rhnParentXMLRPC,
        # pylint: disable-next=invalid-name
        httpProxy,
        # pylint: disable-next=invalid-name
        httpProxyUsername,
        # pylint: disable-next=invalid-name
        httpProxyPassword,
        # pylint: disable-next=invalid-name
        caChain,
    ):
        """Returns the {channel, version, expires} mapping for this
        kickstart: from the on-disk pickle cache when still fresh,
        otherwise by asking the parent server via _getMapping (raising
        NotLocalError when that fails)."""
        # pylint: disable-next=invalid-name
        fileDir = self._getPkgListDir()
        # pylint: disable-next=invalid-name,consider-using-f-string
        filePath = "%s/%s-1" % (fileDir, fileName)
        mapping = None
        if os.access(filePath, os.R_OK):
            try:
                # Slurp the file
                try:
                    # Try text mode first; fall back to binary when the
                    # pickled data is not text-decodable.
                    # pylint: disable-next=unspecified-encoding
                    with open(filePath, "r") as f:
                        mapping = cPickle.loads(f.read())
                except (UnicodeDecodeError, TypeError):
                    with open(filePath, "rb") as f:
                        mapping = cPickle.loads(f.read())
            except (IOError, cPickle.UnpicklingError):  # corrupt cached file
                mapping = None  # ignore it, we'll get and write it again

        now = int(time.time())
        if not mapping or mapping["expires"] < now:
            # Can't use the normal GETServer handler because there is no client
            # to auth. Instead this is something the Proxy has to be able to
            # do, so read the serverid and send that up.
            server = rpclib.Server(
                rhnParentXMLRPC,
                proxy=httpProxy,
                username=httpProxyUsername,
                password=httpProxyPassword,
            )
            if caChain:
                server.add_trusted_cert(caChain)
            try:
                response = self._getMapping(server)
                mapping = {
                    "channel": str(response["label"]),
                    "version": str(response["last_modified"]),
                    "expires": int(time.time()) + 3600,
                }  # 1 hour from now
            except Exception:
                # something went wrong. Punt, we just won't serve this request
                # locally
                # pylint: disable-next=raise-missing-from
                raise NotLocalError

            # Cache the thing
            cache(cPickle.dumps(mapping, 1), fileDir, fileName, "1")

        return mapping

    def _listPackages(self):
        """Generates a list of objects by calling the function"""
        # Can't use the normal GETServer handler because there is no client
        # to auth. Instead this is something the Proxy has to be able to do,
        # so read the serverid and send that up.
        server = rpclib.Server(
            self.rhnParentXMLRPC,
            proxy=self.httpProxy,
            username=self.httpProxyUsername,
            password=self.httpProxyPassword,
        )
        if self.caChain:
            server.add_trusted_cert(self.caChain)
        # Versionless package listing from Server. This saves us from erroring
        # unnecessarily if the channel has changed since the kickstart mapping.
        # No problem, newer channel listings will work fine with kickstarts
        # unless they have removed the kernel or something, in which case it's
        # not supposed to work.
        # Worst case scenario is that we cache listing using an older version
        # than it actually is, and the next time we serve a file from the
        # regular Repository it'll get replace with the same info but newer
        # version in filename.
        return server.proxy.listAllPackagesKickstart(self.channelName, self.systemId)

    # pylint: disable-next=invalid-name
    def _getMapping(self, server):
        """Generate a hash that tells us what channel this
        kickstart is looking at. We have no equivalent to channel version,
        so expire the cached file after an hour.
        The XML-RPC call used depends on which lookup key was supplied:
        orgId, then session, then child channel, then the plain kickstart.
        """
        if self.ks_orgId:
            return server.proxy.getKickstartOrgChannel(
                self.kickstart, self.ks_orgId, self.systemId
            )
        if self.ks_session:
            return server.proxy.getKickstartSessionChannel(
                self.kickstart, self.ks_session, self.systemId
            )
        if self.ks_child:
            return server.proxy.getKickstartChildChannel(
                self.kickstart, self.ks_child, self.systemId
            )
        return server.proxy.getKickstartChannel(self.kickstart, self.systemId)


class TinyUrlRepository(KickstartRepository):
    # pylint: disable=W0233,W0231

    """TinyURL kickstarts have actually already made a HEAD request up to the
    Satellite to get the checksum for the rpm, however we can't just use
    that data because the epoch information is not in the filename so we'd
    never find files with a non-None epoch. Instead do the same thing we do
    for non-tiny-urlified kickstarts and look up what channel it maps to."""

    def __init__(
        self,
        tinyurl,
        clientInfo,
        rhnParent=None,
        rhnParentXMLRPC=None,
        httpProxy=None,
        httpProxyUsername=None,
        httpProxyPassword=None,
        caChain=None,
        systemId=None,
    ):
        """Resolves the tiny-url token to a (channel, version) mapping and
        initializes the plain channel Repository with it.  Intentionally
        skips KickstartRepository.__init__ (hence the W0231 disable)."""
        log_debug(3, tinyurl)

        self.systemId = systemId
        self.tinyurl = tinyurl

        # have to look up channel name and version for this kickstart
        # we have no equivalent to the channel version for kickstarts,
        # expire the cache after an hour
        # pylint: disable-next=invalid-name,consider-using-f-string
        fileName = "tinyurl_mapping:%s:" % (str(tinyurl))

        # Reuses KickstartRepository's cache-or-fetch lookup; only
        # _getMapping below differs.
        mapping = self._lookupKickstart(
            fileName,
            rhnParentXMLRPC,
            httpProxy,
            httpProxyUsername,
            httpProxyPassword,
            caChain,
        )
        Repository.__init__(
            self,
            mapping["channel"],
            mapping["version"],
            clientInfo,
            rhnParent,
            rhnParentXMLRPC,
            httpProxy,
            httpProxyUsername,
            httpProxyPassword,
            caChain,
        )

    def _getMapping(self, server):
        # Ask the parent which channel this tiny-url token points at.
        return server.proxy.getTinyUrlChannel(self.tinyurl, self.systemId)


# pylint: disable-next=invalid-name
def isSolarisArch(arch):
    """
    Returns true if the given arch string represents a Solaris architecture.
    """
    # Substring containment is equivalent to the historical find() != -1.
    return "solaris" in arch


# pylint: disable-next=invalid-name
def isDebianArch(arch):
    """
    Returns true if the given arch string represents a Debian architecture.
    """
    # endswith() matches the historical arch[-4:] == "-deb" slice compare.
    return arch.endswith("-deb")


# pylint: disable-next=invalid-name
def computePackagePaths(nvrea, source=0, prepend="", checksum=None):
    """Finds the appropriate candidate paths, prepending something if needed.

    nvrea is (name, version, release, epoch, arch).  Returns a list of
    relative paths, most-preferred first: a checksum-based path (binary
    packages with a known checksum only) followed by the legacy
    NEVRA-based path.
    """

    def collapse(template):
        # Drop empty components so duplicated (and leading) "/" vanish,
        # e.g. when `prepend` is empty or already ends with a slash.
        return "/".join(piece for piece in template.split("/") if piece)

    name = nvrea[0]
    release = nvrea[2]

    if source:
        # Source packages always live under SRPMS with the "src" arch.
        dirarch, pkgarch = "SRPMS", "src"
    else:
        dirarch = pkgarch = nvrea[4]

    # Pick the file extension from the architecture family.
    extension = "rpm"
    if "solaris" in pkgarch:
        extension = "pkg"
    if pkgarch.endswith("-deb"):
        extension = "deb"

    # Fold a non-empty epoch into the version directory component.
    version = nvrea[1]
    epoch = nvrea[3]
    if epoch not in [None, ""]:
        version = str(epoch) + ":" + version

    paths = []
    # The checksum-based layout avoids collisions when packages share a
    # NEVRA but differ in content, and matches the server-side
    # /var/satellite/redhat/NULL/* layout.  The checksum can't be looked
    # up reliably for source packages, so those get only the legacy path.
    if checksum and not source:
        tmpl = collapse(prepend + "/%s/%s/%s-%s/%s/%s/%s-%s-%s.%s.%s")
        paths.append(
            tmpl
            % (
                checksum[:3],
                name,
                version,
                release,
                dirarch,
                checksum,
                name,
                nvrea[1],
                release,
                pkgarch,
                extension,
            )
        )
    tmpl = collapse(prepend + "/%s/%s-%s/%s/%s-%s-%s.%s.%s")
    paths.append(
        tmpl
        % (name, version, release, dirarch, name, nvrea[1], release, pkgarch, extension)
    )
    return paths


# pylint: disable-next=invalid-name
def cache(stringObject, directory, filename, version):
    """Caches stringObject into a file and removes older files.

    stringObject: the (pickled) bytes to store.  The data is written to a
    uniquely-named temp file and then renamed onto
    "<directory>/<filename>-<version>", so readers never observe a
    partially written cache file.  Cached copies of the same filename
    whose names sort lexically before the new path are removed afterwards.
    """

    # The directory should be readable, writable, seekable
    if not os.access(directory, os.R_OK | os.W_OK | os.X_OK):
        os.makedirs(directory)
    # pylint: disable-next=invalid-name,consider-using-f-string
    filePath = "%s/%s-%s" % (directory, filename, version)
    # Create a temp file based on the filename, version and stuff
    # pylint: disable-next=consider-using-f-string
    tempfile = "%s-%.20f" % (filePath, time.time())
    # Try to create the temp file
    tries = 10
    while tries > 0:
        # Try to create this new file; O_EXCL guarantees we never reuse a
        # file created by a concurrent writer.
        try:
            fd = os.open(tempfile, os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644)
        except OSError as e:
            if e.errno == 17:
                # File exists (EEXIST); extend the name and give it another try
                tries = tries - 1
                # pylint: disable-next=consider-using-f-string
                tempfile = tempfile + "%.20f" % time.time()
                continue
            # Another error
            raise
        else:
            # We've got the file; everything's nice and dandy
            break
    else:
        # while exhausted every try without a break:
        # Could not create the file
        # pylint: disable-next=broad-exception-raised
        raise Exception("Could not create the file")
    # Write the object into the cache
    os.write(fd, stringObject)
    os.close(fd)
    # Now rename the temp file atomically over the final path
    os.rename(tempfile, filePath)
    # Expire the cached copies
    # pylint: disable-next=invalid-name,consider-using-f-string
    _list = glob.glob("%s/%s-*" % (directory, filename))
    # pylint: disable-next=invalid-name
    for _file in _list:
        if _file < filePath:
            # Lexically older than the file we just wrote
            os.unlink(_file)
0707010000000C000041FD00000000000000000000000368DD3ED300000000000000000000000000000000000000000000001400000000spacewalk-proxy/etc0707010000000D000041FD00000000000000000000000268DD3ED300000000000000000000000000000000000000000000001E00000000spacewalk-proxy/etc/slp.reg.d0707010000000E000081B400000000000000000000000168DD3ED3000001A4000000000000000000000000000000000000003300000000spacewalk-proxy/etc/slp.reg.d/susemanagerproxy.reg#############################################################################
#
# OpenSLP registration file
#
# register SUSE Manager proxy
#
#############################################################################

# Register the SUSE Manager server, if it is running
service:registration.suse:manager://$HOSTNAME/XMLRPC,en,65535
tcp-port=443
type=proxy
description=SUSE Manager Proxy registration URL for clients
0707010000000F000041FD00000000000000000000000268DD3ED300000000000000000000000000000000000000000000001B00000000spacewalk-proxy/httpd-conf07070100000010000081B400000000000000000000000168DD3ED3000000BB000000000000000000000000000000000000002400000000spacewalk-proxy/httpd-conf/Makefile# Makefile for installation of the httpd configuration files
#

TOP	= ..

INSTALL_FILES	= $(wildcard *.conf)
INSTALL_DEST	= /etc/httpd/conf.d

EXTRA_DIRS  =

include $(TOP)/Makefile.defs
07070100000011000081B400000000000000000000000168DD3ED3000006F4000000000000000000000000000000000000003400000000spacewalk-proxy/httpd-conf/smlm-proxy-forwards.conf# SUSE Multi-Linux Manager Proxy

# Proxy passes to the server. These are not cached locally!

# Cobbler
ProxyPass /cobbler_api https://{{ SERVER }}/download/cobbler_api
ProxyPassReverse /cobbler_api https://{{ SERVER }}/download/cobbler_api
RewriteRule ^/cblr/svc/op/ks/(.*)$ /download/$0 [R,L]
RewriteRule ^/cblr/svc/op/autoinstall/(.*)$ /download/$0 [R,L]
ProxyPass /cblr https://{{ SERVER }}/cblr
ProxyPassReverse /cblr https://{{ SERVER }}/cblr
ProxyPass /cobbler https://{{ SERVER }}/cobbler
ProxyPassReverse /cobbler https://{{ SERVER }}/cobbler

# redirect API calls to the server
ProxyPass /rhn/manager/api https://{{ SERVER }}/rhn/manager/api
ProxyPassReverse /rhn/manager/api https://{{ SERVER }}/rhn/manager/api

# Anonymous proxy passes to the server, locally cached via internal loop

# Use our squid cache for any proxypass to localhost
ProxyRemote http://localhost http://localhost:8080
# Anything received on proxyInternalLoop gets forwarded to upstream server
ProxyPass /proxyInternalLoop https://{{ SERVER }}
ProxyPassReverse /proxyInternalLoop https://{{ SERVER }}

# Proxy pass following to ourselves via proxyInternalLoop path. With above it will go through our squid to the server
ProxyPass /os-images http://localhost/proxyInternalLoop/os-images
ProxyPass /tftp http://localhost/proxyInternalLoop/tftp
ProxyPass /saltboot http://localhost/proxyInternalLoop/saltboot
ProxyPass /pub http://localhost/proxyInternalLoop/pub

# Reverses, needed for correct 301 responses to point to the proxy
ProxyPassReverse /os-images http://localhost/proxyInternalLoop/os-images
ProxyPassReverse /tftp http://localhost/proxyInternalLoop/tftp
ProxyPassReverse /saltboot http://localhost/proxyInternalLoop/saltboot
ProxyPassReverse /pub http://localhost/proxyInternalLoop/pub
07070100000012000081B400000000000000000000000168DD3ED300000608000000000000000000000000000000000000003500000000spacewalk-proxy/httpd-conf/spacewalk-proxy-wsgi.conf#
# Spacewalk proxy
#

<Directory /usr/share/rhn>
    SetEnv HANDLER_TYPE "proxy-broker"
    <IfVersion <= 2.2>
        Order allow,deny
        Allow from all
    </IfVersion>
    <IfVersion >= 2.4>
        Require all granted
    </IfVersion>
</Directory>

WSGIPythonPath "/usr/share/rhn"

<IfVersion >= 2.4>
    <Directory /usr/share/rhn/wsgi>
        Require all granted
    </Directory>
</IfVersion>

<LocationMatch "^/*">
    DirectoryIndex index.html index.htm index.html.var index.shtml index.php index.php4 index.php3 index.phtml index.cgi
</LocationMatch>

# Spacewalk proxy broker

# Allow the Authorization header to be passed to the proxy script
WSGIPassAuthorization On

# RPC STUFF
WSGIScriptAlias /rhn/manager/download /usr/share/rhn/wsgi/xmlrpc.py
WSGIScriptAlias /XMLRPC /usr/share/rhn/wsgi/xmlrpc.py
WSGIScriptAlias /rpc /usr/share/rhn/wsgi/xmlrpc.py
# rhnpush
WSGIScriptAlias /APP /usr/share/rhn/wsgi/xmlrpc.py
WSGIScriptAlias /PACKAGE-PUSH /usr/share/rhn/wsgi/xmlrpc.py
# kickstarts via cobbler
WSGIScriptAlias /download /usr/share/rhn/wsgi/xmlrpc.py
WSGIScriptAlias /ty /usr/share/rhn/wsgi/xmlrpc.py
WSGIScriptAlias /ty-cksm /usr/share/rhn/wsgi/xmlrpc.py
# bare metal kickstart
WSGIScriptAlias /ks /usr/share/rhn/wsgi/xmlrpc.py

# others
WSGIScriptAlias /SAT /usr/share/rhn/wsgi/xmlrpc.py
WSGIScriptAlias /SAT-DUMP-INTERNAL /usr/share/rhn/wsgi/xmlrpc.py

# Spacewalk proxy redirect
WSGIScriptAlias /XMLRPC_REDIRECT /usr/share/rhn/wsgi/xmlrpc_redirect.py
WSGIScriptAlias /XMLRPC_SSL /usr/share/rhn/wsgi/xmlrpc_redirect.py

07070100000013000081B400000000000000000000000168DD3ED30000048D000000000000000000000000000000000000003000000000spacewalk-proxy/httpd-conf/spacewalk-proxy.conf#
# SUSE Multi-Linux Manager proxy HTTP configuration
#

<IfModule prefork.c>
    # bug #503187
    MaxRequestsPerChild  200
    # bsc#1244552
    MaxRequestWorkers 150
</IfModule>

<Directory "/srv/www/htdocs/docs/*">
    SetEnv HANDLER_TYPE "proxy-docs"
    Options FollowSymLinks
    <IfVersion <= 2.2>
        Order allow,deny
        Allow from all
    </IfVersion>
    <IfVersion >= 2.4>
        Require all granted
    </IfVersion>
</Directory>

<LocationMatch "^/docs/*">
    SetHandler None
</LocationMatch>

<LocationMatch "^/icons/*">
    SetHandler None
</LocationMatch>

<LocationMatch "^/error/*">
    SetHandler None
</LocationMatch>

<LocationMatch "^/$">
    SetHandler None
</LocationMatch>

<IfModule mod_rewrite.c>
   RewriteEngine on

   # Disable TRACE and TRACK
   RewriteCond %{REQUEST_METHOD} ^(TRACE|TRACK)
   RewriteRule .* - [F]

   # Redirect some http page to https for security reasons
   RewriteCond %{SERVER_PORT} 80
   RewriteRule ^/rhn/?$ https://%{SERVER_NAME}/rhn/manager/login  [R,L]
</IfModule>

SSLProxyEngine on

# Uncomment to enable HSTS
# Header always set Strict-Transport-Security "max-age=63072000; includeSubDomains"
07070100000014000041FD00000000000000000000000268DD3ED300000000000000000000000000000000000000000000001A00000000spacewalk-proxy/logrotate07070100000015000081B400000000000000000000000168DD3ED3000000CE000000000000000000000000000000000000002300000000spacewalk-proxy/logrotate/Makefile# Makefile for installation of the logrotation configuration files
#

# what is the backend top dir
TOP	= ..

INSTALL_FILES	= $(wildcard rhn-*)
INSTALL_DEST	= /etc/logrotate.d

include $(TOP)/Makefile.defs
07070100000016000081B400000000000000000000000168DD3ED3000000B5000000000000000000000000000000000000002B00000000spacewalk-proxy/logrotate/rhn-proxy-broker# /etc/logrotate.d/rhn-proxy-broker
#

/var/log/rhn/rhn_proxy_broker.log {
    rotate 5
    copytruncate
    compress
    notifempty
    size 10M
    missingok
    su wwwrun www
}

07070100000017000081B400000000000000000000000168DD3ED3000000B9000000000000000000000000000000000000002D00000000spacewalk-proxy/logrotate/rhn-proxy-redirect# /etc/logrotate.d/rhn-proxy-redirect
#

/var/log/rhn/rhn_proxy_redirect.log {
    rotate 5
    copytruncate
    compress
    notifempty
    size 10M
    missingok
    su wwwrun www
}

07070100000018000081B400000000000000000000000168DD3ED3000000D8000000000000000000000000000000000000002800000000spacewalk-proxy/mgr-proxy-ssh-force-cmd#!/bin/bash -fue
# Split the client's original command (provided by sshd in
# SSH_ORIGINAL_COMMAND) into words: $1 becomes the command, the rest its
# arguments.  NOTE(review): relies on shell word splitting, so arguments
# containing whitespace are split as well.
set -- $SSH_ORIGINAL_COMMAND
cmd="$1"
shift
# Whitelist enforcement: only these exact commands may be executed by the
# tunnel user; anything else is rejected with a non-zero exit.
case "$cmd" in
  '/usr/bin/scp'|'/usr/bin/ssh'|'cat'|'/usr/bin/ssh-keygen') exec "$cmd" "$@" ;;
  *) echo "ERROR: command not allowed" >&2; exit 1 ;;
esac
07070100000019000081FD00000000000000000000000168DD3ED3000013F6000000000000000000000000000000000000002800000000spacewalk-proxy/mgr-proxy-ssh-push-init#!/bin/bash

# Print usage information and terminate.
# NOTE(review): always exits with status 1, even when help was explicitly
# requested with -h (the "exit 0" in the -h option handler is therefore
# never reached) — confirm whether that is intentional.
print_help() {
    cat <<HELP
USAGE: proxy-ssh-push-init [options]

options:
  -k path to existing key
  -s only configure sshd and exit
  -a only authorize parent key and exit
  -h show this help message and exit
HELP
    exit 1
}

SYSCONFIG_DIR=/etc/sysconfig/rhn
UP2DATE_FILE=$SYSCONFIG_DIR/up2date
RHN_PARENT=$(awk -F= '/serverURL=/ {split($2, a, "/")} END { print a[3]}' $UP2DATE_FILE)
PROTO=$(awk -F= '/serverURL=/ {split($2, a, "/"); split(a[1], b, ":")} END { print b[1]}' $UP2DATE_FILE)
HTMLPUB_DIR=/srv/www/htdocs/pub

SSH_PUSH_KEY_FILE="id_susemanager_ssh_push"
SSH_PUSH_USER="mgrsshtunnel"
SSH_PUSH_USER_HOME="/var/lib/spacewalk/$SSH_PUSH_USER"
SSH_PUSH_KEY_DIR="$SSH_PUSH_USER_HOME/.ssh"

# Create the ssh-push tunnel user (if missing), back up any existing key
# pair, then either import the key pair given via -k or generate a fresh
# one, and finally publish the public key in apache's pub directory so it
# can be fetched over HTTP(S).
generate_or_import_ssh_push_key() {
    # create user and group if needed (system account, home pre-created)
    getent group $SSH_PUSH_USER >/dev/null || groupadd -r $SSH_PUSH_USER
    getent passwd $SSH_PUSH_USER >/dev/null || useradd -r -g $SSH_PUSH_USER -m -d $SSH_PUSH_USER_HOME -c "susemanager ssh push tunnel" $SSH_PUSH_USER

    # create .ssh dir in home and set permissions (0700, owned by the user)
    mkdir -p $SSH_PUSH_KEY_DIR
    chown $SSH_PUSH_USER:$SSH_PUSH_USER $SSH_PUSH_KEY_DIR
    chmod 700 $SSH_PUSH_KEY_DIR

    # back up any existing key pair first, suffixed with a timestamp
    if [ -f $SSH_PUSH_KEY_DIR/${SSH_PUSH_KEY_FILE} ]; then
       local TSTMP=$(date +%Y%m%d%H%M)
       mv $SSH_PUSH_KEY_DIR/$SSH_PUSH_KEY_FILE $SSH_PUSH_KEY_DIR/${SSH_PUSH_KEY_FILE}.${TSTMP}
       mv $SSH_PUSH_KEY_DIR/${SSH_PUSH_KEY_FILE}.pub $SSH_PUSH_KEY_DIR/${SSH_PUSH_KEY_FILE}.pub.${TSTMP}
    fi

    # import existing or generate new ssh key for this proxy
    if [ "$USE_EXISTING_SSH_PUSH_KEY" -eq "1" ]; then
        # both the private and the .pub file must exist and be readable
        if [[ -z "$EXISTING_SSH_KEY" || ( ! -r "$EXISTING_SSH_KEY" || ! -r "${EXISTING_SSH_KEY}.pub" ) ]]; then
            echo "Key $EXISTING_SSH_KEY not found."
            exit 1
        fi
        echo "Copying SSH keys to ${SSH_PUSH_KEY_DIR}."
        cp $EXISTING_SSH_KEY $SSH_PUSH_KEY_DIR/$SSH_PUSH_KEY_FILE
        cp ${EXISTING_SSH_KEY}.pub $SSH_PUSH_KEY_DIR/${SSH_PUSH_KEY_FILE}.pub
    else
        echo "Generating new SSH key for ssh-push minions."
        ssh-keygen -q -N '' -C "susemanager-ssh-push" -f $SSH_PUSH_KEY_DIR/$SSH_PUSH_KEY_FILE
    fi
    # change owner to SSH_PUSH_USER; private key 0600, public key 0644
    chown $SSH_PUSH_USER:$SSH_PUSH_USER $SSH_PUSH_KEY_DIR/$SSH_PUSH_KEY_FILE
    chmod 600 $SSH_PUSH_KEY_DIR/$SSH_PUSH_KEY_FILE
    chown $SSH_PUSH_USER:$SSH_PUSH_USER $SSH_PUSH_KEY_DIR/$SSH_PUSH_KEY_FILE.pub
    chmod 644 $SSH_PUSH_KEY_DIR/$SSH_PUSH_KEY_FILE.pub

    # copy the public key to apache's pub dir so clients can download it
    cp $SSH_PUSH_KEY_DIR/${SSH_PUSH_KEY_FILE}.pub ${HTMLPUB_DIR}/
}

# Fetch the parent's public ssh-push key over HTTP(S) and append it to
# this proxy's authorized_keys. Tries the proxy-style /pub URL first and
# falls back to the Manager-server salt-ssh pubkey endpoint on 404.
# On failure the function only prints instructions; it does not abort.
authorize_parent_ssh_push_key() {
    # Fetch key from parent and add it to authorized_keys
    local AUTH_KEYS="$SSH_PUSH_KEY_DIR/authorized_keys"
    local TMP_PUSH_KEY_FILE="$SSH_PUSH_KEY_DIR/${SSH_PUSH_KEY_FILE}.pub.tmp"
    rm -f $TMP_PUSH_KEY_FILE
    local PROXY_KEY_URL="$PROTO://$RHN_PARENT/pub/${SSH_PUSH_KEY_FILE}.pub"
    local SERVER_KEY_URL="$PROTO://$RHN_PARENT/rhn/manager/download/saltssh/pubkey"
    echo "Fetching public ssh-push key from $RHN_PARENT."
    local CURL_RESPONSE=$(curl --write-out %{http_code} --silent --output $TMP_PUSH_KEY_FILE $PROXY_KEY_URL)
    if [ "$CURL_RESPONSE" == "404" ]; then
        # parent is a Manager server, not a proxy: use the server endpoint
        CURL_RESPONSE=$(curl --write-out %{http_code} --silent --output $TMP_PUSH_KEY_FILE $SERVER_KEY_URL)
    fi
    if [ "$CURL_RESPONSE" != "200" ]; then
        echo "Could not retrieve ssh-push key. curl failed with HTTP response code ${CURL_RESPONSE}."
        echo "Check connectivity to the parent server or if it has a ssh-push key."
        echo "After fixing the problem run: mgr-proxy-ssh-push-init -a"
    else
        # remove any previously authorized key (matched by its comment)
        [ -f $AUTH_KEYS ] && sed -i '/susemanager-ssh-push/d' $AUTH_KEYS
        cat $TMP_PUSH_KEY_FILE >> $AUTH_KEYS && echo "Added public ssh-push key from $RHN_PARENT to $AUTH_KEYS."
    fi
    rm $TMP_PUSH_KEY_FILE
}

# Append a hardened "Match user mgrsshtunnel" section to sshd_config
# (force-command only, pubkey auth, no TTY/X11) and restart sshd.
# Idempotent: does nothing if a non-commented Match block for the tunnel
# user is already present.
configure_sshd() {
    local SSHD_CONFIG="/etc/ssh/sshd_config"
    if ! grep "^[^#]*Match user $SSH_PUSH_USER" $SSHD_CONFIG> /dev/null ; then
        cat >> $SSHD_CONFIG <<EOF

Match user mgrsshtunnel
    ForceCommand /usr/sbin/mgr-proxy-ssh-force-cmd
    KbdInteractiveAuthentication no
    PasswordAuthentication no
    PubkeyAuthentication yes
    X11Forwarding no
    PermitTTY no

EOF
        echo "Updated ${SSHD_CONFIG}."
        printf "Restarting sshd..."
        systemctl restart sshd
        echo "done."
    else
        echo "sshd is already configured."
    fi
}

USE_EXISTING_SSH_PUSH_KEY=0
# Parse command-line options.
# Fix: "-s" takes no argument (see print_help and the handler below); the
# previous optstring declared "s:", which made a plain "-s" fail with
# "Option -s requires an argument."
while getopts ":k:sah" opt; do
  case $opt in
    k)
      # import an existing key pair instead of generating one
      USE_EXISTING_SSH_PUSH_KEY=1
      EXISTING_SSH_KEY=$OPTARG
      ;;
    h)
      print_help
      exit 0
      ;;
    s)
      # only (re)configure sshd, then quit
      configure_sshd
      exit 0
      ;;
    a)
      # only fetch and authorize the parent's public key, then quit
      authorize_parent_ssh_push_key
      exit 0
      ;;
    \?)
      echo "Invalid option: -$OPTARG" >&2
      exit 1
      ;;
    :)
      echo "Option -$OPTARG requires an argument." >&2
      exit 1
      ;;
  esac
done

# Default behaviour (no short-circuit option given): full initialization.
generate_or_import_ssh_push_key
authorize_parent_ssh_push_key
configure_sshd0707010000001A000041FD00000000000000000000000368DD3ED300000000000000000000000000000000000000000000001300000000spacewalk-proxy/pm0707010000001B000081B400000000000000000000000168DD3ED300000382000000000000000000000000000000000000001C00000000spacewalk-proxy/pm/Makefile# Makefile for the PackageManager modules
#

# Locations for the rhn_package_manager tool and its docbook man page.
TOP	= ..
SUBDIR  = PackageManager
FILES	= rhn_package_manager __init__
MAN_BASE = rhn_package_manager
MAN_SECT = 8
DOCBOOK2MAN = /usr/bin/docbook2man
MAN_DIR	= $(PREFIX)/usr/share/man/man$(MAN_SECT)
PM_BIN_DIR  = $(PREFIX)/usr/bin

SUBDIRS	= rhn-conf

# Hook man-page build/install/clean into the shared double-colon targets
# provided by Makefile.defs (included below).
all ::	all-man

install:: install-man install-bin

clean :: clean-man

all-man: make-man

make-man: $(MAN_BASE).$(MAN_SECT).gz

$(MAN_BASE).$(MAN_SECT): $(MAN_BASE).sgml
	$(DOCBOOK2MAN) $<

$(MAN_BASE).$(MAN_SECT).gz: $(MAN_BASE).$(MAN_SECT)
	gzip -c $< > $@

install-man: make-man $(MAN_DIR)
	$(INSTALL_DATA) $(MAN_BASE).$(MAN_SECT).gz $(MAN_DIR)

install-bin: rhn_package_manager $(PM_BIN_DIR)
	$(INSTALL_BIN) rhn_package_manager $(PM_BIN_DIR)

$(MAN_DIR) $(PM_BIN_DIR):
	$(INSTALL_DIR) $@

# Fix: use $(MAN_SECT) instead of a hard-coded "8" so clean-man stays
# consistent with the build targets above if MAN_SECT ever changes.
clean-man:
	@rm -fv $(MAN_BASE).$(MAN_SECT) $(MAN_BASE).$(MAN_SECT).gz manpage.links manpage.refs

include $(TOP)/Makefile.defs
0707010000001C000081B400000000000000000000000168DD3ED30000025F000000000000000000000000000000000000001F00000000spacewalk-proxy/pm/__init__.py#
# Copyright (c) 2008 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
0707010000001D000041FD00000000000000000000000268DD3ED300000000000000000000000000000000000000000000001C00000000spacewalk-proxy/pm/rhn-conf0707010000001E000081B400000000000000000000000168DD3ED30000010D000000000000000000000000000000000000002500000000spacewalk-proxy/pm/rhn-conf/Makefile# Makefile for installation of the RHN Package Manager configuration files
#

# what is the backend top dir
TOP	= ../..

INSTALL_FILES	= $(wildcard *.conf)
INSTALL_DEST	= /usr/share/rhn/config-defaults

include $(TOP)/Makefile.defs

install :: $(PREFIX)$(INSTALL_DEST)
0707010000001F000081B400000000000000000000000168DD3ED30000010F000000000000000000000000000000000000003B00000000spacewalk-proxy/pm/rhn-conf/rhn_proxy_package_manager.conf# /etc/rhn/default/rhn_proxy_package_manager.conf
#

## unexposed
headers_per_call    = 25

## exposed
debug		    = 5
rhn_parent          =
http_proxy          = 
http_proxy_username = 
http_proxy_password = 
ca_chain            = /usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT
07070100000020000081FD00000000000000000000000168DD3ED3000002C4000000000000000000000000000000000000002700000000spacewalk-proxy/pm/rhn_package_manager#!/usr/bin/python
#
# Wrapper for rhn_package_manager.py
#

# Directory that contains the PackageManager package.
LIBPATH = "/usr/share/rhn"

if __name__ == "__main__":
    import sys
    import os

    if os.getuid() != 0:
        sys.stderr.write("ERROR: must be root to execute\n")
        # Fix: exit non-zero on failure; previously this exited 0, so
        # scripts could not detect the error.
        sys.exit(1)

    # Make the PackageManager package importable.
    LIBPATH = os.path.abspath(LIBPATH)
    if LIBPATH not in sys.path:
        sys.path.append(LIBPATH)

    try:
        from PackageManager import rhn_package_manager
    except ImportError:
        sys.stderr.write(
            "Unable to find package management libraries.\n"
            "Path not correct? '%s'\n" % LIBPATH
        )
        raise

    try:
        rhn_package_manager.main()
    except SystemExit as e:
        # Propagate the exit code chosen by main()/die().
        sys.exit(e.code)
07070100000021000081FD00000000000000000000000168DD3ED30000438E000000000000000000000000000000000000002A00000000spacewalk-proxy/pm/rhn_package_manager.py#!/usr/bin/python
#
# Copyright (c) 2008--2017 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# Authors: Mihai Ibanescu <misa@redhat.com>
#          Todd Warner <taw@redhat.com>
#
"""\
Management tool for the Spacewalk Proxy.

This script performs various management operations on the Spacewalk Proxy:
- Creates the local directory structure needed to store local packages
- Uploads packages from a given directory to the RHN servers
- Optionally, once the packages are uploaded, they can be linked to (one or
  more) channels, and copied in the local directories for these channels.
- Lists the RHN server's vision on a certain channel
- Checks if the local image of the channel (the local directory) is in sync
  with the server's image, and prints the missing packages (or the extra
  ones)
- Cache any RPM content locally to avoid needing to download them. This can be
  particularly useful if bandwidth is precious or the connection to the server
  is slow.
"""

# system imports
import gzip
import os
from xml.dom import minidom
import sys
import shutil

try:
    #  python 2
    import xmlrpclib
except ImportError:
    #  python3
    import xmlrpc.client as xmlrpclib
from optparse import Option, OptionParser  # pylint: disable=deprecated-module

# RHN imports
from spacewalk.common.rhnConfig import CFG, initCFG
from uyuni.common.rhnLib import parseUrl

initCFG("proxy.package_manager")
# pylint: disable=E0012, C0413
from rhnpush.uploadLib import UploadError
from rhnpush import uploadLib
from proxy.broker.rhnRepository import computePackagePaths

# globals
PREFIX = "rhn"


def main():
    """Parse the command line and dispatch to the requested operation:
    listing channels, checking sync against the parent, caching packages
    locally, or uploading package headers.
    """
    # Initialize a command-line processing object with a table of options
    # pylint: disable-next=invalid-name,invalid-name
    optionsTable = [
        Option("-v", "--verbose", action="count", help="Increase verbosity", default=1),
        Option(
            "-d", "--dir", action="store", help="Process packages from this directory"
        ),
        Option(
            "-L",
            "--cache-locally",
            action="store_true",
            help="Locally cache packages so that Proxy will not ever need to "
            + "download them. Changes nothing on the upstream server.",
        ),
        Option(
            "-e",
            "--from-export",
            action="store",
            dest="export_location",
            help="Process packages from this channel export. Can only be used "
            + "with --cache-locally or --copyonly.",
        ),
        Option(
            "-c",
            "--channel",
            action="append",
            help="Channel to operate on. When used with --from-export "
            + "specifies channels to cache rpms for, else specifies channels "
            + "that we will be pushing into.",
        ),
        Option(
            "-n",
            "--count",
            action="store",
            help="Process this number of headers per call",
            type="int",
        ),
        Option(
            "-l", "--list", action="store_true", help="Only list the specified channels"
        ),
        Option(
            "-s", "--sync", action="store_true", help="Check if in sync with the server"
        ),
        Option(
            "-p",
            "--printconf",
            action="store_true",
            help="Print the configuration and exit",
        ),
        Option(
            "-X",
            "--exclude",
            action="append",
            help="Exclude packages that match this glob expression",
        ),
        Option(
            "--newest",
            action="store_true",
            help="Only push the files that are newer than the server ones",
        ),
        Option(
            "--stdin", action="store_true", help="Read the package names from stdin"
        ),
        Option("--nosig", action="store_true", help="Push unsigned packages"),
        Option(
            "--username", action="store", help="Use this username to connect to RHN"
        ),
        Option(
            "--password", action="store", help="Use this password to connect to RHN"
        ),
        Option("--source", action="store_true", help="Upload source package headers"),
        Option(
            "--dontcopy",
            action="store_true",
            help="Do not copy packages to the local directory",
        ),
        Option(
            "--copyonly",
            action="store_true",
            help="Only copy packages; don't reimport. Same as --cache-locally",
        ),
        Option(
            "--test", action="store_true", help="Only print the packages to be pushed"
        ),
        Option(
            "-N",
            "--new-cache",
            action="store_true",
            help="Create a new username/password cache",
        ),
        Option(
            "--no-session-caching",
            action="store_true",
            help="Disables session-token authentication.",
        ),
        Option(
            "-?", "--usage", action="store_true", help="Briefly describe the options"
        ),
    ]
    # Process the command line arguments
    # pylint: disable-next=invalid-name,invalid-name
    optionParser = OptionParser(
        option_list=optionsTable, usage="USAGE: %prog [OPTION] [<package>]"
    )
    options, files = optionParser.parse_args()
    # Below line needs fixing. Together with replacement of optparse.
    upload = UploadClass(
        options, files=files
    )  # pylint: disable=too-many-function-args,unexpected-keyword-arg

    # Short-circuit modes: each returns/exits without uploading anything.
    if options.usage:
        optionParser.print_usage()
        sys.exit(0)

    if options.printconf:
        CFG.show()
        return

    if options.list:
        upload.list()
        return

    if options.sync:
        upload.checkSync()
        return

    # It's just an alias to copyonly
    if options.cache_locally:
        options.copyonly = True

    # remember to process dir option before export, export can overwrite dir
    if options.dir:
        upload.directory()
    if options.export_location:
        # --from-export is only valid for local caching, not uploads.
        if not options.copyonly:
            upload.die(
                0,
                "--from-export can only be used with --cache-locally"
                + " or --copyonly",
            )
        if options.source:
            upload.die(0, "--from-export cannot be used with --source")
        upload.from_export()
    if options.stdin:
        upload.readStdin()

    # if we're going to allow the user to specify packages by dir *and* export
    # *and* stdin *and* package list (why not?) then we have to uniquify
    # the list afterwards. Sort just for user-friendly display.
    upload.files = sorted(list(set(upload.files)))

    if options.copyonly:
        # Local caching only; nothing is sent to the parent server.
        if not upload.files:
            upload.die(0, "Nothing to do; exiting. Try --help")
        if options.test:
            upload.test()
            return
        upload.copyonly()
        return

    if options.exclude:
        upload.filter_excludes()

    if options.newest:
        upload.newest()

    if not upload.files:
        upload.die(0, "Nothing to do; exiting. Try --help")

    if options.test:
        upload.test()
        return

    try:
        upload.uploadHeaders()
    except UploadError as e:
        # pylint: disable-next=consider-using-f-string,consider-using-f-string
        sys.stderr.write("Upload error: %s\n" % e)


# pylint: disable-next=missing-class-docstring,missing-class-docstring
class UploadClass(uploadLib.UploadClass):
    """Proxy-specific specialization of rhnpush's UploadClass.

    Overrides the connection/configuration hooks (URL, HTTP proxy, CA
    chain) to read from the proxy configuration, adds local package
    caching (``copyonly``/``processPackage``) and sync checking against
    the parent server (``checkSync``).
    """

    # pylint: disable=R0904,W0221

    # pylint: disable-next=invalid-name,invalid-name
    def setURL(self, path="/APP"):
        # overloaded for uploadlib.py
        # Builds https://<rhn_parent-host><path> from the configured parent.
        if not CFG.RHN_PARENT:
            self.die(-1, "rhn_parent not set in the configuration file")
        # NOTE(review): this first assignment is redundant — it is
        # immediately overwritten below.
        self.url = CFG.RHN_PARENT
        scheme = "https://"
        self.url = CFG.RHN_PARENT or ""
        # Keep only the host part (strip scheme and port).
        self.url = parseUrl(self.url)[1].split(":")[0]
        self.url = scheme + self.url + path

    # The rpm names in channel exports have been changed to be something like
    # rhn-package-XXXXXX.rpm, but that's okay because the rpm headers are
    # still intact and that's what we use to determine the destination
    # filename. Read the channel xml to determine what rpms to cache if the
    # --channel option was used.
    def from_export(self):
        """Queue rpms found in a channel export directory for processing."""
        export_dir = self.options.export_location
        self.warn(1, "Getting files from channel export: ", export_dir)
        if not self.options.channel:
            self.warn(2, "No channels specified, getting all files")
            # If no channels specified just upload all rpms from
            # all the rpm directories
            for hash_dir in uploadLib.listdir(os.path.join(export_dir, "rpms")):
                self.options.dir = hash_dir
                self.directory()
            return
        # else...
        self.warn(2, "Getting only files in these channels", self.options.channel)
        # Read the channel xml and add only packages that are in these channels
        package_set = set([])
        for channel in self.options.channel:
            xml_path = os.path.join(export_dir, "channels", channel, "channel.xml.gz")
            if not os.access(xml_path, os.R_OK):
                # Missing/unreadable metadata: warn and skip this channel.
                self.warn(
                    0,
                    # pylint: disable-next=consider-using-f-string
                    "Could not find metadata for channel %s, skipping..." % channel,
                )
                print(
                    # pylint: disable-next=consider-using-f-string,consider-using-f-string
                    "Could not find metadata for channel {}, skipping...".format(
                        channel
                    )
                )
                continue
            dom = minidom.parse(gzip.open(xml_path))
            # will only ever be the one
            dom_channel = dom.getElementsByTagName("rhn-channel")[0]
            package_set.update(
                dom_channel.attributes["packages"]
                .value.encode("ascii", "ignore")
                .split()
            )
        # Try to find relevant packages in the export
        for hash_dir in uploadLib.listdir(os.path.join(export_dir, "rpms")):
            for rpm in uploadLib.listdir(hash_dir):
                # rpm name minus '.rpm'
                if str.encode(os.path.basename(rpm)[:-4]) in package_set:
                    self.files.append(rpm)

    # pylint: disable-next=invalid-name,invalid-name
    def setServer(self):
        """Connect to the parent; fall back to /XP without session tokens
        when the /APP handler is unavailable (HTTP 404 from older parents).
        """
        try:
            uploadLib.UploadClass.setServer(self)
            uploadLib.call(self.server.packages.no_op, raise_protocol_error=True)
        except xmlrpclib.ProtocolError as e:
            if e.errcode == 404:
                self.use_session = False
                self.setURL("/XP")
                uploadLib.UploadClass.setServer(self)
            else:
                raise

    def authenticate(self):
        # Session-token auth when supported, otherwise username/password.
        if self.use_session:
            uploadLib.UploadClass.authenticate(self)
        else:
            self.setUsernamePassword()

    # pylint: disable-next=invalid-name,invalid-name
    def setProxyUsernamePassword(self):
        # overloaded for uploadlib.py
        # pylint: disable-next=invalid-name,invalid-name
        self.proxyUsername = CFG.HTTP_PROXY_USERNAME
        # pylint: disable-next=invalid-name,invalid-name
        self.proxyPassword = CFG.HTTP_PROXY_PASSWORD

    # pylint: disable-next=invalid-name,invalid-name
    def setProxy(self):
        # overloaded for uploadlib.py
        self.proxy = CFG.HTTP_PROXY

    # pylint: disable-next=invalid-name,invalid-name
    def setCAchain(self):
        # overloaded for uploadlib.py
        self.ca_chain = CFG.CA_CHAIN

    # pylint: disable-next=invalid-name,invalid-name
    def setNoChannels(self):
        self.channels = self.options.channel

    # pylint: disable-next=invalid-name,invalid-name
    def checkSync(self):
        """List the channels on the parent and report packages that are
        missing from the local package tree."""
        # set the org
        self.setOrg()
        # set the URL
        self.setURL()
        # set the channels
        self.setChannels()
        # set the server
        self.setServer()

        self.authenticate()

        # List the channel's contents
        channel_list = self._listChannel()

        # Convert it to a hash of hashes
        # pylint: disable-next=invalid-name,invalid-name
        remotePackages = {}
        for channel in self.channels:
            remotePackages[channel] = {}
        for p in channel_list:
            # pylint: disable-next=invalid-name,invalid-name
            channelName = p[-1]
            key = tuple(p[:5])
            remotePackages[channelName][key] = None

        missing = []
        for package in channel_list:
            found = False
            # if the package includes checksum info
            if self.use_checksum_paths:
                checksum = package[6]
            else:
                checksum = None

            # A package counts as present if ANY candidate path exists.
            # pylint: disable-next=invalid-name,invalid-name
            packagePaths = computePackagePaths(package, 0, PREFIX, checksum)
            # pylint: disable-next=invalid-name,invalid-name
            for packagePath in packagePaths:
                # pylint: disable-next=invalid-name,consider-using-f-string,invalid-name,consider-using-f-string
                packagePath = "%s/%s" % (CFG.PKG_DIR, packagePath)
                if os.path.isfile(packagePath):
                    found = True
                    break
            if not found:
                missing.append([package, packagePaths[0]])

        if not missing:
            self.warn(0, "Channels in sync with the server")
            return

        # pylint: disable-next=invalid-name,invalid-name
        for package, packagePath in missing:
            # pylint: disable-next=invalid-name,invalid-name
            channelName = package[-1]
            self.warn(
                0,
                # pylint: disable-next=consider-using-f-string,consider-using-f-string
                "Missing: %s in channel %s (path %s)"
                % (rpmPackageName(package), channelName, packagePath),
            )

    # pylint: disable-next=invalid-name,invalid-name
    def processPackage(self, package, filename, checksum=None):
        """Copy a processed package into the local package tree (unless
        --dontcopy was given) and make it world-readable for Apache."""
        if self.options.dontcopy:
            return

        if not CFG.PKG_DIR:
            self.warn(1, "No package directory specified; will not copy the package")
            return

        if not self.use_checksum_paths:
            checksum = None
        # Copy file to the preferred path
        # pylint: disable-next=invalid-name,invalid-name
        packagePath = computePackagePaths(
            package, self.options.source, PREFIX, checksum
        )[0]
        # pylint: disable-next=invalid-name,consider-using-f-string,invalid-name,consider-using-f-string
        packagePath = "%s/%s" % (CFG.PKG_DIR, packagePath)
        destdir = os.path.dirname(packagePath)
        if not os.path.isdir(destdir):
            # Try to create it
            try:
                os.makedirs(destdir, 0o755)
            except OSError:
                # Best effort: warn and skip instead of failing the run.
                # pylint: disable-next=consider-using-f-string,consider-using-f-string
                self.warn(0, "Could not create directory %s" % destdir)
                return
        # pylint: disable-next=consider-using-f-string,consider-using-f-string
        self.warn(1, "Copying %s to %s" % (filename, packagePath))
        shutil.copy2(filename, packagePath)
        # Make sure the file permissions are set correctly, so that Apache can
        # see the files
        os.chmod(packagePath, 0o644)

    # pylint: disable-next=invalid-name,invalid-name
    def _listChannelSource(self):
        self.die(1, "Listing source rpms not supported")

    def copyonly(self):
        """Cache the given package files locally without uploading their
        headers to the parent server."""
        # Set the forcing factor
        self.setForce()
        # Relative directory
        self.setRelativeDir()
        # Set the count
        self.setCount()

        if not CFG.PKG_DIR:
            self.warn(1, "No package directory specified; will not copy the package")
            return

        # Safe because proxy X can't be activated against Spacewalk / Satellite
        # < X.
        self.use_checksum_paths = True

        for filename in self.files:
            fileinfo = self._processFile(
                filename,
                relativeDir=self.relativeDir,
                source=self.options.source,
                nosig=self.options.nosig,
            )
            self.processPackage(fileinfo["nvrea"], filename, fileinfo["checksum"])


# pylint: disable-next=invalid-name
def rpmPackageName(p):
    """Build the canonical rpm file name (name-version-release.arch.rpm)
    from an nvrea sequence."""
    name, version, release, arch = p[0], p[1], p[2], p[4]
    # pylint: disable-next=consider-using-f-string
    return "{}-{}-{}.{}.rpm".format(name, version, release, arch)


if __name__ == "__main__":
    try:
        main()
    except SystemExit as exc:
        # Propagate the requested exit code to the shell.
        sys.exit(exc.code)
07070100000022000081B400000000000000000000000168DD3ED30000240F000000000000000000000000000000000000002C00000000spacewalk-proxy/pm/rhn_package_manager.sgml<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN" [
<!ENTITY PROXY "Spacewalk Proxy Server" -- use this to be consistent -->
<!ENTITY RHNPM "Spacewalk Package Manager" -- use this to be consistent -->

]>
<refentry>

<RefMeta>
<RefEntryTitle>rhn_package_manager</RefEntryTitle><manvolnum>8</manvolnum>
<RefMiscInfo>Version 5.0</RefMiscInfo>
</RefMeta>

<RefNameDiv>
<RefName><command>rhn_package_manager</command></RefName>
<RefPurpose>
Manages RPM packages for the Spacewalk Proxy
</RefPurpose>
</RefNameDiv>

<RefSynopsisDiv>
<Synopsis>
    <cmdsynopsis>
        <command>rhn_package_manager</command> 
        <arg>options</arg>
        <arg rep=repeat choice=plain><replaceable>file</replaceable></arg>
    </cmdsynopsis>
</Synopsis>
</RefSynopsisDiv>

<RefSect1><Title>Description</Title>

<para>
    The &RHNPM; (<emphasis>rhn_package_manager</emphasis>) is the
    custom channel management tool for the &PROXY;. 
</para>

<para>
    A &PROXY; may manage <emphasis>local/custom channels</emphasis>. A
    <emphasis>channel</emphasis> is a logical grouping of packages that can be
    installed using <command>rpm</command>. The &RHNPM; is used to populate
    those custom channels with RPMs and SRPMs.
</para>

<para>
    The &RHNPM; also provides the ability to create a local cache of RPMs that
    does not expire and will be used instead of downloading the RPM when
    applicable. This is useful even if the RPMs would be available otherwise
    because it reduces the bandwidth required and can greatly speed up
    RPM retrieval in the case of a slow network. This is accomplished with the
    <emphasis>--cache-locally</emphasis> option in conjunction with a file list.
    Normally the file list would come from the
    <emphasis>--from-export</emphasis> or <emphasis>--dir</emphasis> options,
    but the <emphasis>--stdin</emphasis> option or a file list specified on the
    command line will also work. If for some reason you need to remove packages
    from this cache in the future you will need to delete them from the
    /var/spool/rhn-proxy/rhn directory. It is also possible to manually
    populate this cache by copying the files / directories from
    $base/redhat/*/ on a Spacewalk server into the above directory (where
    $base is configurable, but by default is /var/satellite).
</para>
</RefSect1>

<RefSect1><Title>Options</Title>
<variablelist>
    <varlistentry>
        <term>-v, --verbose</term>
        <listitem>
            <para>Increase verbosity</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-d <replaceable>directory</replaceable>, 
            --dir <replaceable>directory</replaceable></term>
        <listitem>
            <para>Process packages from this directory.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-L, --cache-locally</term>
        <listitem>
            <para>Only copy packages into the local cache; don't upload or
                  import the package metadata anywhere else. The only options
                  that can be used with <emphasis>--cache-locally</emphasis>
                  are <emphasis>--test</emphasis>,
                  <emphasis>--from-export</emphasis>,
                  <emphasis>--dir</emphasis>,
                  <emphasis>--stdin</emphasis>, and
                  <emphasis>--channel</emphasis> when used with
                  <emphasis>--from-export</emphasis>.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-e <replaceable>export_location</replaceable>,
            --from-export <replaceable>export_location</replaceable></term>
        <listitem>
            <para>Process packages from the channel export. This can be either
                  a channel export that you have generated with
                  rhn-satellite-exporter or channel dump ISOs obtained
                  elsewhere (ISOs must be mounted, and the mount location given
                  here). If used with <emphasis>--channel</emphasis> this
                  will only process packages that are contained in that
                  channel, else all packages in the export will be processed.
                  If the channel export is spread across multiple ISOs it is
                  not required that you recombine them locally before running
                  <emphasis>rhn_package_manager</emphasis>, however you must
                  repeat the same command with each ISO to ensure that all
                  packages are found. Only functions in conjunction with the
                  <emphasis>--cache-locally</emphasis> or
                  <emphasis>--copyonly</emphasis> options.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-c <replaceable>channel</replaceable>, 
            --channel <replaceable>channel</replaceable></term>
        <listitem>
            <para>Operate on this channel -- may be present multiple times.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-n <replaceable>count</replaceable>, 
            --count <replaceable>count</replaceable></term>
        <listitem>
            <para>Process this number of headers per call -- the default is
            32.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-l, --list</term>
        <listitem>
            <para>List the specified packages of the specified
            channel(s).</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-s, --sync</term>
        <listitem>
            <para>Check if in sync with the server.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-p, --printconf</term>
        <listitem>
            <para>Print the current configuration and exit.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-X <replaceable>pattern</replaceable>, 
            --exclude <replaceable>pattern</replaceable></term>
        <listitem>
            <para>Exclude files matching this glob expression -- can be
                present multiple times.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--newest</term>
        <listitem>
            <para>Only push the files that are newer than the server ones.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--stdin</term>
        <listitem>
            <para>Read the package names from stdin.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--nosig</term>
        <listitem>
            <para>Push unsigned packages. By default the &RHNPM; only attempts
            to push signed packages.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--username <replaceable>username</replaceable></term>
        <listitem>
            <para>Use this username to connect to the Red Hat Satellite.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--password <replaceable>password</replaceable></term>
        <listitem>
            <para>Use this password to connect to the Red Hat Satellite.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--source</term>
        <listitem>
            <para>Upload source package headers.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--dontcopy</term>
        <listitem>
            <para>Do not copy packages to the local directory.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--copyonly</term>
        <listitem>
            <para>An alias to <emphasis>--cache-locally</emphasis> for
                  backwards compatibility.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>--test</term>
        <listitem>
            <para>Only print the packages to be pushed.</para>
        </listitem>
    </varlistentry>
   <varlistentry>
        <term>-N, --new-cache</term>
        <listitem>
            <para>create a new username/password cache</para>
        </listitem>
    </varlistentry>
   <varlistentry>
        <term>--no-session-caching</term>
        <listitem>
            <para>This option disables session token authentication. Useful if you want to push to two or more different servers.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-h, --help</term>
        <listitem>
            <para>Display the help screen with a list of options.</para>
        </listitem>
    </varlistentry>
    <varlistentry>
        <term>-?, --usage</term>
        <listitem>
            <para>Briefly describe the options.</para>
        </listitem>
    </varlistentry>
</variablelist>
</RefSect1>

<RefSect1><Title>Files</Title>
<simplelist>
    <member>/etc/rhn/rhn.conf</member>
</simplelist>
</RefSect1>

<RefSect1><Title>See Also</Title>
<simplelist>
    <member>rhn-proxy(8)</member>
    <member>rhn-proxy-activate(8)</member>
</simplelist>
</RefSect1>

<RefSect1><Title>Authors</Title>
<simplelist>
    <member>Mihai Ibanescu <email>misa@redhat.com</email></member>
    <member>Todd Warner <email>taw@redhat.com</email></member>
</simplelist>
</RefSect1>
</RefEntry>
07070100000023000081B400000000000000000000000168DD3ED300001393000000000000000000000000000000000000001900000000spacewalk-proxy/pylintrc# proxy package pylint configuration

[MASTER]

# Profiled execution.
profile=no

# Pickle collected data for later comparisons.
persistent=no


[MESSAGES CONTROL]

# Disable the message(s) with the given id(s).


disable=I0011,
	C0302,
	C0111,
	R0801,
	R0902,
	R0903,
	R0904,
	R0912,
	R0913,
	R0914,
	R0915,
	R0921,
	R0922,
	W0142,
	W0403,
	W0603,
	C1001,
	W0121,
	useless-else-on-loop,
	bad-whitespace,
	unpacking-non-sequence,
	superfluous-parens,
	cyclic-import,
	redefined-variable-type,
	no-else-return,

        # Uyuni disabled
	E0203,
	E0611,
	E1101,
	E1102

# list of disabled messages:
#I0011: 62: Locally disabling R0201
#C0302:  1: Too many lines in module (2425)
#C0111:  1: Missing docstring
#R0902: 19:RequestedChannels: Too many instance attributes (9/7)
#R0903:  Too few public methods
#R0904: 26:Transport: Too many public methods (22/20)
#R0912:171:set_slots_from_cert: Too many branches (59/20)
#R0913:101:GETServer.__init__: Too many arguments (11/10)
#R0914:171:set_slots_from_cert: Too many local variables (38/20)
#R0915:171:set_slots_from_cert: Too many statements (169/50)
#W0142:228:MPM_Package.write: Used * or ** magic
#W0403: 28: Relative import 'rhnLog', should be 'backend.common.rhnLog'
#W0603: 72:initLOG: Using the global statement
# for pylint-1.0 we also disable
#C1001: 46, 0: Old-style class defined. (old-style-class)
#W0121: 33,16: Use raise ErrorClass(args) instead of raise ErrorClass, args. (old-raise-syntax)
#W:243, 8: Else clause on loop without a break statement (useless-else-on-loop)
# pylint-1.1 checks
#C:334, 0: No space allowed after bracket (bad-whitespace)
#W:162, 8: Attempting to unpack a non-sequence defined at line 6 of (unpacking-non-sequence)
#C: 37, 0: Unnecessary parens after 'not' keyword (superfluous-parens)
#C:301, 0: Unnecessary parens after 'if' keyword (superfluous-parens)

[REPORTS]

# Set the output format. Available formats are text, parseable, colorized, msvs
# (visual studio) and html
output-format=parseable

# Include message's id in output
include-ids=yes

# Tells whether to display a full report or only the messages
reports=yes

# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details
msg-template="{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}"

[VARIABLES]

# A regular expression matching names used for dummy variables (i.e. not used).
dummy-variables-rgx=_|dummy


[BASIC]

# Regular expression which should only match correct module names
#module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
module-rgx=([a-zA-Z_][a-zA-Z0-9_]+)$

# Regular expression which should only match correct module level names
const-rgx=(([a-zA-Z_][a-zA-Z0-9_]*)|(__.*__))$

# Regular expression which should only match correct class names
class-rgx=[a-zA-Z_][a-zA-Z0-9_]+$

# Regular expression which should only match correct function names
function-rgx=[a-z_][a-zA-Z0-9_]{,42}$

# Regular expression which should only match correct method names
method-rgx=[a-z_][a-zA-Z0-9_]{,42}$

# Regular expression which should only match correct instance attribute names
attr-rgx=[a-z_][a-zA-Z0-9_]{,30}$

# Regular expression which should only match correct argument names
argument-rgx=[a-z_][a-zA-Z0-9_]{,30}$

# Regular expression which should only match correct variable names
variable-rgx=[a-z_][a-zA-Z0-9_]{,30}$

# Regular expression which should only match correct list comprehension /
# generator expression variable names
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$

# Regular expression which should only match correct class attribute names
class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,42}|(__.*__))$

# Good variable names which should always be accepted, separated by a comma
good-names=i,j,k,ex,Run,_

# Bad variable names which should always be refused, separated by a comma
bad-names=foo,bar,baz,toto,tutu,tata

# List of builtins function names that should not be used, separated by a comma
bad-functions=apply,input


[DESIGN]

# Maximum number of arguments for function / method
max-args=10

# Maximum number of locals for function / method body
max-locals=20

# Maximum number of return / yield for function / method body
max-returns=6

# Maximum number of branch for function / method body
max-branchs=20

# Maximum number of statements in function / method body
max-statements=50

# Maximum number of parents for a class (see R0901).
max-parents=7

# Maximum number of attributes for a class (see R0902).
max-attributes=7

# Minimum number of public methods for a class (see R0903).
min-public-methods=1

# Maximum number of public methods for a class (see R0904).
max-public-methods=20


[CLASSES]


[FORMAT]

# Maximum number of characters on a single line.
max-line-length=120

# Maximum number of lines in a module
max-module-lines=1000

# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string='    '


[MISCELLANEOUS]

# List of note tags to take in consideration, separated by a comma.
notes=
07070100000024000041FD00000000000000000000000268DD3ED300000000000000000000000000000000000000000000001900000000spacewalk-proxy/redirect07070100000025000081B400000000000000000000000168DD3ED3000000AB000000000000000000000000000000000000002200000000spacewalk-proxy/redirect/Makefile# Makefile for the apacheServer.py for Spacewalk Proxy SSL Redirect Server.
#

# Path to the directory holding the shared Makefile.defs rules
TOP	= ..
# Install destination subdirectory below $(ROOT) (see INSTALL_DEST in Makefile.defs)
SUBDIR	= proxy/redirect
# Python modules (no .py suffix) handled by the common install rules
FILES	= __init__ rhnRedirect
include $(TOP)/Makefile.defs

07070100000026000081B400000000000000000000000168DD3ED300000265000000000000000000000000000000000000002500000000spacewalk-proxy/redirect/__init__.py#
# Copyright (c) 2008--2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
07070100000027000081B400000000000000000000000168DD3ED30000477F000000000000000000000000000000000000002800000000spacewalk-proxy/redirect/rhnRedirect.py# pylint: disable=missing-module-docstring,invalid-name
# Spacewalk Proxy Server SSL Redirect handler code.
#
# Copyright (c) 2008--2017 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#

# language imports
import socket
import re

try:
    # python 3
    from urllib.parse import urlparse, urlunparse
except ImportError:
    # python 2
    from urlparse import urlparse, urlunparse

# common module imports
from spacewalk.common.rhnConfig import CFG
from spacewalk.common.rhnLog import log_debug, log_error
from spacewalk.common.rhnTB import Traceback
from spacewalk.common import rhnFlags, apache
from rhn import connections
from uyuni.common import rhnLib

# local module imports
from proxy.rhnShared import SharedHandler
from proxy import rhnConstants


# Main apache entry point for the proxy.


class RedirectHandler(SharedHandler):
    """Spacewalk Proxy SSL Redirect specific handler code called by rhnApache.

    Workflow is:
    Client -> Apache:Broker -> Squid -> Apache:Redirect -> Satellite

    The Redirect handler receives all requests for localhost:80; they come
    from the Broker handler through Squid, which handles caching.  The
    Redirect module rewrites the destination URL to the RHN parent or an
    HTTP proxy, depending on what is configured in CFG.
    """

    def __init__(self, req):
        """Initialize shared state and remap the parent URLs for this request."""
        SharedHandler.__init__(self, req)
        # pylint: disable-next=invalid-name
        self.componentType = "proxy.redirect"
        # Declare the attribute before _initConnectionVariables() computes its
        # real value; assigning None *after* the call (as previously done)
        # clobbered the remapped XMLRPC URL.
        # pylint: disable-next=invalid-name
        self.rhnParentXMLRPC = None
        self._initConnectionVariables(req)

    # pylint: disable-next=invalid-name
    def _initConnectionVariables(self, _req):
        """set connection variables
        NOTE: self.{caChain,rhnParent,httpProxy*} are initialized
              in SharedHandler
        """

        # pylint: disable-next=invalid-name
        effectiveURI = self._getEffectiveURI()
        # pylint: disable-next=invalid-name
        effectiveURI_parts = urlparse(effectiveURI)
        # The XMLRPC endpoint on the parent is always reached over https.
        self.rhnParentXMLRPC = urlunparse(
            ("https", self.rhnParent, "/XMLRPC", "", "", "")
        )
        # Rebuild the parent URL from the effective URI (path component on).
        self.rhnParent = urlunparse(("https", self.rhnParent) + effectiveURI_parts[2:])

        # pylint: disable-next=consider-using-f-string
        log_debug(3, "remapped self.rhnParent:       %s" % self.rhnParent)
        # pylint: disable-next=consider-using-f-string
        log_debug(3, "remapped self.rhnParentXMLRPC: %s" % self.rhnParentXMLRPC)

    def handler(self):
        """Main handler for all requests pumped through this server."""

        log_debug(4, "In redirect handler")
        self._prepHandler()

        # Rebuild the X-Forwarded-For header so that it reflects the actual
        # path of the request.  We must do this because squid is unable to
        # determine the "real" client, and will make each entry in the chain
        # 127.0.0.1.
        # pylint: disable-next=invalid-name
        _oto = rhnFlags.get("outputTransportOptions")
        _oto["X-Forwarded-For"] = _oto["X-RHN-IP-Path"]

        self.rhnParent = self.rhnParent or ""  # paranoid

        log_debug(4, "Connecting to parent...")
        self._connectToParent()  # part 1

        log_debug(4, "Initiating communication with server...")
        status = self._serverCommo()  # part 2
        if status not in (apache.OK, apache.HTTP_PARTIAL_CONTENT):
            # pylint: disable-next=consider-using-f-string
            log_debug(3, "Leaving handler with status code %s" % status)
            return status

        log_debug(4, "Initiating communication with client...")
        # If we got this far, it has to be a good response
        return self._clientCommo(status)

    def _handleServerResponse(self, status):
        """Here, we'll override the default behavior for handling server responses
        so that we can adequately handle 302's.

        We will follow redirects unless it is redirect to (re)login page. In which
        case we change protocol to https and return redirect to user.
        """

        # In case of a 302, redirect the original request to the location
        # specified in the response.

        if status in (apache.HTTP_MOVED_TEMPORARILY, apache.HTTP_MOVED_PERMANENTLY):

            log_debug(1, "Received redirect response: ", status)

            # if we redirected to ssl version of login page, send redirect directly to user
            headers = self.responseContext.getHeaders()
            if headers is not None:
                # pylint: disable-next=invalid-name
                for headerKey in list(headers.keys()):
                    if headerKey == "location":
                        location = self._get_header(headerKey)
                        login = re.compile(r"https?://.*(/rhn/manager/login\?.*)")
                        m = login.match(location[0])
                        if m:
                            # pull server name out of "t:o:k:e:n:hostname1,t:o:k:e:n:hostname2,..."
                            proxy_auth = self.req.headers_in["X-RHN-Proxy-Auth"]
                            last_auth = proxy_auth.split(",")[-1]
                            server_name = last_auth.split(":")[-1]
                            log_debug(1, "Redirecting to SSL version of login page")
                            rhnLib.setHeaderValue(
                                self.req.headers_out,
                                "Location",
                                # pylint: disable-next=consider-using-f-string
                                "https://%s%s" % (server_name, m.group(1)),
                            )
                            return apache.HTTP_MOVED_PERMANENTLY

            # pylint: disable-next=invalid-name
            redirectStatus = self.__redirectToNextLocation()

            # At this point, we've either:
            #
            #     (a) successfully redirected to the 3rd party
            #     (b) been told to redirect somewhere else from the 3rd party
            #     (c) run out of retry attempts
            #
            # We'll keep redirecting until we've received HTTP_OK or an error.

            while redirectStatus in (
                apache.HTTP_MOVED_PERMANENTLY,
                apache.HTTP_MOVED_TEMPORARILY,
            ):

                # We've been told to redirect again.  We'll pass a special
                # argument to ensure that if we end up back at the server, we
                # won't be redirected again.

                log_debug(1, "Redirected again!  Code=", redirectStatus)
                # pylint: disable-next=invalid-name
                redirectStatus = self.__redirectToNextLocation(True)

            if redirectStatus not in (apache.HTTP_OK, apache.HTTP_PARTIAL_CONTENT):

                # We must have run out of retry attempts.  Fail over to Hosted
                # to perform the request.

                log_debug(
                    1,
                    "Redirection failed; retries exhausted.  " "Failing over.  Code=",
                    redirectStatus,
                )
                # pylint: disable-next=invalid-name
                redirectStatus = self.__redirectFailover()

            return SharedHandler._handleServerResponse(self, redirectStatus)

        else:
            # Otherwise, revert to default behavior.
            return SharedHandler._handleServerResponse(self, status)

    # pylint: disable-next=invalid-name
    def __redirectToNextLocation(self, loopProtection=False):
        """This function will perform a redirection to the next location, as
        specified in the last response's "Location" header. This function will
        return an actual HTTP response status code.  If successful, it will
        return apache.HTTP_OK, not apache.OK.  If unsuccessful, this function
        will retry a configurable number of times, as defined in
        CFG.NETWORK_RETRIES.  The following codes define "success".

          HTTP_OK
          HTTP_PARTIAL_CONTENT
          HTTP_MOVED_TEMPORARILY
          HTTP_MOVED_PERMANENTLY

        Upon successful completion of this function, the responseContext
        should be populated with the response.

        Arguments:

        loopProtection - If True, this function will insert a special
                       header into the new request that tells the RHN
                       server not to issue another redirect to us, in case
                       that's where we end up being redirected.

        Return:

        This function may return any valid HTTP_* response code.  See
        __redirectToNextLocationNoRetry for more info.
        """
        # pylint: disable-next=invalid-name
        retriesLeft = CFG.NETWORK_RETRIES

        # We'll now try to redirect to the 3rd party.  We will keep
        # retrying until we exhaust the number of allowed attempts.
        # Valid response codes are:
        #     HTTP_OK
        #     HTTP_PARTIAL_CONTENT
        #     HTTP_MOVED_PERMANENTLY
        #     HTTP_MOVED_TEMPORARILY

        # pylint: disable-next=invalid-name
        redirectStatus = self.__redirectToNextLocationNoRetry(loopProtection)
        while (
            redirectStatus != apache.HTTP_OK
            and redirectStatus != apache.HTTP_PARTIAL_CONTENT
            and redirectStatus != apache.HTTP_MOVED_PERMANENTLY
            and redirectStatus != apache.HTTP_MOVED_TEMPORARILY
            and retriesLeft > 0
        ):

            # pylint: disable-next=invalid-name
            retriesLeft = retriesLeft - 1
            log_debug(
                1,
                "Redirection failed; trying again.  " "Retries left=",
                retriesLeft,
                "Code=",
                redirectStatus,
            )

            # Pop the current response context and restore the state to
            # the last successful response.  The acts of remove the current
            # context will cause all of its open connections to be closed.
            self.responseContext.remove()

            # XXX: Possibly sleep here for a second?
            # pylint: disable-next=invalid-name
            redirectStatus = self.__redirectToNextLocationNoRetry(loopProtection)

        return redirectStatus

    # pylint: disable-next=invalid-name
    def __redirectToNextLocationNoRetry(self, loopProtection=False):
        """This function will perform a redirection to the next location, as
        specified in the last response's "Location" header. This function will
        return an actual HTTP response status code.  If successful, it will
        return apache.HTTP_OK, not apache.OK.  If unsuccessful, this function
        will simply return; no retries will be performed.  The following error
        codes can be returned:

        HTTP_OK,HTTP_PARTIAL_CONTENT - Redirect successful.
        HTTP_MOVED_TEMPORARILY     - Redirect was redirected again by 3rd party.
        HTTP_MOVED_PERMANENTLY     - Redirect was redirected again by 3rd party.
        HTTP_INTERNAL_SERVER_ERROR - Error extracting redirect information
        HTTP_SERVICE_UNAVAILABLE   - Could not connect to 3rd party server,
                                     connection was reset, or a read error
                                     occurred during communication.
        HTTP_*                     - Any other HTTP status code may also be
                                     returned.

        Upon successful completion of this function, a new responseContext
        will be created and pushed onto the stack.
        """

        # Obtain the redirect location first before we replace the current
        # response context.  It's contained in the Location header of the
        # previous response.

        # pylint: disable-next=invalid-name
        redirectLocation = self._get_header(rhnConstants.HEADER_LOCATION)

        # We are about to redirect to a new location so now we'll push a new
        # response context before we return any errors.
        self.responseContext.add()

        # There should always be a redirect URL passed back to us.  If not,
        # there's an error.

        if not redirectLocation:
            log_error("  No redirect location specified!")
            Traceback(mail=0)
            return apache.HTTP_INTERNAL_SERVER_ERROR

        # The _get_header function returns the value as a list.  There should
        # always be exactly one location specified.

        # pylint: disable-next=invalid-name
        redirectLocation = redirectLocation[0]
        log_debug(1, "  Redirecting to: ", redirectLocation)

        # Tear apart the redirect URL.  We need the scheme, the host, the
        # port (if not the default), and the URI.

        # pylint: disable-next=invalid-name,unused-variable
        _scheme, host, port, uri, query = self._parse_url(redirectLocation)

        # Add back the query string
        if query:
            uri += "?" + query

        # Now create a new connection.  We'll use SSL if configured to do
        # so.

        params = {
            "host": host,
            "port": port,
        }
        if CFG.has_key("timeout"):
            params["timeout"] = CFG.TIMEOUT
        log_debug(1, "  Redirecting with SSL.  Cert= ", self.caChain)
        params["trusted_certs"] = [self.caChain]
        connection = connections.HTTPSConnection(**params)

        # Put the connection into the current response context.
        self.responseContext.setConnection(connection)

        # Now open the connection to the 3rd party server.

        log_debug(4, "Attempting to connect to 3rd party server...")
        try:
            connection.connect()
        except socket.error as e:
            log_error("Error opening redirect connection", redirectLocation, e)
            Traceback(mail=0)
            return apache.HTTP_SERVICE_UNAVAILABLE
        log_debug(4, "Connected to 3rd party server:", connection.sock.getpeername())

        # Put the request out on the wire.

        response = None
        try:
            # We'll redirect to the URI made in the original request, but with
            # the new server instead.

            log_debug(4, "Making request: ", self.req.method, uri)
            connection.putrequest(self.req.method, uri)

            # Add some custom headers.

            if loopProtection:
                connection.putheader(rhnConstants.HEADER_RHN_REDIRECT, "0")

            log_debug(4, "  Adding original URL header: ", self.rhnParent)
            connection.putheader(rhnConstants.HEADER_RHN_ORIG_LOC, self.rhnParent)

            # Add all the other headers in the original request in case we
            # need to re-authenticate with Hosted.

            for hdr in list(self.req.headers_in.keys()):
                if hdr.lower().startswith("x-rhn"):
                    connection.putheader(hdr, self.req.headers_in[hdr])
                    log_debug(
                        4, "Passing request header: ", hdr, self.req.headers_in[hdr]
                    )

            connection.endheaders()

            response = connection.getresponse()
        except IOError as ioe:
            # Raised by getresponse() if server closes connection on us.
            log_error("Redirect connection reset by peer.", redirectLocation, ioe)
            Traceback(mail=0)

            # The connection is saved in the current response context, and
            # will be closed when the caller pops the context.
            return apache.HTTP_SERVICE_UNAVAILABLE

        except socket.error as se:  # pylint: disable=duplicate-except
            # Some socket error occurred.  Possibly a read error.
            log_error("Redirect request failed.", redirectLocation, se)
            Traceback(mail=0)

            # The connection is saved in the current response context, and
            # will be closed when the caller pops the context.
            return apache.HTTP_SERVICE_UNAVAILABLE

        # Save the response headers and body FD in the current communication
        # context.

        self.responseContext.setBodyFd(response)
        self.responseContext.setHeaders(response.msg)

        log_debug(
            4, "Response headers: ", list(self.responseContext.getHeaders().items())
        )
        log_debug(4, "Got redirect response.  Status=", response.status)

        # Return the HTTP status to the caller.

        return response.status

    # pylint: disable-next=invalid-name
    def __redirectFailover(self):
        """This routine resends the original request back to the satellite/hosted
        system if a redirect to a 3rd party failed.  To prevent redirection loops
        from occurring, an "X-RHN-Redirect: 0" header is passed along with the
        request.  This function will return apache.HTTP_OK if everything
        succeeded, otherwise it will return an appropriate HTTP error code.
        """

        # Add a special header which will tell the server not to send us any
        # more redirects.

        headers = rhnFlags.get("outputTransportOptions")
        headers[rhnConstants.HEADER_RHN_REDIRECT] = "0"

        log_debug(4, "Added X-RHN-Redirect header to outputTransportOptions:", headers)

        # Reset the existing connection and reconnect to the RHN parent server.

        self.responseContext.clear()
        self._connectToParent()

        # We'll just call serverCommo once more.  The X-RHN-Redirect constant
        # will prevent us from falling into an infinite loop.  Only GETs are
        # redirected, so we can safely pass an empty string in as the request
        # body.

        status = self._serverCommo()

        # This little hack isn't pretty, but lets us normalize our result code.

        if status == apache.OK:
            status = apache.HTTP_OK

        return status
07070100000028000081B400000000000000000000000168DD3ED300001449000000000000000000000000000000000000002300000000spacewalk-proxy/responseContext.py# pylint: disable=missing-module-docstring,invalid-name
#
# Copyright (c) 2008--2017 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
# This module provides a response context for use by the proxy broker
# and redirect components.  This context provides a stackable set of
# response, header, and connection sets which can be used to easily maintain
# the proxy's response state in the event of redirects.

CXT_RESP_HEADERS = "headers"
CXT_RESP_BODYFD = "bodyFd"
CXT_CONNECTION = "connection"


class ResponseContext:
    """Stackable holder for the proxy's per-request response state.

    Each stack entry groups three things: the response headers, the
    response body file object, and the upstream connection.  Pushing a
    fresh entry on redirect and popping it afterwards lets the broker
    and redirect components unwind their response state cleanly.
    """

    # Constructors and Destructors ############################################

    def __init__(self):
        # Stack of context dicts; the last element is the active context.
        # pylint: disable-next=invalid-name
        self._contextStack = []
        self.add()

    # Public Interface ########################################################

    # pylint: disable-next=invalid-name
    def getHeaders(self):
        """Return the response headers of the active context."""
        return self._top()[CXT_RESP_HEADERS]

    # pylint: disable-next=invalid-name
    def setHeaders(self, responseHeaders):
        """Replace the response headers of the active context."""
        self._top()[CXT_RESP_HEADERS] = responseHeaders

    # pylint: disable-next=invalid-name
    def getBodyFd(self):
        """Return the response body file object of the active context."""
        return self._top()[CXT_RESP_BODYFD]

    # pylint: disable-next=invalid-name
    def setBodyFd(self, responseBodyFd):
        """Replace the response body file object of the active context."""
        self._top()[CXT_RESP_BODYFD] = responseBodyFd

    # pylint: disable-next=invalid-name
    def getConnection(self):
        """Return the upstream connection of the active context."""
        return self._top()[CXT_CONNECTION]

    # pylint: disable-next=invalid-name
    def setConnection(self, connection):
        """Replace the upstream connection of the active context."""
        self._top()[CXT_CONNECTION] = connection

    def add(self):
        """Push a fresh (all-None) context; it becomes the active one."""
        fresh = {
            CXT_RESP_HEADERS: None,
            CXT_RESP_BODYFD: None,
            CXT_CONNECTION: None,
        }
        self._contextStack.append(fresh)

    def remove(self):
        """Close and pop the active context (no-op on an empty stack)."""
        if self._contextStack:
            self.close()
            self._contextStack.pop()

    def close(self):
        """Close the body file object and connection of the active context."""
        ctx = self._top()
        if ctx[CXT_RESP_BODYFD]:
            ctx[CXT_RESP_BODYFD].close()
        if ctx[CXT_CONNECTION]:
            ctx[CXT_CONNECTION].close()

    def clear(self):
        """Close and remove every context, leaving the stack empty."""
        while self._contextStack:
            self.remove()

    def __str__(self):
        """String representation (the raw context stack)."""
        return str(self._contextStack)

    # Helper Methods ##########################################################

    def _top(self):
        # Active context; raises IndexError on an empty stack, matching
        # the historical behavior of accessing the current context.
        return self._contextStack[-1]


###############################################################################
# Test Routine
###############################################################################

if __name__ == "__main__":

    def _show(label, ctx):
        # Echo the stack state after each operation (output format is
        # kept identical to the historical test routine).
        print(label + " | context = " + str(ctx))

    respContext = ResponseContext()
    _show("init  ", respContext)

    respContext.remove()
    _show("remove", respContext)

    respContext.add()
    _show("add   ", respContext)

    respContext.remove()
    _show("remove", respContext)

    respContext.add()
    respContext.add()
    _show("addadd", respContext)

    respContext.clear()
    _show("clear ", respContext)

    respContext.add()
    _show("add   ", respContext)
07070100000029000041FD00000000000000000000000268DD3ED300000000000000000000000000000000000000000000001900000000spacewalk-proxy/rhn-conf0707010000002A000081B400000000000000000000000168DD3ED300000144000000000000000000000000000000000000002200000000spacewalk-proxy/rhn-conf/Makefile# Makefile for installation of the Spacewalk Proxy configuration files
#

# what is the backend top dir
TOP	= ..

# Install every *.conf file in this directory into the shared
# config-defaults tree; the shared rules install them with INSTALL_DATA.
INSTALL_FILES	= $(wildcard *.conf)
INSTALL_DEST	= /usr/share/rhn/config-defaults

# Pull in the common install/clean machinery (INSTALL_*, DIRS, install::).
include $(TOP)/Makefile.defs

# In addition to the shared install:: rule, make sure /etc/rhn/rhn.conf
# exists (empty) so the proxy always has a local override file to read.
# $(PREFIX) allows staged (DESTDIR-style) installs.
install :: $(PREFIX)$(INSTALL_DEST)
	mkdir -p $(PREFIX)/etc/rhn
	touch $(PREFIX)/etc/rhn/rhn.conf
0707010000002B000081B400000000000000000000000168DD3ED300000393000000000000000000000000000000000000002800000000spacewalk-proxy/rhn-conf/rhn_proxy.conf# ** DO NOT EDIT **
# rhn_proxy.conf
#

## unexposed:
buffer_size = 16384
squid = 127.0.0.1:8080

## exposed:
traceback_mail = user0@example.com, user1@example.com

rhn_parent =

ca_chain = /usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT

http_proxy =
http_proxy_username =
http_proxy_password =

pkg_dir = /var/spool/rhn-proxy

# Maximum time in seconds that you allow a transfer operation to take.
timeout = 120

# Size (in bytes) of the largest file that will be transferred entirely
# in memory. Anything larger will be written to /tmp. If you have enough
# ram and want to improve performance of file transfers that are larger
# than this (or don't have enough disk space in /tmp) then you can override
# by setting proxy.max_mem_file_size = <large_number> in /etc/rhn/rhn.conf.
# If you have problems with running out of memory during high load then try
# reducing this.
#
# 16MB in bytes
max_mem_file_size = 16384000
0707010000002C000081B400000000000000000000000168DD3ED30000011A000000000000000000000000000000000000002F00000000spacewalk-proxy/rhn-conf/rhn_proxy_broker.conf# ** DO NOT EDIT **
# rhn_proxy_broker.conf
#

### unexposed
log_file = /var/log/rhn/rhn_proxy_broker.log
proxy_local_flist = getPackage, getPackageSource, getPackageHeader
auth_cache_server = 127.0.0.1:9999

### exposed
debug = 1

# Use local storage by default
use_local_auth = 1
0707010000002D000081B400000000000000000000000168DD3ED30000009B000000000000000000000000000000000000003100000000spacewalk-proxy/rhn-conf/rhn_proxy_redirect.conf# ** DO NOT EDIT **
# rhn_proxy_redirect.conf
#

### unexposed
log_file = /var/log/rhn/rhn_proxy_redirect.log

### exposed
debug = 1

network_retries = 3

0707010000002E000081FD00000000000000000000000168DD3ED30000065A000000000000000000000000000000000000001A00000000spacewalk-proxy/rhn-proxy#!/bin/sh

HTTPD="httpd"

# SUSE-style systems ship the Apache service as "apache2" rather than "httpd".
if [ -e /usr/lib/systemd/system/apache2.service -o -e /etc/init.d/apache2 ]; then
    HTTPD="apache2"
fi

# Services managed by this script, listed in start order (stopped in reverse).
SERVICES="squid $HTTPD salt-broker tftp"

# Pull in Red Hat init helper functions when present (no-op elsewhere).
if [ -e /etc/init.d/functions ]; then
    . /etc/init.d/functions
fi

# Accumulated exit status: 0 = every service action succeeded.
RETVAL=0

forward_services() {
    # Run action $1 (start/stop/status/...) on each service in $SERVICES,
    # in the listed order.  A service is touched only when an init script
    # or a systemd unit for it exists.  RETVAL collapses to 1 if any
    # action has failed so far.
    #
    # FIX: use POSIX arithmetic $(( )) instead of the bash-only "let" --
    # the script's shebang is /bin/sh, where "let" is not guaranteed.
    ACTION="$1"

    for service in $SERVICES; do
        if [ -e /etc/init.d/$service -o -e /usr/lib/systemd/system/$service.service ]; then
            /sbin/service $service $ACTION
            RETVAL=$((RETVAL + $?))
        fi
        if [ $RETVAL -gt 0 ]; then
            RETVAL=1
        fi
    done
}

reverse_services() {
    # Same as forward_services but iterates $SERVICES in reverse order,
    # so dependents are stopped before the services they rely on.
    # NOTE(review): word reversal relies on GNU coreutils `tac -s" "` --
    # confirm it is available on all target platforms.
    #
    # FIX: use POSIX arithmetic $(( )) instead of the bash-only "let" --
    # the script's shebang is /bin/sh, where "let" is not guaranteed.
    ACTION="$1"

    for service in $(echo $SERVICES | tac -s" "); do
        if [ -e /etc/init.d/$service -o -e /usr/lib/systemd/system/$service.service ]; then
            /sbin/service $service $ACTION
            RETVAL=$((RETVAL + $?))
        fi
        if [ $RETVAL -gt 0 ]; then
            RETVAL=1
        fi
    done
}

start() {
    # Bring up every proxy service in dependency order.
    echo "Starting spacewalk-proxy..."
    forward_services start
    echo "Done."
    return 0
}

stop() {
    # Take the proxy services down in reverse order; additionally stop
    # the tftp systemd socket unit where it exists, since socket
    # activation would otherwise restart tftp on demand.
    echo "Shutting down spacewalk-proxy..."
    reverse_services stop
    if [ -e /usr/lib/systemd/system/tftp.socket ]; then
        systemctl stop tftp.socket
    fi
    echo "Done."
    return 0
}

restart() {
    # Full stop/start cycle.  A failing stop (e.g. services were never
    # started) is deliberately ignored, hence RETVAL is reset.
    stop
    sleep 2
    RETVAL=0
    start
}

# Action dispatch: restart and condrestart are handled identically.
case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    status)
        forward_services status
        ;;
    restart|condrestart)
        restart
        ;;
    *)
        echo "Usage: rhn-proxy {start|stop|status|restart}"
        exit 1
        ;;
esac
exit $RETVAL
0707010000002F000081B400000000000000000000000168DD3ED3000004FF000000000000000000000000000000000000001F00000000spacewalk-proxy/rhn-proxy.sgml<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN" [
<!ENTITY RHNPROXY "Spacewalk Proxy Server" >
<!ENTITY NAME "Spacewalk Proxy initialization script" >
<!ENTITY COMMAND "rhn-proxy" >

]>
<refentry>

<RefMeta>
<RefEntryTitle>&COMMAND;</RefEntryTitle><manvolnum>8</manvolnum>
<RefMiscInfo>Version 0.5</RefMiscInfo>
</RefMeta>

<RefNameDiv>
<RefName><command>&COMMAND;</command></RefName>
<RefPurpose>
&COMMAND; is the initialization mechanism used to start and stop &RHNPROXY;
 services.
</RefPurpose>
</RefNameDiv>

<RefSynopsisDiv>
<Synopsis>
    <cmdsynopsis>
        <command>/usr/sbin/&COMMAND; start|stop|status|restart</command>
    </cmdsynopsis>
</Synopsis>
</RefSynopsisDiv>

<RefSect1><Title>Description</Title>

<para>
    The &NAME; (<emphasis>&COMMAND;</emphasis>) is a single initialization
    mechanism used to bring up and down all services used by a &RHNPROXY;.
    Those services include squid, httpd and others.
</para>

</RefSect1>

<RefSect1><Title>See Also</Title>
<simplelist>
    <member>rhn_package_manager(8)</member>
    <member>rhn-proxy-activate(8)</member>
</simplelist>
</RefSect1>

<RefSect1><Title>Authors</Title>
<simplelist>
    <member>Todd Warner <email>taw@redhat.com</email></member>
</simplelist>
</RefSect1>
</RefEntry>
07070100000030000081B400000000000000000000000168DD3ED30000199C000000000000000000000000000000000000002600000000spacewalk-proxy/rhnAuthCacheClient.py# pylint: disable=missing-module-docstring,invalid-name
# rhnAuthCacheClient.py
# -------------------------------------------------------------------------------
# Implements a client-side 'remote shelf' caching object used for
# authentication token caching.
# (Client, meaning, a client to the authCache daemon)
#
# Copyright (c) 2008--2017 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
# -------------------------------------------------------------------------------

## language imports
import socket
import sys

try:
    #  python 2
    from xmlrpclib import Fault
except ImportError:
    #  python3
    from xmlrpc.client import Fault

## local imports
from spacewalk.common.rhnLog import log_debug, log_error
from spacewalk.common.rhnTB import Traceback
from spacewalk.common.rhnException import rhnFault
from spacewalk.common.rhnTranslate import _
from uyuni.common.usix import raise_with_tb
from .rhnAuthProtocol import CommunicationError, send, recv

#
# Protocol description:
# 1. Send the size of the data as a long (4 bytes), in network order
# 2. Send the data
#

# Shamelessly stolen from xmlrpclib.xmlrpc


class _Method:
    """Bind XML-RPC to an RPC Server

    Some magic to bind an XML-RPC method to an RPC server.
    Supports "nested" methods (e.g. examples.getStateName).
    """

    # pylint: disable=R0903

    def __init__(self, msend, name):
        self.__send = msend
        self.__name = name

    def __getattr__(self, name):
        # pylint: disable-next=consider-using-f-string
        return _Method(self.__send, "%s.%s" % (self.__name, name))

    def __call__(self, *args):
        return self.__send(self.__name, args)

    def __str__(self):
        # pylint: disable-next=consider-using-f-string
        return "<_Method instance at %s>" % id(self)

    __repr__ = __str__


class Shelf:
    """Client authentication temp. db.

    Main class that the client side (client to the caching daemon) has to
    instantiate to expose the proper API. Basically, the API is a dictionary.
    """

    # pylint: disable=R0903

    def __init__(self, server_addr):
        # server_addr: address (host, port) of the local auth cache daemon.
        log_debug(6, server_addr)
        # pylint: disable-next=invalid-name
        self.serverAddr = server_addr

    def __request(self, methodname, params):
        """Perform one framed XML-RPC exchange with the auth cache daemon.

        A new TCP connection is opened per request.  Any transport failure
        is logged, a traceback is mailed, and a generic rhnFault(1000) is
        raised (which, per the FIXMEs below, never reaches the client).
        Returns the first decoded response parameter.
        """
        # pylint: disable=R0915
        log_debug(6, methodname, params)
        # Init the socket
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

        try:
            sock.connect(self.serverAddr)
        except socket.error as e:
            sock.close()
            # pylint: disable-next=consider-using-f-string
            log_error("Error connecting to the auth cache: %s" % str(e))
            Traceback(
                "Shelf.__request",
                # pylint: disable-next=consider-using-f-string
                extra="""
              Error connecting to the authentication cache daemon.
              Make sure it is started on %s"""
                % str(self.serverAddr),
            )
            # FIXME: PROBLEM: this rhnFault will never reach the client
            raise_with_tb(
                rhnFault(
                    1000,
                    _(
                        "Spacewalk Proxy error (issues connecting to auth cache). "
                        "Please contact your system administrator"
                    ),
                ),
                sys.exc_info()[2],
            )

        wfile = sock.makefile("w")

        try:
            send(wfile, methodname, None, *params)
        except CommunicationError:
            wfile.close()
            sock.close()
            Traceback("Shelf.__request", extra="Encountered a CommunicationError")
            raise
        # BUGFIX: bind the exception as "e" -- the log_error() call below
        # previously raised NameError because "e" was undefined here.
        except socket.error as e:
            wfile.close()
            sock.close()
            # pylint: disable-next=consider-using-f-string
            log_error("Error communicating to the auth cache: %s" % str(e))
            Traceback(
                "Shelf.__request",
                extra="""\
                     Error sending to the authentication cache daemon.
                     Make sure the authentication cache daemon is started""",
            )
            # FIXME: PROBLEM: this rhnFault will never reach the client
            raise_with_tb(
                rhnFault(
                    1000,
                    _(
                        "Spacewalk Proxy error (issues connecting to auth cache). "
                        "Please contact your system administrator"
                    ),
                ),
                sys.exc_info()[2],
            )

        wfile.close()

        rfile = sock.makefile("r")
        try:
            params, methodname = recv(rfile)
        except CommunicationError as e:
            log_error(e.faultString)
            rfile.close()
            sock.close()
            # pylint: disable-next=consider-using-f-string
            log_error("Error communicating to the auth cache: %s" % str(e))
            Traceback(
                "Shelf.__request",
                extra="""\
                      Error receiving from the authentication cache daemon.
                      Make sure the authentication cache daemon is started""",
            )
            # FIXME: PROBLEM: this rhnFault will never reach the client
            raise_with_tb(
                rhnFault(
                    1000,
                    _(
                        "Spacewalk Proxy error (issues communicating to auth cache). "
                        "Please contact your system administrator"
                    ),
                ),
                sys.exc_info()[2],
            )
        except Fault:
            # XML-RPC level fault from the daemon: clean up and propagate.
            rfile.close()
            sock.close()
            raise

        return params[0]

    def __getattr__(self, name):
        """Expose daemon methods as attributes (XML-RPC method proxies)."""
        log_debug(6, name)
        return _Method(self.__request, name)

    def __str__(self):
        # pylint: disable-next=consider-using-f-string
        return "<Remote-Shelf instance at %s>" % id(self)
07070100000031000081B400000000000000000000000168DD3ED300000B1F000000000000000000000000000000000000002300000000spacewalk-proxy/rhnAuthProtocol.py# pylint: disable=missing-module-docstring,invalid-name
# Communication routines for sockets connecting to the auth token cache daemon
#
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# -------------------------------------------------------------------------------

## system imports
import struct

## local imports
try:
    #  python 2
    from xmlrpclib import dumps, loads
except ImportError:
    #  python3
    from xmlrpc.client import dumps, loads


class CommunicationError(Exception):
    """Error raised when exchanging framed XML-RPC data with the auth
    token cache daemon fails (short read, oversized block, ...)."""

    def __init__(self, faultCode, faultString, *params):
        # Let the base class record the extra params as self.args.
        Exception.__init__(self, *params)
        # Fault-style attribute names, mirroring xmlrpc Fault objects.
        # pylint: disable-next=invalid-name
        self.faultCode = faultCode
        # pylint: disable-next=invalid-name
        self.faultString = faultString


# pylint: disable-next=invalid-name
def readSocket(fd, n):
    """Read exactly n items (bytes or characters) from file object fd.

    Loops until n items were read or fd.read() returns an empty value
    (EOF); a short result therefore signals a truncated stream.
    Generalized to work with both text-mode (str) and binary-mode
    (bytes) file objects -- the frame payload handled by recv() must be
    bytes for struct unpacking.
    """
    chunks = []
    while n > 0:
        buff = fd.read(n)
        if not buff:
            break
        n = n - len(buff)
        chunks.append(buff)
    if not chunks:
        # Preserve historical behavior: empty str on an immediate EOF.
        return ""
    # Join with an empty value of the same type as the data read.
    return chunks[0][:0].join(chunks)


def send(fd, methodname=None, fault=None, *params):
    """Serialize an XML-RPC packet and write it to file object fd.

    Wire format: a 4-byte network-order length prefix, then the XML
    payload.  Exactly one of methodname/fault selects the packet type:
      * methodname set -> a method call with *params as its arguments
      * fault set      -> a Fault response
      * neither        -> a plain params response
    Returns the payload length (length prefix excluded).

    FIX: methodname and fault are accepted positionally on purpose --
    callers such as rhnAuthCacheClient invoke send(fd, methodname, None,
    *args); the previous keyword-only signature silently folded those
    values into *params and broke serialization.
    """
    if methodname:
        buff = dumps(params, methodname=methodname)
    elif fault:
        buff = dumps(fault)
    else:
        buff = dumps(params)
    # Write the length first ("!L" = unsigned 32-bit, network order)...
    fd.write(struct.pack("!L", len(buff)))
    # ...then send the data itself.
    fd.write(buff)
    return len(buff)


def recv(rfile):
    """Read one length-prefixed XML-RPC packet from rfile and decode it.

    Returns the (params, methodname) tuple produced by xmlrpc loads().
    Raises CommunicationError on a short read or an oversized block.
    NOTE(review): struct.unpack and loads expect bytes -- confirm rfile
    is opened in a mode whose read() yields byte data.
    """
    # FIX: size the prefix with the *same* format send() packs with.
    # struct.calcsize("L") is the native size (8 on LP64 platforms),
    # which made us consume too many bytes and corrupt the framing;
    # "!L" is the 4-byte network-order prefix actually on the wire.
    n = struct.calcsize("!L")
    # Read the first bytes to figure out the payload size
    buff = readSocket(rfile, n)
    if len(buff) != n:
        # Incomplete read
        # pylint: disable-next=consider-using-f-string
        raise CommunicationError(0, "Expected %d bytes; got only %d" % (n, len(buff)))

    (n,) = struct.unpack("!L", buff)

    if n > 65536:
        # The advertised payload is too big.  FIX: report the advertised
        # size n, not len(buff) (which is always the 4-byte prefix).
        # pylint: disable-next=consider-using-f-string
        raise CommunicationError(1, "Block too big: %s" % n)

    buff = readSocket(rfile, n)
    if len(buff) != n:
        # Incomplete read
        # pylint: disable-next=consider-using-f-string
        raise CommunicationError(0, "Expected %d bytes; got only %d" % (n, len(buff)))

    return loads(buff)
07070100000032000081B400000000000000000000000168DD3ED300000592000000000000000000000000000000000000002000000000spacewalk-proxy/rhnConstants.py#!/usr/bin/python
# pylint: disable=invalid-name
#
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
##
# rhnDefines.py - Constants used throughout the Spacewalk Proxy.
# -----------------------------------------------------------------------------
#

"""Constants used by the Spacewalk Proxy"""

# HTTP Headers

HEADER_ACTUAL_URI = "X-RHN-ActualURI"
HEADER_EFFECTIVE_URI = "X-RHN-EffectiveURI"
HEADER_CHECKSUM = "X-RHN-Checksum"
HEADER_LOCATION = "Location"
HEADER_CONTENT_LENGTH = "Content-Length"
# Set to "0" on an outgoing request to tell the parent server not to
# issue further redirects (prevents redirect loops in the redirect
# component).
HEADER_RHN_REDIRECT = "X-RHN-Redirect"
HEADER_RHN_ORIG_LOC = "X-RHN-OriginalLocation"

# HTTP Schemes

SCHEME_HTTP = "http"
SCHEME_HTTPS = "https"

# These help us match URIs when kickstarting through a Proxy.

URI_PREFIX_KS = "/ty/"
URI_PREFIX_KS_CHECKSUM = "/ty-cksm/"

# Component Constants

COMPONENT_BROKER = "proxy.broker"
COMPONENT_REDIRECT = "proxy.redirect"
07070100000033000081B400000000000000000000000168DD3ED30000599E000000000000000000000000000000000000002000000000spacewalk-proxy/rhnProxyAuth.py# pylint: disable=missing-module-docstring,invalid-name
# Spacewalk Proxy Server authentication manager.
#
# Copyright (c) 2008--2017 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# -----------------------------------------------------------------------------

# system imports
import os
import time
import socket

try:
    #  python 2
    import xmlrpclib
except ImportError:
    #  python3
    import xmlrpc.client as xmlrpclib
import sys

from hashlib import sha256

# sys.path.append('/usr/share/rhn')
from rhn import rpclib
from rhn import SSL
from spacewalk.common.rhnTB import Traceback
from spacewalk.common.rhnLog import log_debug, log_error
from spacewalk.common.rhnConfig import CFG
from spacewalk.common.rhnException import rhnFault
from spacewalk.common import rhnCache
from spacewalk.common.rhnTranslate import _
from up2date_client import config  # pylint: disable=E0012, C0413
from uyuni.common.rhnLib import parseUrl
from uyuni.common.usix import raise_with_tb
from . import rhnAuthCacheClient

if hasattr(socket, "sslerror"):
    socket_error = socket.sslerror  # pylint: disable=no-member
else:
    from ssl import socket_error

# To avoid doing unnecessary work, keep ProxyAuth object global
# pylint: disable-next=invalid-name
__PROXY_AUTH = None
UP2DATE_CONFIG = config.Config("/etc/sysconfig/rhn/up2date")
PRODUCT_NAME = "SUSE Multi-Linux Manager"


def get_proxy_auth(hostname=None):
    """Return the module-wide ProxyAuth singleton for hostname.

    The cached instance is reused; it is rebuilt when it does not exist
    yet or was built for a different hostname.
    """
    global __PROXY_AUTH
    if not __PROXY_AUTH or __PROXY_AUTH.hostname != hostname:
        __PROXY_AUTH = ProxyAuth(hostname)
    return __PROXY_AUTH


# pylint: disable-next=missing-class-docstring
class ProxyAuth:

    # pylint: disable-next=invalid-name
    __serverid = None
    # pylint: disable-next=invalid-name
    __systemid = None
    # pylint: disable-next=invalid-name
    __systemid_mtime = None
    # pylint: disable-next=invalid-name
    __systemid_filename = UP2DATE_CONFIG["systemIdPath"]

    # pylint: disable-next=invalid-name
    __nRetries = 3  # number of login retries

    hostname = None

    def __init__(self, hostname):
        """hostname: name of this proxy host; stored on the class so all
        instances share it (compared by get_proxy_auth to decide reuse).
        Loads/refreshes the systemid and serverid from disk."""
        log_debug(3)
        ProxyAuth.hostname = hostname
        self.__processSystemid()

    # pylint: disable-next=invalid-name
    def __processSystemid(self):
        """update the systemid/serverid but only if they stat differently.
        returns 0=no updates made; or 1=updates were made

        Reads the systemid file (path from the up2date config) and caches
        its contents and the derived server id on the ProxyAuth class.
        Raises rhnFault(1000) when the file is empty, missing, or
        unreadable.
        """
        mtime = None
        try:
            statinfo = os.stat(ProxyAuth.__systemid_filename)
            mtime = statinfo.st_mtime
            # An empty systemid means the proxy was never activated.
            if statinfo.st_size == 0:
                raise_with_tb(
                    rhnFault(
                        1000,
                        _(
                            f"{PRODUCT_NAME} Proxy is not configured, systemid file is empty. "
                            "Please contact your system administrator."
                        ),
                    ),
                    sys.exc_info()[2],
                )

        # pylint: disable-next=unused-variable
        except FileNotFoundError as e:
            raise_with_tb(
                rhnFault(
                    1000,
                    _(
                        f"{PRODUCT_NAME} Proxy is not configured, systemid file is missing. "
                        "Please contact your system administrator."
                    ),
                ),
                sys.exc_info()[2],
            )
        # Any other I/O failure (FileNotFoundError was handled above) is
        # reported as a permissions-style configuration error.
        except IOError as e:
            log_error(
                # pylint: disable-next=consider-using-f-string
                "unable to stat %s: %s"
                % (ProxyAuth.__systemid_filename, repr(e))
            )
            raise_with_tb(
                rhnFault(
                    1000,
                    _(
                        f"{PRODUCT_NAME} Proxy error ({PRODUCT_NAME} Proxy systemid has wrong permissions?). "
                        "Please contact your system administrator."
                    ),
                ),
                sys.exc_info()[2],
            )

        # stat() succeeding does not guarantee read permission; check it.
        if not os.access(ProxyAuth.__systemid_filename, os.R_OK):
            # pylint: disable-next=consider-using-f-string
            log_error("unable to access %s" % ProxyAuth.__systemid_filename)
            raise rhnFault(
                1000,
                _(
                    f"{PRODUCT_NAME} Proxy error ({PRODUCT_NAME} Proxy systemid has wrong permissions?). "
                    "Please contact your system administrator."
                ),
            )

        # First run: remember the file's mtime for change detection.
        if not self.__systemid_mtime:
            ProxyAuth.__systemid_mtime = mtime

        # Unchanged on disk and both cached values present -> keep cache.
        if self.__systemid_mtime == mtime and self.__systemid and self.__serverid:
            # nothing to do
            return 0

        # get systemid
        try:
            # pylint: disable-next=unspecified-encoding
            ProxyAuth.__systemid = open(ProxyAuth.__systemid_filename, "r").read()
        except IOError as e:
            # pylint: disable-next=consider-using-f-string
            log_error("unable to read %s" % ProxyAuth.__systemid_filename)
            raise_with_tb(
                rhnFault(
                    1000,
                    _(
                        f"{PRODUCT_NAME} Proxy error ({PRODUCT_NAME} Proxy systemid has wrong permissions?). "
                        "Please contact your system administrator."
                    ),
                ),
                sys.exc_info()[2],
            )

        # get serverid: decode the XML-RPC blob; [3:] drops the leading
        # 3-character prefix of the system_id field (presumably "ID-" --
        # verify against the systemid file format).
        # pylint: disable-next=invalid-name,unused-variable
        sysid, _cruft = xmlrpclib.loads(ProxyAuth.__systemid)
        ProxyAuth.__serverid = sysid[0]["system_id"][3:]

        log_debug(
            7,
            # pylint: disable-next=consider-using-f-string
            'SystemId: "%s[...snip  snip...]%s"'
            # pylint: disable-next=unsubscriptable-object,unsubscriptable-object
            % (ProxyAuth.__systemid[:20], ProxyAuth.__systemid[-20:]),
        )  # pylint: disable=unsubscriptable-object
        # pylint: disable-next=consider-using-f-string
        log_debug(7, "ServerId: %s" % ProxyAuth.__serverid)

        # ids were updated
        return 1

    def get_system_id(self):
        """Return the raw systemid XML, refreshing from disk if the file
        changed since the last read (see __processSystemid)."""
        self.__processSystemid()
        return self.__systemid

    # pylint: disable-next=invalid-name
    def check_cached_token(self, forceRefresh=0):
        """Return a valid auth token, logging in again when necessary.

        A fresh login is performed when no token is cached, when the
        caller forces a refresh, or when the systemid file changed on
        disk; a newly obtained token is written back to the cache.
        """
        log_debug(3)
        cached = self.get_cached_token()
        token = cached
        # Order matters: __processSystemid() has side effects and must
        # only run when the first two conditions are false.
        if not token or forceRefresh or self.__processSystemid():
            token = self.login()
        if token and token != cached:
            self.set_cached_token(token)
        return token

    def get_cached_token(self):
        """Return this proxy's token from the auth cache, or None."""
        log_debug(3)
        # Connect to the token-cache daemon.
        shelf = get_auth_shelf()
        key = self.__cache_proxy_key()
        # NOTE: shelf is a remote RPC proxy, so has_key() is an RPC
        # method name here, not the removed dict API -- keep it as-is.
        if not shelf.has_key(key):
            return None
        return shelf[key]

    def set_cached_token(self, token):
        """Caches current token in the auth cache.

        Returns the token on success; raises rhnFault(1000) when the
        cache daemon cannot store it (any failure is caught, logged via
        a mailed traceback, and converted to the generic fault).
        """
        log_debug(3)
        # Try to connect to the token-cache.
        shelf = get_auth_shelf()
        # Cache the token.
        try:
            shelf[self.__cache_proxy_key()] = token
        except:  # pylint: disable=bare-except
            # Deliberately broad: any storage failure (daemon down,
            # protocol error, corrupt token) ends up here.
            text = (
                _(
                    """\
Caching of authentication token for proxy id %s failed!
Either the authentication caching daemon is experiencing
problems, isn't running, or the token is somehow corrupt.
"""
                )
                % self.__serverid
            )
            Traceback("ProxyAuth.set_cached_token", extra=text)
            raise_with_tb(
                rhnFault(
                    1000,
                    _(
                        f"{PRODUCT_NAME} Proxy error (auth caching issue). "
                        "Please contact your system administrator."
                    ),
                ),
                sys.exc_info()[2],
            )
        log_debug(4, "successfully returning")
        return token

    def del_cached_token(self):
        """Drop this proxy's token from the auth cache (no-op if absent)."""
        log_debug(3)
        # Connect to the token cache and delete our entry.
        shelf = get_auth_shelf()
        try:
            del shelf[self.__cache_proxy_key()]
        except KeyError:
            # Token was not cached in the first place; nothing to do.
            pass

    def login(self):
        # pylint: disable-next=pointless-statement
        f""" Login and fetch new token (proxy token).

            How it works in a nutshell.
            Only the broker component uses this. We perform a xmlrpc request
            to rhn_parent. This occurs outside of the http process we are
            currently working on. So, we do this all on our own; do all of
            our own SSL decisionmaking etc. We use CFG.RHN_PARENT as we always
            bypass the SSL redirect.

            DESIGN NOTES:  what is the proxy auth token?
            -------------------------------------------
            An {PRODUCT_NAME} Proxy auth token is a token fetched upon login from
            {PRODUCT_NAME} Server or hosted.

            It has this format:
               'S:U:ST:EO:SIG'
            Where:
               S   = server ID
               U   = username
               ST  = server time
               EO  = expiration offset
               SIG = signature
               H   = hostname (important later)

            Within this function within the {PRODUCT_NAME} Proxy Broker we also tag on
            the hostname to the end of the token. The token as described above
            is enough for authentication purposes, but we need to identify
            the exact hostname (as the {PRODUCT_NAME} Proxy sees it). So now the token
            becomes (token:hostname):
               'S:U:ST:EO:SIG:H'

            DESIGN NOTES:  what is X-RHN-Proxy-Auth?
            -------------------------------------------
            This is where we use the auth token beyond {PRODUCT_NAME} Proxy login
            purposes. This a header used to track request routes through
            a hierarchy of {PRODUCT_NAME} Proxies.

            X-RHN-Proxy-Auth is a header that passes proxy authentication
            information around in the form of an ordered list of tokens. This
            list is used to gain information as to how a client request is
            routed throughout an RHN topology.

            Format: 'S1:U1:ST1:EO1:SIG1:H1,S2:U2:ST2:EO2:SIG2:H2,...'
                     |_________1_________| |_________2_________| |__...
                             token                 token
                     where token is really: token:hostname

            leftmost token was the first token hit by a client request.
            rightmost token was the last token hit by a client request.

        """
        # pylint: disable=R0915

        log_debug(3)
        server = self.__getXmlrpcServer()
        # `error` records the most recent classified failure as a list
        # [error-kind, detail, ...]; it drives the fault raised after the
        # retry loop when no token could be obtained.
        error = None
        token = None
        # update the systemid/serverid if need be.
        self.__processSystemid()
        # Makes several (self.__nRetries) attempts to login
        # pylint: disable-next=invalid-name,unused-variable
        for _i in range(self.__nRetries):
            try:
                token = server.proxy.login(self.__systemid)
            except (socket.error, socket_error) as e:
                if CFG.HTTP_PROXY:
                    # socket error, check to see if your HTTP proxy is running...
                    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    # pylint: disable-next=invalid-name
                    httpProxy, httpProxyPort = CFG.HTTP_PROXY.split(":")
                    try:
                        s.connect((httpProxy, int(httpProxyPort)))
                    # pylint: disable-next=redefined-outer-name
                    except socket.error as e:
                        error = [
                            "socket.error",
                            # pylint: disable-next=consider-using-f-string
                            "HTTP Proxy not running? (%s) %s" % (CFG.HTTP_PROXY, e),
                        ]
                        # rather big problem: http proxy not running.
                        # pylint: disable-next=consider-using-f-string
                        log_error("*** ERROR ***: %s" % error[1])
                        Traceback(mail=0)
                    # pylint: disable-next=redefined-outer-name
                    except socket_error as e:  # pylint: disable=duplicate-except
                        # pylint: disable-next=consider-using-f-string
                        error = ["socket.sslerror", "(%s) %s" % (CFG.HTTP_PROXY, e)]
                        # rather big problem: http proxy not running.
                        # pylint: disable-next=consider-using-f-string
                        log_error("*** ERROR ***: %s" % error[1])
                        Traceback(mail=0)
                    else:
                        # The HTTP proxy itself is reachable, so record the
                        # original socket error from the login attempt.
                        error = ["socket", str(e)]
                        log_error(error)
                        Traceback(mail=0)
                else:
                    log_error("Socket error", e)
                    Traceback(mail=0)
                Traceback(mail=1)
                token = None
                time.sleep(0.25)
                continue
            except SSL.SSL.SSLError as e:
                token = None
                error = ["rhn.SSL.SSL.SSLError", repr(e), str(e)]
                log_error(error)
                Traceback(mail=0)
                time.sleep(0.25)
                continue
            except xmlrpclib.ProtocolError as e:
                token = None
                # Record the failure so the dispatch after the loop (which
                # already checks for "xmlrpclib.ProtocolError") can report
                # it; previously this handler left `error` unset and the
                # generic rhnFault(1001) was raised instead.
                error = ["xmlrpclib.ProtocolError", str(e)]
                log_error("xmlrpclib.ProtocolError", e)
                time.sleep(0.25)
                continue
            except xmlrpclib.Fault as e:
                # Report it through the mail
                # Traceback will try to walk over all the values
                # in each stack frame, and eventually will try to stringify
                # the method object itself
                # This should trick it, since the originator of the exception
                # is this function, instead of a deep call into xmlrpclib
                # pylint: disable-next=consider-using-f-string
                log_error("%s" % e)
                if e.faultCode == 10000:
                    # reraise it for the users (outage or "important message"
                    # coming through")
                    raise_with_tb(
                        rhnFault(e.faultCode, e.faultString), sys.exc_info()[2]
                    )
                # ok... it's some other fault
                Traceback(
                    f"ProxyAuth.login (Fault) - {PRODUCT_NAME} Proxy not "
                    "able to log in."
                )
                # And raise a Proxy Error - the server made its point loud and
                # clear
                raise_with_tb(
                    rhnFault(
                        1000,
                        _(
                            f"{PRODUCT_NAME} Proxy error (during proxy login). "
                            "Please contact your system administrator."
                        ),
                    ),
                    sys.exc_info()[2],
                )
            except Exception as e:  # pylint: disable=broad-except
                token = None
                # Keep the exception text around: the `except ... as e` name
                # is deleted when this handler exits (Python 3), so it
                # cannot be referenced after the loop.
                error = ["exception", str(e)]
                log_error("Unhandled exception", e)
                Traceback(mail=0)
                time.sleep(0.25)
                continue
            else:
                break

        if not token:
            if error:
                if error[0] in ("xmlrpclib.ProtocolError", "socket.error", "socket"):
                    raise rhnFault(
                        1000,
                        _(
                            f"{PRODUCT_NAME} Proxy error (error: %s). "
                            "Please contact your system administrator."
                        )
                        % error[0],
                    )
                if error[0] in ("rhn.SSL.SSL.SSLError", "socket.sslerror"):
                    raise rhnFault(
                        1000,
                        _(
                            f"{PRODUCT_NAME} Proxy error (SSL issues? Error: %s). "
                            "Please contact your system administrator."
                        )
                        % error[0],
                    )
                # Previously this referenced `e`, which is unbound here in
                # Python 3 (the except-clause name is deleted when each
                # handler exits) and raised a NameError; use the recorded
                # error detail instead.
                # pylint: disable-next=consider-using-f-string
                raise rhnFault(1002, err_text="%s" % error[1])
            raise rhnFault(1001)
        if self.hostname:
            token = token + ":" + self.hostname
        # pylint: disable-next=consider-using-f-string
        log_debug(6, "New proxy token: %s" % token)
        return token

    @staticmethod
    def get_client_token(clientid):
        """Return the cached auth token for ``clientid``, or None if the
        cache has no entry for it."""
        shelf = get_auth_shelf()
        if not shelf.has_key(clientid):
            return None
        return shelf[clientid]

    @staticmethod
    def set_client_token(clientid, token):
        """Store ``token`` in the auth cache under ``clientid``."""
        get_auth_shelf()[clientid] = token

    def update_client_token_if_valid(self, clientid, token):
        """Ask the upstream server whether ``token`` is still valid and, if
        it is, refresh the local cache entry for ``clientid``.

        A client may have logged in through a different (load-balanced)
        proxy, so our cache can be stale even though the token is valid.
        Returns the updated token dict on success, None otherwise.
        """
        # Copy just the fields the upstream validity check needs into a
        # plain dict for transmission. :-/
        wanted = (
            "X-RHN-Server-Id",
            "X-RHN-Auth-User-Id",
            "X-RHN-Auth",
            "X-RHN-Auth-Server-Time",
            "X-RHN-Auth-Expire-Offset",
        )
        # pylint: disable-next=invalid-name
        dumbToken = {key: token[key] for key in wanted if key in token}

        # pylint: disable-next=invalid-name
        satInfo = None
        try:
            upstream = self.__getXmlrpcServer()
            # pylint: disable-next=invalid-name
            satInfo = upstream.proxy.checkTokenValidity(
                dumbToken, self.get_system_id()
            )
        except Exception:  # pylint: disable=E0012, W0703
            pass  # Satellite is not updated enough, keep old behavior

        # satInfo is False for an invalid token, or a dict of info we need
        # otherwise. We have to calculate the proxy-clock-skew between Sat
        # and this Proxy, as well as store the subscribed channels for this
        # client (which the client does not pass up in headers and which we
        # wouldn't trust even if it did).
        if not satInfo:
            return None
        # pylint: disable-next=invalid-name
        clockSkew = time.time() - float(satInfo["X-RHN-Auth-Server-Time"])
        dumbToken["X-RHN-Auth-Proxy-Clock-Skew"] = clockSkew
        dumbToken["X-RHN-Auth-Channels"] = satInfo["X-RHN-Auth-Channels"]
        # Update our cache so we don't have to ask upstream next time.
        self.set_client_token(clientid, dumbToken)
        return dumbToken

    # __private methods__

    @staticmethod
    # pylint: disable-next=invalid-name
    def __getXmlrpcServer():
        """Build and return an xmlrpc server object pointed at the parent.

        The URL is derived from CFG.RHN_PARENT (hostname only, forced to
        https://<host>/XMLRPC), optionally routed through CFG.HTTP_PROXY,
        and verified against CFG.CA_CHAIN when one is configured.

        Raises rhnFault(1000) if the configured CA chain file cannot be
        read.
        """
        log_debug(3)

        # build the URL: keep only the hostname part of RHN_PARENT
        url = CFG.RHN_PARENT or ""
        url = parseUrl(url)[1].split(":")[0]
        url = "https://" + url + "/XMLRPC"
        # pylint: disable-next=consider-using-f-string
        log_debug(3, "server url: %s" % url)

        if CFG.HTTP_PROXY:
            # pylint: disable-next=invalid-name
            serverObj = rpclib.Server(
                url,
                proxy=CFG.HTTP_PROXY,
                username=CFG.HTTP_PROXY_USERNAME,
                password=CFG.HTTP_PROXY_PASSWORD,
            )
        else:
            # pylint: disable-next=invalid-name
            serverObj = rpclib.Server(url)
        if CFG.CA_CHAIN:
            if not os.access(CFG.CA_CHAIN, os.R_OK):
                log_error(
                    # pylint: disable-next=consider-using-f-string
                    "ERROR: missing or cannot access (for ca_chain): %s"
                    % CFG.CA_CHAIN
                )
                raise rhnFault(
                    1000,
                    _(
                        f"{PRODUCT_NAME} Proxy error (file access issues). "
                        "Please contact your system administrator. "
                        # the f-prefix was missing on this literal, so the
                        # user saw a literal "{PRODUCT_NAME}" placeholder
                        f"Please refer to {PRODUCT_NAME} Proxy logs."
                    ),
                )
            serverObj.add_trusted_cert(CFG.CA_CHAIN)
        serverObj.add_header("X-RHN-Client-Version", 2)
        return serverObj

    def __cache_proxy_key(self):
        """Cache key for this proxy's own auth token: 'p' + server ID +
        sha256 hex digest of the proxy hostname."""
        digest = sha256(self.hostname.encode()).hexdigest()
        return "p" + str(self.__serverid) + digest

    # pylint: disable-next=invalid-name
    def getProxyServerId(self):
        """Return this proxy's cached server ID (``self.__serverid``)."""
        return self.__serverid


def get_auth_shelf():
    """Return the token cache backend: the local rhnCache-backed store when
    USE_LOCAL_AUTH is set, otherwise a client for the remote auth-cache
    daemon at CFG.AUTH_CACHE_SERVER ("host:port")."""
    if CFG.USE_LOCAL_AUTH:
        return AuthLocalBackend()
    host, port = CFG.AUTH_CACHE_SERVER.split(":")
    return rhnAuthCacheClient.Shelf((host, int(port)))


class AuthLocalBackend:
    """Dictionary-like token store backed by the local rhnCache.

    Keys are namespaced under ``proxy-auth/`` and sanitized against path
    traversal before being handed to rhnCache.
    """

    _cache_prefix = "proxy-auth"

    def __init__(self):
        pass

    def has_key(self, key):
        """True if the cache holds an entry for ``key``."""
        return rhnCache.has_key(self._compute_key(key))

    def __getitem__(self, key):
        # We want a dictionary-like behaviour, so if the key is not present,
        # raise an exception (that's what missing_is_null=0 does)
        return rhnCache.get(self._compute_key(key), missing_is_null=0)

    def __setitem__(self, key, val):
        return rhnCache.set(self._compute_key(key), val)

    def __delitem__(self, key):
        return rhnCache.delete(self._compute_key(key))

    def _compute_key(self, key):
        # Drop every forward slash so the key cannot name a sub-path.
        sanitized = os.fsencode(key).replace(b"/", b"").decode()

        key_path = os.path.join(self._cache_prefix, sanitized)
        # Reject keys (e.g. "..") whose normalized path would escape the
        # cache prefix.
        if not os.path.normpath(key_path).startswith(self._cache_prefix):
            raise ValueError(
                "Path traversal detected for X-RHN-Server-ID. "
                + "User is trying to set a path as server-id."
            )
        return key_path

    def __len__(self):
        pass


# ==============================================================================
07070100000034000081B400000000000000000000000168DD3ED300005577000000000000000000000000000000000000001D00000000spacewalk-proxy/rhnShared.py# pylint: disable=missing-module-docstring,invalid-name
# Shared (Spacewalk Proxy/Redirect) handler code called by rhnApache.
#
# Copyright (c) 2008--2017 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#

# language imports
try:
    # python 3
    import urllib.parse as urllib
except ImportError:
    # python 2
    import urllib
import socket
import sys

# global imports
from rhn import connections
from rhn.SSL import TimeoutException
from rhn.SmartIO import SmartIO

# common imports
from rhn.UserDictCase import UserDictCase
from spacewalk.common.rhnTB import Traceback
from spacewalk.common.rhnConfig import CFG
from spacewalk.common.rhnException import rhnFault, rhnException
from spacewalk.common.rhnLog import log_debug, log_error
from spacewalk.common import rhnFlags, apache
from spacewalk.common.rhnTranslate import _
from uyuni.common import rhnLib
from uyuni.common.usix import raise_with_tb, ListType, TupleType

# local imports
from . import rhnConstants
from .responseContext import ResponseContext

# Product name interpolated into user-facing log and error messages below.
PRODUCT_NAME = "SUSE Multi-Linux Manager"


class SharedHandler:
    """Shared handler class (between rhnBroker and rhnRedirect).
    *** only inherited ***

    Drives the common request flow: prep the handler, connect to the
    parent (directly or through an HTTP proxy), forward the client's
    request upstream, then relay the upstream response back to the client.
    """

    # pylint: disable=R0902,R0903
    def __init__(self, req):
        """init with http request object"""

        # FIXME: should rename some things:
        #        self.bodyFd --> self.body or self.data or ?
        #        self.caChain --> self.caCert

        self.req = req
        # turn wsgi.input object into a SmartIO instance so it can be read
        # more than once
        if "wsgi.input" in self.req.headers_in:
            # pylint: disable-next=invalid-name
            smartFd = SmartIO(max_mem_size=CFG.MAX_MEM_FILE_SIZE)
            smartFd.write(self.req.headers_in["wsgi.input"].read())
            self.req.headers_in["wsgi.input"] = smartFd

        # pylint: disable-next=invalid-name
        self.responseContext = ResponseContext()
        self.uri = None  # ''

        # Common settings for both the proxy and the redirect
        # broker and redirect immediately alter these for their own purposes
        # pylint: disable-next=invalid-name
        self.caChain = CFG.CA_CHAIN
        # pylint: disable-next=invalid-name
        self.httpProxy = CFG.HTTP_PROXY
        # pylint: disable-next=invalid-name
        self.httpProxyUsername = CFG.HTTP_PROXY_USERNAME
        # pylint: disable-next=invalid-name
        self.httpProxyPassword = CFG.HTTP_PROXY_PASSWORD
        # a proxy password without a proxy username is meaningless; blank it
        if not self.httpProxyUsername:
            self.httpProxyPassword = ""
        # pylint: disable-next=invalid-name
        self.rhnParent = CFG.RHN_PARENT or ""
        # keep only the hostname portion of the parent URL
        self.rhnParent = rhnLib.parseUrl(self.rhnParent)[1].split(":")[0]
        CFG.set("RHN_PARENT", self.rhnParent)

        # can we resolve self.rhnParent?
        # BUG 148961: not necessary, and dumb if the proxy is behind a firewall

    #        try:
    #            socket.gethostbyname(self.rhnParent)
    #        except socket.error, e:
    #            msg = "SOCKET ERROR: hostname: %s - %s" % (self.rhnParent, str(e))
    #            log_error(msg)
    #            log_debug(0, msg)
    #            raise

    # --- HANDLER SPECIFIC CODE ---

    # pylint: disable-next=invalid-name
    def _prepHandler(self):
        """Handler part 0

        Declines subrequests, sanity-checks the parent setting, and seeds
        the outgoing transport options from the incoming request headers.
        """

        # Just to be on the safe side
        if self.req.main:
            # A subrequest
            return apache.DECLINED
        log_debug(4, rhnFlags.all())

        if not self.rhnParent:
            raise rhnException("Oops, no proxy parent! Exiting")

        # Copy the headers.
        rhnFlags.get("outputTransportOptions").clear()
        rhnFlags.get("outputTransportOptions").update(self._getHeaders(self.req))

        return apache.OK

    # pylint: disable-next=invalid-name
    def _connectToParent(self):
        """Handler part 1
        Should not return an error code -- simply connects.
        """

        scheme, host, port, self.uri, query = self._parse_url(self.rhnParent)
        self.responseContext.setConnection(self._create_connection())

        if not self.uri:
            self.uri = "/"

        # if this request is for an upstream server, use the original query string.
        # Otherwise, if it is for the local Squid instance, strip it so that
        # Squid will not keep multiple cached copies of the same resource
        # Containers notes: when going for local proxy, use localhost as host to avoid
        # hairpin problem.
        if self.httpProxy not in ["127.0.0.1:8080", "localhost:8080"]:
            if "X-Suse-Auth-Token" in self.req.headers_in:
                # pylint: disable-next=consider-using-f-string
                self.uri += "?%s" % self.req.headers_in["X-Suse-Auth-Token"]
            elif query:
                # pylint: disable-next=consider-using-f-string
                self.uri += "?%s" % query
        else:
            host = "localhost"

        log_debug(3, "Scheme:", scheme)
        log_debug(3, "Host:", host)
        log_debug(3, "Port:", port)
        log_debug(3, "URI:", self.uri)
        log_debug(3, "HTTP proxy:", self.httpProxy)
        log_debug(3, "HTTP proxy username:", self.httpProxyUsername)
        log_debug(3, "HTTP proxy password:", "<password>")
        log_debug(3, "CA cert:", self.caChain)

        try:
            self.responseContext.getConnection().connect()
        except socket.error as e:
            log_error("Error opening connection", self.rhnParent, e)
            Traceback(mail=0)
            raise_with_tb(
                rhnFault(
                    1000,
                    _(
                        f"{PRODUCT_NAME} Proxy could not successfully connect its {PRODUCT_NAME} parent. "
                        "Please contact your system administrator."
                    ),
                ),
                sys.exc_info()[2],
            )

        # At this point the server should be okay
        # pylint: disable-next=consider-using-f-string
        log_debug(3, "Connected to parent: %s " % self.rhnParent)
        if self.httpProxy:
            if self.httpProxyUsername:
                log_debug(
                    3,
                    # pylint: disable-next=consider-using-f-string
                    "HTTP proxy info: %s %s/<password>"
                    % (self.httpProxy, self.httpProxyUsername),
                )
            else:
                # pylint: disable-next=consider-using-f-string
                log_debug(3, "HTTP proxy info: %s" % self.httpProxy)
        else:
            log_debug(3, "HTTP proxy info: not using an HTTP proxy")
        peer = self.responseContext.getConnection().sock.getpeername()
        # pylint: disable-next=consider-using-f-string
        log_debug(4, "Other connection info: %s:%s%s" % (peer[0], peer[1], self.uri))

    def _create_connection(self):
        """Returns a Connection object

        Chooses among HTTP/HTTPS with or without an intermediate HTTP
        proxy based on the parent URL scheme and CFG.HTTP_PROXY; the
        connection is created but not yet opened.
        """
        # pylint: disable-next=invalid-name,invalid-name,unused-variable,unused-variable
        scheme, host, port, _uri, _query = self._parse_url(self.rhnParent)
        # Build the list of params
        params = {
            "host": host,
            "port": port,
        }

        # Containers notes: when going for local proxy, use localhost as host to avoid
        # hairpin problem.
        if self.httpProxy in ["127.0.0.1:8080", "localhost:8080"]:
            params["host"] = "localhost"

        if CFG.has_key("timeout"):
            params["timeout"] = CFG.TIMEOUT
        if self.httpProxy:
            params["proxy"] = self.httpProxy
            params["username"] = self.httpProxyUsername
            params["password"] = self.httpProxyPassword
        if scheme == "https" and self.caChain:
            params["trusted_certs"] = [
                self.caChain,
            ]

        # Now select the right class
        if self.httpProxy:
            if scheme == "https":
                conn_class = connections.HTTPSProxyConnection
            else:
                conn_class = connections.HTTPProxyConnection
        else:
            if scheme == "https":
                conn_class = connections.HTTPSConnection
            else:
                conn_class = connections.HTTPConnection

        log_debug(5, "Using connection class", conn_class, "Params:", params)
        return conn_class(**params)

    @staticmethod
    def _parse_url(url):
        """Returns scheme, host, port, path, query.

        Port is None when the netloc carries no explicit (positive) port.
        """
        # pylint: disable-next=invalid-name,invalid-name,unused-variable,unused-variable
        scheme, netloc, path, _params, query, _frag = rhnLib.parseUrl(url)
        # NOTE(review): urllib.parse.splitnport is deprecated (removed in
        # Python 3.13) -- confirm the target Python version or migrate to
        # urllib.parse.urlsplit().
        host, port = urllib.splitnport(netloc)
        if port <= 0:
            port = None
        return scheme, host, port, path, query

    # pylint: disable-next=invalid-name
    def _serverCommo(self):
        """Handler part 2

        Server (or next proxy) communication.
        """

        log_debug(2)

        # Copy the method from the original request, and use the
        # handler for this server
        # We add path_info to the put (GET, CONNECT, HEAD, PUT, POST) request.
        log_debug(2, self.req.method, self.uri)
        self.responseContext.getConnection().putrequest(self.req.method, self.uri)

        # Send the headers, the body and expect a response
        try:
            # pylint: disable-next=invalid-name
            status, headers, bodyFd = self._proxy2server()
            self.responseContext.setHeaders(headers)
            self.responseContext.setBodyFd(bodyFd)
        except IOError:
            # Raised by HTTP*Connection.getresponse
            # Server closed connection on us, no need to mail out
            # XXX: why are we not mailing this out???
            Traceback("SharedHandler._serverCommo", self.req, mail=0)
            raise_with_tb(
                rhnFault(
                    1000,
                    _(
                        f"{PRODUCT_NAME} Proxy error: connection with the {PRODUCT_NAME} server failed"
                    ),
                ),
                sys.exc_info()[2],
            )
        except socket.error:  # pylint: disable=duplicate-except
            # maybe self.req.read() failed?
            Traceback("SharedHandler._serverCommo", self.req)
            raise_with_tb(
                rhnFault(
                    1000,
                    _(
                        f"{PRODUCT_NAME} Proxy error: connection with the {PRODUCT_NAME} server failed"
                    ),
                ),
                sys.exc_info()[2],
            )

        # pylint: disable-next=consider-using-f-string
        log_debug(2, "HTTP status code (200 means all is well): %s" % status)

        # Now we need to decide how to deal with the server's response.  We'll
        # defer to subclass-specific implementation here.  The handler will
        # return apache.OK if the request was a success.

        return self._handleServerResponse(status)

    # pylint: disable-next=invalid-name
    def _handleServerResponse(self, status):
        """This method can be overridden by subclasses who want to handle server
        responses in their own way.  By default, we will wrap all the headers up
        and send them back to the client with an error status.  This method
        should return apache.OK if everything went according to plan.
        """
        if status not in (apache.HTTP_OK, apache.HTTP_PARTIAL_CONTENT):
            # Non 200 response; have to treat it differently
            # pylint: disable-next=consider-using-f-string
            log_debug(2, "Forwarding status %s" % status)
            # Copy the incoming headers to headers_out
            headers = self.responseContext.getHeaders()
            if headers is not None:
                for k in list(headers.keys()):
                    rhnLib.setHeaderValue(self.req.headers_out, k, self._get_header(k))
            else:
                log_error("WARNING? - no incoming headers found!")
            # And that's that
            return status

        if status == apache.HTTP_PARTIAL_CONTENT:
            return apache.HTTP_PARTIAL_CONTENT

        # apache.HTTP_OK becomes apache.OK.
        return apache.OK

    # pylint: disable-next=invalid-name
    def _get_header(self, k, headerObj=None):
        # Fetch all values for header `k` from headerObj (defaults to the
        # current response headers), coping with both header APIs:
        # objects exposing getheaders() and email.message-style get_all().
        if headerObj is None:
            headerObj = self.responseContext.getHeaders()

        if hasattr(headerObj, "getheaders"):
            return headerObj.getheaders(k)

        return headerObj.get_all(k)

    # pylint: disable-next=invalid-name
    def _clientCommo(self, status=apache.OK):
        """Handler part 3
        Forward server's response to the client.
        """
        log_debug(2)

        try:
            self._forwardServer2Client()
        except IOError:
            # Raised by HTTP*connection.getresponse
            # Client closed connection on us, no need to mail out a traceback
            Traceback("SharedHandler._clientCommo", self.req, mail=0)
            return apache.HTTP_SERVICE_UNAVAILABLE

        # Close all open response contexts.
        self.responseContext.clear()

        return status

    # --- PROTECTED METHODS ---

    @staticmethod
    # pylint: disable-next=invalid-name
    def _getHeaders(req):
        """Copy the incoming headers."""

        hdrs = UserDictCase()
        for k in list(req.headers_in.keys()):
            # XXX misa: is this enough? Shouldn't we care about multivalued
            # headers?
            hdrs[k] = req.headers_in[k]
        return hdrs

    # pylint: disable-next=invalid-name
    def _forwardServer2Client(self):
        """Forward headers, and bodyfd from server to the calling client.
        For most XMLRPC code, this function is called.
        """

        log_debug(2)

        # Okay, nothing interesting from the server;
        # we'll just forward what we got

        # pylint: disable-next=invalid-name
        bodyFd = self.responseContext.getBodyFd()

        self._forwardHTTPHeaders(bodyFd, self.req)

        # Set the content type

        headers = self.responseContext.getHeaders()
        self.req.content_type = headers.get_content_type()
        self.req.send_http_header()

        # Forward the response body back to the client.

        self._forwardHTTPBody(bodyFd, self.req)

    def _proxy2server(self):
        # Forward the current request (headers + body, filtered) to the
        # upstream connection and return (status, headers, body-fd);
        # returns (HTTP_GATEWAY_TIME_OUT, None, None) on upstream timeout.
        hdrs = rhnFlags.get("outputTransportOptions")
        log_debug(3, hdrs)
        size = -1

        # Put the headers into the output connection object
        http_connection = self.responseContext.getConnection()
        for k, vals in list(hdrs.items()):
            if k.lower() in ["content_length", "content-length"]:
                try:
                    size = int(vals)
                except ValueError:
                    pass
            if k.lower() in ["content_length", "content_type"]:
                # mod_wsgi modifies incoming headers so we have to transform them back
                k = k.replace("_", "-")
            if not (
                k.lower().startswith("x-")
                or k.lower()
                in [  # all but 'host', and 'via'
                    "accept",
                    "accept-charset",
                    "accept-encoding",
                    "accept-language",
                    "accept-ranges",
                    "age",
                    "allow",
                    "authorization",
                    "cache-control",
                    "connection",
                    "content-encoding",
                    "content-language",
                    "content-length",
                    "content-location",
                    "content-md5",
                    "content-range",
                    "content-type",
                    "date",
                    "etag",
                    "expect",
                    "expires",
                    "from",
                    "if-match",
                    "if-modified-since",
                    "if-none-match",
                    "if-range",
                    "if-unmodified-since",
                    "last-modified",
                    "location",
                    "max-forwards",
                    "pragma",
                    "proxy-authenticate",
                    "proxy-authorization",
                    "range",
                    "referer",
                    "retry-after",
                    "server",
                    "te",
                    "trailer",
                    "transfer-encoding",
                    "upgrade",
                    "user-agent",
                    "vary",
                    "warning",
                    "www-authenticate",
                ]
            ):
                # filter out header we don't want to send
                continue
            if not isinstance(vals, (ListType, TupleType)):
                vals = [vals]
            for v in vals:
                log_debug(5, "Outgoing header", k, v)
                http_connection.putheader(k, v)
        http_connection.endheaders()

        # Send the body too if there is a body
        if size > 0:
            # reset file to beginning so it can be read again
            self.req.headers_in["wsgi.input"].seek(0, 0)
            if sys.version_info < (2, 6):
                data = self.req.headers_in["wsgi.input"].read(size)
            else:
                data = self.req.headers_in["wsgi.input"]
            http_connection.send(data)

        # At this point everything is sent to the server
        # We now wait for the response
        try:
            response = http_connection.getresponse()
        except TimeoutException:
            log_error("Connection timed out")
            return apache.HTTP_GATEWAY_TIME_OUT, None, None
        headers = response.msg
        status = response.status
        # Get the body of the request too - well, just a fd actually
        # in this case, the response object itself.
        # pylint: disable-next=invalid-name
        bodyFd = response
        return status, headers, bodyFd

    # pylint: disable-next=invalid-name
    def _getEffectiveURI(self):
        # Honor an explicit effective-URI header when the client set one;
        # otherwise fall back to the request's own URI.
        if rhnConstants.HEADER_EFFECTIVE_URI in self.req.headers_in:
            return self.req.headers_in[rhnConstants.HEADER_EFFECTIVE_URI]

        return self.req.uri

    @staticmethod
    # pylint: disable-next=invalid-name
    def _determineHTTPBodySize(headers):
        """This routine attempts to determine the size of an HTTP body by searching
        the headers for a "Content-Length" field.  The size is returned, if
        found, otherwise -1 is returned.
        """

        # Get the size of the body
        size = 0
        if rhnConstants.HEADER_CONTENT_LENGTH in headers:
            try:
                size = int(headers[rhnConstants.HEADER_CONTENT_LENGTH])
            except ValueError:
                size = -1
        else:
            size = -1

        return size

    # pylint: disable-next=invalid-name,invalid-name,invalid-name
    def _forwardHTTPHeaders(self, fromResponse, toRequest):
        """This routine will transfer the header contents of an HTTP response to
        the output headers of an HTTP request for reply to the original
        requesting client.  This function does NOT call the request's
        send_http_header routine; that is the responsibility of the caller.
        """

        if fromResponse is None or toRequest is None:
            return

        # Iterate over each header in the response and place it in the request
        # output area.

        for k in list(fromResponse.msg.keys()):
            # Get the value
            v = self._get_header(k, fromResponse.msg)

            # drop chunked transfer-encoding: the body is re-sent whole
            if (k.lower() == "transfer-encoding") and ("chunked" in v):
                log_debug(5, "Filtering header", k, v)
                continue

            # Set the field in the response

            rhnLib.setHeaderValue(toRequest.headers_out, k, v)

    # pylint: disable-next=invalid-name,invalid-name,invalid-name
    def _forwardHTTPBody(self, fromResponse, toRequest):
        """This routine will transfer the body of an HTTP response to the output
        area of an HTTP request for response to the original requesting client.
        The request's send_http_header function must be called before this
        function is called.
        """
        if fromResponse is None or toRequest is None:
            return

        # Get the size of the body

        size = self._determineHTTPBodySize(fromResponse.msg)
        log_debug(4, "Response body size: ", size)

        # Now fill in the bytes if need be.

        # read content if there is some or the size is unknown
        if (size > 0 or size == -1) and (toRequest.method != "HEAD"):
            tfile = SmartIO(max_mem_size=CFG.MAX_MEM_FILE_SIZE)
            buf = fromResponse.read(CFG.BUFFER_SIZE)
            while buf:
                try:
                    tfile.write(buf)
                    buf = fromResponse.read(CFG.BUFFER_SIZE)
                except IOError:
                    buf = 0
            tfile.seek(0)
            if "wsgi.file_wrapper" in toRequest.headers_in:
                toRequest.output = toRequest.headers_in["wsgi.file_wrapper"](
                    tfile, CFG.BUFFER_SIZE
                )
            else:
                # NOTE(review): the iter() sentinel is the str "" while
                # tfile.read() presumably returns bytes -- a bytes chunk
                # never equals ""; confirm SmartIO.read semantics.
                toRequest.output = iter(lambda: tfile.read(CFG.BUFFER_SIZE), "")
07070100000035000041FD00000000000000000000000268DD3ED300000000000000000000000000000000000000000000001C00000000spacewalk-proxy/salt-broker07070100000036000081B400000000000000000000000168DD3ED30000107B000000000000000000000000000000000000002300000000spacewalk-proxy/salt-broker/broker##### Primary configuration settings #####
##########################################
#master: your.salt.master.hostname

######      Keepalive settings        ######
############################################
# ZeroMQ includes support for configuring SO_KEEPALIVE if supported by
# the OS. If connections between the broker and the master pass through
# a state tracking device such as a firewall or VPN gateway, there is
# the risk that it could tear down the connection between the master and minion
# without informing either party that their connection has been taken away.
# Enabling TCP Keepalives prevents this from happening.

# Overall state of TCP Keepalives, enable (1 or True), disable (0 or False)
# or leave it to the OS default (-1), on Linux typically disabled. Default True, enabled.
#tcp_keepalive: True

# How long before the first keepalive should be sent in seconds. Default 300
# to send the first keepalive after 5 minutes; the OS default (-1) is typically 7200 seconds
# on Linux, see /proc/sys/net/ipv4/tcp_keepalive_time.
#tcp_keepalive_idle: 300

# How many lost probes are needed to consider the connection lost. Default -1
# to use OS defaults, typically 9 on Linux, see /proc/sys/net/ipv4/tcp_keepalive_probes.
#tcp_keepalive_cnt: -1

# How often, in seconds, to send keepalives after the first one. Default -1 to
# use OS defaults, typically 75 seconds on Linux, see
# /proc/sys/net/ipv4/tcp_keepalive_intvl.
#tcp_keepalive_intvl: -1

# Logs can be sent either to standard output/error or to file
# default: 1 to send logs to file
# 0 to send logs to standard output/error
log_to_file: 1

######   ZeroMQ connection options    ######
############################################
# For more details about the following parameters check ZeroMQ documentation:
# http://api.zeromq.org/4-2:zmq-setsockopt
# All of these parameters will be set to the backend sockets
# (from the salt-broker to the salt-master)

# connect_timeout (sets ZMQ_CONNECT_TIMEOUT)
# default: 0
# value unit: milliseconds
# Sets how long to wait before timing-out a connect to the remote socket.
# 0 could take much time, so it could be better to set to more strict value
# for particular environment depending on the network conditions.
# The value equal to 10000 is setting 10 seconds connect timeout.
connect_timeout: 0

# reconnect_ivl (sets ZMQ_RECONNECT_IVL)
# default: 100
# value unit: milliseconds
# Sets the interval of time before reconnection attempt on connection drop.
reconnect_ivl: 100

# heartbeat_ivl (sets ZMQ_HEARTBEAT_IVL)
# default: 0
# value unit: milliseconds
# This parameter is important for detecting the loss of the connection.
# With a value equal to 0, no heartbeats are sent.
# It's better to set it to a more relevant value for the particular
# environment, depending on possible network issues.
# The value equal to 20000 (20 seconds) works well for most cases.
heartbeat_ivl: 0

# heartbeat_timeout (sets ZMQ_HEARTBEAT_TIMEOUT)
# default: 0
# value unit: milliseconds
# Sets the interval of time to consider that the connection is timed out
# after sending the heartbeat and not getting the response on it.
# The value equal to 60000 (1 minute) is considering the connection is down
# after 1 minute of no response to the heartbeat.
heartbeat_timeout: 0


######   Other connection options    ######
# The following parameters are not related to ZeroMQ,
# but the internal parameters of the salt-broker.
# drop_after_retries
# default: -1
# value unit: number of retries
# Drop the frontend sockets of the salt-broker in case if it reaches
# the number of retries to reconnect to the backend socket.
# -1 means not drop the frontend sockets
# It's better to choose a more relevant value for the particular environment.
# 10 can be a good choice for most of the cases.
drop_after_retries: -1

# wait_for_backend
# default: False
# The main aim of this parameter is to prevent collecting the messages
# with the open frontend socket and prevent pushing them on connecting
# the backend socket, to avoid a large number of messages being pushed
# at once to the salt-master.
# It's better to set it to True if there is a significant number of minions
# behind the salt-broker.
wait_for_backend: False
07070100000037000081FD00000000000000000000000168DD3ED300004842000000000000000000000000000000000000002800000000spacewalk-proxy/salt-broker/salt-broker#!/usr/bin/python
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
"""
saltbroker: A ZeroMQ Proxy (broker) for Salt Minions

The main process spawns a process for each channel of Salt ZMQ transport:

- PubChannelProxy process provides the PUB channel for the minions
- RetChannelProxy process provides the RET channel for the minions

Also acts as a supervisor for the child processes, respawning them if they die.

:depends:   python-PyYAML
:depends:   python-pyzmq

Copyright (c) 2016--2025 SUSE LLC

All modifications and additions to the file contributed by third parties
remain the property of their copyright owners, unless otherwise agreed
upon. The license for this file, and modifications and additions to the
file, is the same license as for the pristine package itself (unless the
license for the pristine package is not an Open Source License, in which
case the license is the MIT License). An "Open Source License" is a
license that conforms to the Open Source Definition (Version 1.9)
published by the Open Source Initiative.

Please submit bugfixes or comments via http://bugs.opensuse.org/
"""

# Import python libs
import ipaddress
import logging
import logging.handlers
import multiprocessing
import os
import signal
import socket
import sys
import threading
import time
import traceback
import yaml

log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)

try:
    # Import RHN libs
    from spacewalk.common.rhnConfig import RHNOptions
except ImportError:
    log.info("RHNOptions is not available, running standalone")
    RHNOptions = None

# Import pyzmq lib
import zmq

from zmq.utils.monitor import recv_monitor_message

SALT_BROKER_CONF_FILE = os.environ.get("SALT_BROKER_CONF_FILE", "/etc/salt/broker")
SALT_BROKER_LOGFILE = os.environ.get("SALT_BROKER_LOGFILE", "/var/log/salt/broker")
SUPERVISOR_TIMEOUT = 5


def ip_bracket(addr, strip=False):
    """
    Make an IP address safe to embed in a URI: IPv6 literals are wrapped
    in square brackets (unless ``strip`` is True), IPv4 addresses are
    returned unchanged.  Existing brackets on the input are tolerated.
    """
    parsed = ipaddress.ip_address(str(addr).lstrip("[").rstrip("]"))
    if parsed.version == 6 and not strip:
        return "[{}]".format(parsed)
    return "{}".format(parsed)


class AbstractChannelProxy(multiprocessing.Process):
    """
    Abstract class for ChannelProxy objects

    A ChannelProxy is a child process forwarding one Salt ZMQ channel:
    it connects a "backend" socket to the Salt master and binds a
    "frontend" socket for the minions, then relays traffic between the
    two with zmq.proxy().  Subclasses must define, before run() starts:
    ``_backend_sock_type`` / ``_frontend_sock_type`` (zmq socket types),
    ``backend_type`` / ``frontend_type`` (names used in log messages),
    and ``_backend_uri`` / ``_frontend_uri`` (tcp:// endpoints).
    """

    class ChannelException(Exception):
        """
        Custom Exception definition
        """

        pass

    # (zmq option name, value source) pairs applied to the backend socket.
    # The second element is normally a key into self.opts; a 1-tuple
    # instead carries a literal value (see set_sockopts / PubChannelProxy).
    _BACKEND_SOCKOPTS = (
        ("TCP_KEEPALIVE", "tcp_keepalive"),
        ("TCP_KEEPALIVE_IDLE", "tcp_keepalive_idle"),
        ("TCP_KEEPALIVE_CNT", "tcp_keepalive_cnt"),
        ("TCP_KEEPALIVE_INTVL", "tcp_keepalive_intvl"),
        ("CONNECT_TIMEOUT", "connect_timeout"),
        ("RECONNECT_IVL", "reconnect_ivl"),
        ("HEARTBEAT_IVL", "heartbeat_ivl"),
        ("HEARTBEAT_TIMEOUT", "heartbeat_timeout"),
    )
    # Options applied to the frontend socket; none by default.
    _FRONTEND_SOCKOPTS = ()

    def __init__(self, opts):
        """
        :param opts: dict of broker options; must contain "master" (the
            Salt master hostname).  The name is resolved once here and the
            result is stored back into ``opts["master_ip"]``.
        :raises ChannelException: if "master" is missing or does not resolve
        """
        self.opts = opts
        # Flipped by backend_socket_monitor() on EVENT_CONNECTED /
        # EVENT_DISCONNECTED of the backend socket.
        self.backend_connected = False
        if "master" not in self.opts:
            raise self.ChannelException(
                # pylint: disable-next=consider-using-f-string
                '[{}] No "master" opts is provided'.format(self.__class__.__name__)
            )
        try:
            self.opts["master_ip"] = socket.gethostbyname(self.opts["master"])
        except socket.gaierror as exc:
            raise self.ChannelException(
                # pylint: disable-next=consider-using-f-string
                "[{}] Error trying to resolve '{}': {}".format(
                    self.__class__.__name__, self.opts["master"], exc
                )
            )
        super().__init__()

    def run(self):
        """
        Child-process entry point: set up both sockets, optionally wait for
        the backend to connect, then block inside zmq.proxy() forwarding
        messages until a socket is closed or an error occurs.
        """
        try:
            context = zmq.Context()

            log.debug(
                # pylint: disable-next=logging-format-interpolation,consider-using-f-string
                "Setting up a {} sock on {}".format(
                    self.backend_type, self._backend_uri
                )
            )
            self.backend = context.socket(self._backend_sock_type)
            self.set_sockopts(
                self.backend,
                self._BACKEND_SOCKOPTS,
                self.backend_type,
                self.opts["master_ip"],
            )

            # drop_after_retries == -1 disables the monitor thread entirely:
            # the backend socket then retries forever and is never closed.
            self.reconnect_retries = self.opts["drop_after_retries"]
            if self.reconnect_retries != -1:
                self.monitor_socket = self.backend.get_monitor_socket()
                self.monitor_thread = threading.Thread(
                    target=self.backend_socket_monitor, args=(self.monitor_socket,)
                )
                self.monitor_thread.start()

            self.backend.connect(self._backend_uri)

            # Optionally hold off opening the frontend until the backend is
            # connected, so minion messages are not queued for an
            # unreachable master (see "wait_for_backend" in the config).
            if self.opts["wait_for_backend"]:
                while not self.backend_connected:
                    if self.backend.closed:
                        log.warning(
                            "Backend %s socket was closed while waiting for it. Terminating...",
                            self.backend_type,
                        )
                        return
                    time.sleep(0.5)

            log.debug(
                # pylint: disable-next=logging-format-interpolation,consider-using-f-string
                "Setting up a {} sock on {}".format(
                    self.frontend_type, self._frontend_uri
                )
            )

            self.frontend = context.socket(self._frontend_sock_type)
            self.set_sockopts(
                self.frontend,
                self._FRONTEND_SOCKOPTS,
                self.frontend_type,
                self.opts["interface"],
            )

            self.frontend.bind(self._frontend_uri)

            # Forward all messages; zmq.proxy() blocks until a socket dies.
            log.info(
                "Staring ZMQ proxy on %s and %s sockets",
                self.frontend_type,
                self.backend_type,
            )
            try:
                zmq.proxy(self.frontend, self.backend)
            # pylint: disable-next=broad-exception-caught,unused-variable
            except Exception as e:
                log.error(
                    "Error while processing proxy with %s and %s sockets. Terminating...",
                    self.frontend_type,
                    self.backend_type,
                )
                return

        except zmq.ZMQError as zmq_error:
            if self.reconnect_retries == 0:
                # Do not raise error if drop_after_retries was used
                return
            # pylint: disable-next=consider-using-f-string
            msg = "ZMQ Error: {}".format(zmq_error)
            log.error(msg)
            raise self.ChannelException(msg)

        # pylint: disable-next=broad-exception-caught
        except Exception as exc:
            log.error("Exception: %s", exc)
            log.debug("Traceback: %s", traceback.format_exc())

    # pylint: disable-next=redefined-outer-name
    def set_sockopts(self, socket, sockopts, sock_type, addr=None):
        """
        Apply each (option name, value source) pair in ``sockopts`` to
        ``socket``.  A value source is either a key into ``self.opts`` or
        a 1-tuple holding the literal value.  Additionally enables IPv6
        support on the socket when ``opts["ipv6"]`` is True or ``addr``
        looks like an IPv6 literal (contains ":").

        :param socket: the zmq socket to configure (shadows the module-level
            ``socket`` import inside this method)
        :param sock_type: socket name used only for log messages
        """

        # pylint: disable-next=invalid-name
        def __set_sock_opt(opt, val, opt_name, sock_type):
            log.debug("Setting socket opt %s to %s on %s", opt_name, val, sock_type)
            socket.setsockopt(opt, val)

        for opt_name, opt_src in sockopts:
            # Skip options the installed ZMQ build does not provide.
            # NOTE(review): this log message reads garbled ("Unable to ZMQ
            # doesn't have...") — candidate for a wording fix.
            opt = getattr(zmq, opt_name, None)
            if opt is None:
                log.error("Unable to ZMQ doesn't have %s socket opt", opt_name)
                continue
            if opt_src in self.opts:
                opt_val = self.opts[opt_src]
            elif isinstance(opt_src, tuple) and len(opt_src) == 1:
                # 1-tuple means a literal value rather than an opts key.
                opt_val = opt_src[0]
            else:
                log.error("Unable to get the value for socket opt %s", opt_name)
                continue
            __set_sock_opt(opt, opt_val, opt_name, sock_type)
        if (
            self.opts["ipv6"] is True or (addr is not None and ":" in addr)
        ) and hasattr(zmq, "IPV4ONLY"):
            # IPv6 sockets work for both IPv6 and IPv4 addresses
            __set_sock_opt(zmq.IPV4ONLY, 0, "IPV4ONLY", sock_type)
        if (addr is not None and ":" in addr) and hasattr(zmq, "IPV6"):
            __set_sock_opt(zmq.IPV6, 1, "IPV6", sock_type)

    def backend_socket_monitor(self, monitor_socket):
        """
        Watch the backend socket's monitor events in a thread.  Tracks
        connect/disconnect to keep ``self.backend_connected`` current and,
        when ``drop_after_retries`` is set, closes the backend socket after
        the configured number of failed reconnection attempts.
        """
        while monitor_socket.poll():
            mon_evt = recv_monitor_message(monitor_socket)
            if self.reconnect_retries != -1:
                if mon_evt["event"] == zmq.EVENT_DISCONNECTED:
                    # pylint: disable-next=logging-format-interpolation,consider-using-f-string
                    log.warning("{} socket disconnected".format(self.backend_type))
                    self.backend_connected = False
                elif mon_evt["event"] == zmq.EVENT_CONNECTED:
                    # pylint: disable-next=logging-format-interpolation,consider-using-f-string
                    log.info("{} socket connected".format(self.backend_type))
                    self.backend_connected = True
                    # Successful connect resets the retry budget.
                    self.reconnect_retries = self.opts["drop_after_retries"]
                elif mon_evt["event"] == zmq.EVENT_CONNECT_RETRIED:
                    if self.reconnect_retries == 0:
                        log.warning(
                            # pylint: disable-next=logging-format-interpolation,consider-using-f-string
                            "Closing {} socket due to retry attempts reached!".format(
                                self.backend_type
                            )
                        )
                        self.backend.close()
                        break
                    else:
                        self.reconnect_retries -= 1
            if mon_evt["event"] == zmq.EVENT_MONITOR_STOPPED:
                break
        monitor_socket.close()

    def terminate(self):
        """
        custom terminate function for the child process
        """
        log.info("Terminate called. Exiting")
        super().terminate()


class PubChannelProxy(AbstractChannelProxy):
    """
    Proxy for the Salt PUB channel.

    Connects an XSUB socket to the publish port of the Salt master and
    exposes an XPUB socket for the minions, so messages published by the
    master are forwarded to all subscribed minions.
    """

    # Prevent stopping publishing messages on XPUB socket. (bsc#1182954)
    _FRONTEND_SOCKOPTS = (
        ("XPUB_VERBOSE", (1,)),
        ("XPUB_VERBOSER", (1,)),
    )

    def __init__(self, opts):
        super().__init__(opts)
        self.name = "PubChannelProxy"

        self.backend_type = "XSUB"
        self.frontend_type = "XPUB"
        self._backend_sock_type = zmq.XSUB
        self._frontend_sock_type = zmq.XPUB

        master_host = ip_bracket(self.opts["master_ip"])
        listen_host = ip_bracket(self.opts["interface"])
        pub_port = self.opts["publish_port"]
        # pylint: disable-next=consider-using-f-string
        self._backend_uri = "tcp://{}:{}".format(master_host, pub_port)
        # pylint: disable-next=consider-using-f-string
        self._frontend_uri = "tcp://{}:{}".format(listen_host, pub_port)


class RetChannelProxy(AbstractChannelProxy):
    """
    Proxy for the Salt RET channel.

    Binds a ROUTER socket where minions deliver their returns, and
    forwards those messages over a DEALER socket connected to the RET
    port of the Salt master.
    """

    def __init__(self, opts):
        super().__init__(opts)
        self.name = "RetChannelProxy"

        self.backend_type = "DEALER"
        self.frontend_type = "ROUTER"
        self._backend_sock_type = zmq.DEALER
        self._frontend_sock_type = zmq.ROUTER

        master_host = ip_bracket(self.opts["master_ip"])
        listen_host = ip_bracket(self.opts["interface"])
        ret_port = self.opts["ret_port"]
        # pylint: disable-next=consider-using-f-string
        self._backend_uri = "tcp://{}:{}".format(master_host, ret_port)
        # pylint: disable-next=consider-using-f-string
        self._frontend_uri = "tcp://{}:{}".format(listen_host, ret_port)


class SaltBroker(object):
    """
    Creates a SaltBroker that forwards messages and responses from
    minions to the Salt master by running a ZeroMQ proxy process for
    each of the PUB/RET channels of the Salt ZMQ transport, and
    supervises those processes, respawning them if they die.
    """

    def __init__(self, opts):
        """
        :param opts: merged broker configuration (defaults + config file),
            handed unchanged to both channel proxy processes
        """
        log.debug("Readed config: %s", opts)
        self.opts = opts
        # Set to True by sigterm_clean() to stop the supervisor loop.
        self.exit = False
        # Keep the default SIGTERM handler so child processes are spawned
        # with it instead of inheriting our custom sigterm_clean handler.
        self.default_sigterm = signal.getsignal(signal.SIGTERM)
        self.pub_proxy_proc = None
        self.ret_proxy_proc = None
        super().__init__()

    def _start_pub_proxy(self):
        """
        Spawn a new PubChannelProxy process
        """
        # setting up the default SIGTERM handler for the new process
        signal.signal(signal.SIGTERM, self.default_sigterm)

        # Spawn a new PubChannelProxy process
        pub_proxy = PubChannelProxy(opts=self.opts)
        pub_proxy.start()

        # setting up again the custom SIGTERM handler
        signal.signal(signal.SIGTERM, self.sigterm_clean)

        log.info("Spawning PUB channel proxy process [PID: %s]", pub_proxy.pid)

        return pub_proxy

    def _start_ret_proxy(self):
        """
        Spawn a new RetChannelProxy process
        """
        # setting up the default SIGTERM handler for the new process
        signal.signal(signal.SIGTERM, self.default_sigterm)

        # Spawn a new RetChannelProxy process
        ret_proxy = RetChannelProxy(opts=self.opts)
        ret_proxy.start()

        # setting up again the custom SIGTERM handler
        signal.signal(signal.SIGTERM, self.sigterm_clean)

        log.info("Spawning RET channel proxy process [PID: %s]", ret_proxy.pid)

        return ret_proxy

    # pylint: disable-next=unused-argument
    def sigterm_clean(self, signum, frame):
        """
        Custom SIGTERM handler: terminate both channel proxy processes
        (if running) and flag the supervisor loop to exit.
        """
        log.info("Caught signal %s, stopping all channels", signum)

        if self.pub_proxy_proc:
            self.pub_proxy_proc.terminate()
        if self.ret_proxy_proc:
            self.ret_proxy_proc.terminate()

        self.exit = True
        log.info("Terminating main process")

    def start(self):
        """
        Starts a SaltBroker. It spawns the PubChannelProxy and
        RetChannelProxy processes and also acts as a supervisor
        of these child processes, respawning them if they die.
        This call blocks until a SIGTERM is received.
        """
        log.info("Starting Salt ZeroMQ Proxy [PID: %s]", os.getpid())

        # Attach a handler for SIGTERM signal
        signal.signal(signal.SIGTERM, self.sigterm_clean)

        try:
            self.pub_proxy_proc = self._start_pub_proxy()
            self.ret_proxy_proc = self._start_ret_proxy()
        except AbstractChannelProxy.ChannelException as exc:
            log.error("Exception: %s", exc)
            log.error("Exiting")
            sys.exit(exc)

        # Supervisor. Restart a channel if died
        while not self.exit:
            if not self.pub_proxy_proc.is_alive():
                log.error("PUB channel proxy has died. Respawning")
                self.pub_proxy_proc = self._start_pub_proxy()
            if not self.ret_proxy_proc.is_alive():
                log.error("RET channel proxy has died. Respawning")
                self.ret_proxy_proc = self._start_ret_proxy()
            time.sleep(SUPERVISOR_TIMEOUT)


if __name__ == "__main__":
    # Try to get config from /etc/rhn/rhn.conf
    rhn_parent = None
    if RHNOptions is not None:
        rhn_proxy_conf = RHNOptions(component="proxy")
        rhn_proxy_conf.parse()
        if rhn_proxy_conf.get("rhn_parent"):
            log.debug("Using 'rhn_parent' from /etc/rhn/rhn.conf as 'master'")
            rhn_parent = rhn_proxy_conf["rhn_parent"]

    # Check for the config file
    if not os.path.isfile(SALT_BROKER_CONF_FILE):
        # pylint: disable-next=consider-using-f-string
        sys.exit("Config file not found: {0}".format(SALT_BROKER_CONF_FILE))

    # default config
    _DEFAULT_OPTS = {
        "publish_port": "4505",
        "ret_port": "4506",
        "interface": "0.0.0.0",
        "ipv6": False,
        "tcp_keepalive": True,
        "tcp_keepalive_idle": 300,
        "tcp_keepalive_cnt": -1,
        "tcp_keepalive_intvl": -1,
        "log_to_file": 1,
        "connect_timeout": 0,
        "reconnect_ivl": 100,
        "heartbeat_ivl": 0,
        "heartbeat_timeout": 0,
        "drop_after_retries": -1,
        "wait_for_backend": False,
    }

    try:
        # pylint: disable-next=unspecified-encoding
        config = yaml.load(open(SALT_BROKER_CONF_FILE), Loader=yaml.SafeLoader)
        if not config:
            config = {}
        if not isinstance(config, dict):
            # pylint: disable-next=consider-using-f-string
            sys.exit("Bad format in config file: {0}".format(SALT_BROKER_CONF_FILE))

        saltbroker_opts = _DEFAULT_OPTS.copy()

        if rhn_parent:
            saltbroker_opts.update({"master": rhn_parent})

        saltbroker_opts.update(config)

        formatter = logging.Formatter(
            "%(asctime)s [%(levelname)-8s][%(processName)-16s][%(process)s] %(message)s",
        )
        # log to file or to standard output and error depending on the configuration
        if saltbroker_opts.get("log_to_file"):
            fileloghandler = logging.handlers.RotatingFileHandler(
                SALT_BROKER_LOGFILE, maxBytes=200000, backupCount=5
            )
            fileloghandler.setFormatter(formatter)
            log.addHandler(fileloghandler)
        else:
            # prepare two log handlers, 1 for stdout and 1 for stderr
            stdout_handler = logging.StreamHandler(sys.stdout)
            stderr_handler = logging.StreamHandler(sys.stderr)
            # stdout handler filters out everything above the ERROR level included
            stdout_handler.addFilter(lambda record: record.levelno < logging.ERROR)
            # stderror handler looks only for everything above the ERROR level included
            stderr_handler.setLevel(logging.ERROR)
            # same format for both handlers
            stdout_handler.setFormatter(formatter)
            stderr_handler.setFormatter(formatter)
            # add handlers to log Object
            log.addHandler(stdout_handler)
            log.addHandler(stderr_handler)

        proxy = SaltBroker(opts=saltbroker_opts)
        proxy.start()

    except yaml.scanner.ScannerError as exc:
        # pylint: disable-next=consider-using-f-string
        sys.exit("Error reading YAML config file: {0}".format(exc))
07070100000038000081B400000000000000000000000168DD3ED3000000B1000000000000000000000000000000000000003000000000spacewalk-proxy/salt-broker/salt-broker.service[Unit]
Description=Salt ZeroMQ Proxy (broker)
After=network.target

[Service]
Type=simple
LimitNOFILE=51200
ExecStart=/usr/bin/salt-broker

[Install]
WantedBy=multi-user.target
07070100000039000081B400000000000000000000000168DD3ED300005C55000000000000000000000000000000000000002800000000spacewalk-proxy/spacewalk-proxy.changes-------------------------------------------------------------------
Tue Jun 17 19:26:50 CEST 2025 - marina.latini@suse.com

- version 5.1.5-0
  * Use more secure sha256 to generate cache key
  * Enable passing config and log paths via environment
    variables to salt-broker

-------------------------------------------------------------------
Mon May 12 23:18:27 CEST 2025 - marina.latini@suse.com

- version 5.1.4-0
  * Disable directory listing (bsc#1241094)

-------------------------------------------------------------------
Fri Apr 11 17:10:38 CEST 2025 - marina.latini@suse.com

- version 5.1.3-0
  * Use the new product name SUSE Multi-Linux Manager

-------------------------------------------------------------------
Thu Dec 12 09:22:59 CET 2024 - rosuna@suse.com

- version 5.1.2-0
  * Add IPv6 support for salt-broker (bsc#1227827)
  * Make salt-broker reconnect if Master IP has changed
  * Make salt-broker less dependent on spacewalk libs
  * Make socket opt setting more strict and verbose (bsc#1229286)

-------------------------------------------------------------------
Mon Oct 14 15:51:19 CEST 2024 - rosuna@suse.com

- version 5.1.1-0
  * Set Proxy authtoken FQDN based on config file (bsc#1230255)
  * Allow execute of ssh-keygen command on the Proxy to cleanup SSH
    known_hosts (bsc#1228345)
  * Bump version to 5.1.0

-------------------------------------------------------------------
Mon Jun 10 17:10:09 CEST 2024 - marina.latini@suse.com

- version 5.0.3-0
  * Remove applet handler
  * Refactor salt-broker and implement flexible channels state
    monitoring

-------------------------------------------------------------------
Fri May 03 15:04:51 CEST 2024 - marina.latini@suse.com

- version 5.0.2-0
  * Remove the config management related WSGI script

-------------------------------------------------------------------
Tue Jan 16 08:09:14 CET 2024 - jgonzalez@suse.com

- version 5.0.1-1
  * Bump version to 5.0.0

-------------------------------------------------------------------
Fri Dec 15 17:19:54 CET 2023 - rosuna@suse.com

- version 4.4.7-1
  * Remove unused makefiles

-------------------------------------------------------------------
Mon Sep 18 14:47:49 CEST 2023 - rosuna@suse.com

- version 4.4.6-1
  * Use new apache wsgi mod package name
  * remove usage of mgr-cfg tools in post script

-------------------------------------------------------------------
Wed Apr 19 12:53:43 CEST 2023 - marina.latini@suse.com

- version 4.4.5-1
  * Better error message on missing systemid file (bsc#1209119)

-------------------------------------------------------------------
Tue Feb 21 12:30:07 CET 2023 - jgonzalez@suse.com

- version 4.4.4-1
  * Avoid unnecessary debug messages from proxy backend (bsc#1207490)

-------------------------------------------------------------------
Mon Jan 23 08:24:37 CET 2023 - jgonzalez@suse.com

- version 4.4.3-1
  * Updated logrotate configuration (bsc#1206470)

-------------------------------------------------------------------
Wed Dec 14 14:07:53 CET 2022 - jgonzalez@suse.com

- version 4.4.2-1
  * Handle tftp in rhn-proxy (bsc#1205976)
  * remove jabberd and osa-dispatcher

-------------------------------------------------------------------
Wed Sep 28 10:44:28 CEST 2022 - jgonzalez@suse.com

- version 4.4.1-1
  * Prefix log messages with the component name to ease analysis
  * renew the cached token when requested channel is not listed in
    the old token (bsc#1202724)

-------------------------------------------------------------------
Wed Jul 27 14:09:14 CEST 2022 - jgonzalez@suse.com

- version 4.3.11-1
  * Move certificates dependencies from broker to proxy package

-------------------------------------------------------------------
Fri May 20 00:09:53 CEST 2022 - jgonzalez@suse.com

- version 4.3.10-1
  * fix caching of debian packages in the proxy (bsc#1199401)

-------------------------------------------------------------------
Thu Apr 28 10:13:40 CEST 2022 - jgonzalez@suse.com

- version 4.3.9-1
  * Disable HSTS headers by default

-------------------------------------------------------------------
Tue Apr 19 12:03:58 CEST 2022 - jgonzalez@suse.com

- version 4.3.8-1
  * Forward images, kernel and initrd requests through squid to the
    server
    Part of saltboot containerization workflow
  * Enable HSTS for Apache to force traffic to be in HTTPS
  * Expose release notes to www_path

-------------------------------------------------------------------
Fri Mar 11 16:48:49 CET 2022 - jgonzalez@suse.com

- version 4.3.7-1
  * Prepare the package for the proxy on containers

-------------------------------------------------------------------
Fri Mar 11 15:10:21 CET 2022 - jgonzalez@suse.com

- version 4.3.6-1
  * Fix changelog format

-------------------------------------------------------------------
Fri Mar 11 14:52:27 CET 2022 - jgonzalez@suse.com

- version 4.3.5-1
  * Remove pylint according to Fedora package guidelines.

-------------------------------------------------------------------
Tue Feb 15 10:03:51 CET 2022 - jgonzalez@suse.com

- version 4.3.4-1
  * create directories for TLS Certificates

-------------------------------------------------------------------
Tue Jan 18 13:57:39 CET 2022 - jgonzalez@suse.com

- version 4.3.3-1
  * Remove old Python 2 dependency on module new from rhnAuthCacheClient
  * remove unnecessary package dependencies
  * add an option to send salt-broker logs to standard output/error
    instead of files
  * Update the token in case a channel can't be found in the cache.
    (bsc#1193585)

-------------------------------------------------------------------
Fri Nov 05 13:51:07 CET 2021 - jgonzalez@suse.com

- version 4.3.2-1
  * remove SSLProtocol configuration which should be done in the ssl
    configuration file

-------------------------------------------------------------------
Mon Aug 09 11:02:14 CEST 2021 - jgonzalez@suse.com

- version 4.3.1-1
- Adapted directory and file ownerships
- Modified for pylint pass.
- Fix build on Enterprise Linux
- Fix traceback on handling sslerror (bsc#1187673)

-------------------------------------------------------------------
Mon May 24 12:37:55 CEST 2021 - jgonzalez@suse.com

- version 4.2.5-1
- prevent stopping publishing messages on XPUB socket of salt-broker
  (bsc#1182954)
- using Loader=yaml.SafeLoader for yaml.load as using yaml.load
  without Loader is deprecated as the default Loader is unsafe

-------------------------------------------------------------------
Wed May 05 16:35:36 CEST 2021 - jgonzalez@suse.com

- version 4.2.4-1
- set max date to max one year (bsc#1175369)
- remove 'ignore-no-cache' which is obsolete (bsc#1175369)
- remove 127.0.0.1 acl which is already built in (bsc#1175369)

-------------------------------------------------------------------
Thu Feb 25 12:07:16 CET 2021 - jgonzalez@suse.com

- version 4.2.3-1
- adapt to new SSL implementation of rhnlib (bsc#1181807)

-------------------------------------------------------------------
Wed Jan 27 13:19:10 CET 2021 - jgonzalez@suse.com

- version 4.2.2-1
- fix package manager string compare - python3 porting issue

-------------------------------------------------------------------
Fri Sep 18 12:17:21 CEST 2020 - jgonzalez@suse.com

- version 4.2.1-1
- Python3 fix for loading pickle file during kickstart
  procedure (bsc#1174201)
- Update package version to 4.2.0

-------------------------------------------------------------------
Wed Nov 27 17:02:23 CET 2019 - jgonzalez@suse.com

- version 4.1.1-1
- fix problems with Package Hub repos having multiple rpms with same NEVRA
  but different checksums (bsc#1146683)
- build as python3 only package
- replace spacewalk-usix with uyuni-common-libs
- don't skip auth token check for remote actions
- Bump version to 4.1.0 (bsc#1154940)
- use /etc/rhn from uyuni-base-common
- move /usr/share/rhn/config-defaults to uyuni-base-common

-------------------------------------------------------------------
Wed Jul 31 17:35:03 CEST 2019 - jgonzalez@suse.com

- version 4.0.12-1
- Fix for CVE-2019-10137. A path traversal flaw was found in the
  way the proxy processes cached client tokens. A remote,
  unauthenticated, attacker could use this flaw to test the
  existence of arbitrary files, or if they have access to the
  proxy's filesystem, execute arbitrary code in the context of the
  proxy. (bsc#1136476)

-------------------------------------------------------------------
Wed May 15 15:12:44 CEST 2019 - jgonzalez@suse.com

- version 4.0.11-1
- SPEC cleanup

-------------------------------------------------------------------
Wed Apr 24 20:52:56 CEST 2019 - jgonzalez@suse.com

- version 4.0.10-1
- Fix config declaration for rhn.conf (bsc#1132197)

-------------------------------------------------------------------
Mon Apr 22 12:13:52 CEST 2019 - jgonzalez@suse.com

- version 4.0.9-1
- do not reset rhn.conf on proxy during upgrade (bsc#1132197)
- fix proxying chunked HTTP content via suse manager proxy
  This happens when calling XMLRPC API via the proxy
  (bsc#1128133)
- Add makefile and pylintrc for PyLint

-------------------------------------------------------------------
Wed Apr 03 17:11:19 CEST 2019 - jgonzalez@suse.com

- version 4.0.8-1
- remove apache access_compat module from config if it exists

-------------------------------------------------------------------
Mon Mar 25 16:43:43 CET 2019 - jgonzalez@suse.com

- version 4.0.7-1
- remove apache access_compat module and adapt config files

-------------------------------------------------------------------
Sat Mar 02 00:11:10 CET 2019 - jgonzalez@suse.com

- version 4.0.6-1
- Support token authentication for Debian/Ubuntu clients

-------------------------------------------------------------------
Wed Feb 27 13:03:00 CET 2019 - jgonzalez@suse.com

- version 4.0.5-1
- Fix issues when running proxy on Python 3

-------------------------------------------------------------------
Wed Jan 16 12:23:41 CET 2019 - jgonzalez@suse.com

- version 4.0.4-1
- Require rhnlib with correct python version

-------------------------------------------------------------------
Mon Dec 17 14:38:53 CET 2018 - jgonzalez@suse.com

- version 4.0.3-1
- Add support for Python 3 on spacewalk-proxy

-------------------------------------------------------------------
Fri Oct 26 10:33:16 CEST 2018 - jgonzalez@suse.com

- version 4.0.2-1
- Change dependencies from rhnpush to mgr-push (bsc#1104034)

-------------------------------------------------------------------
Fri Aug 10 15:24:07 CEST 2018 - jgonzalez@suse.com

- version 4.0.1-1
- Bump version to 4.0.0 (bsc#1104034)
- Fix copyright for the package specfile (bsc#1103696)

-------------------------------------------------------------------
Tue Jun 05 10:10:29 CEST 2018 - jgonzalez@suse.com

- version 2.8.5.3-1
- Increase max open files for salt-broker service (bsc#1094705)

-------------------------------------------------------------------
Mon Mar 05 08:51:41 CET 2018 - jgonzalez@suse.com

- version 2.8.5.2-1
- remove empty clean section from spec (bsc#1083294)

-------------------------------------------------------------------
Wed Feb 28 09:41:01 CET 2018 - jgonzalez@suse.com

- version 2.8.5.1-1
- Sync with upstream

-------------------------------------------------------------------
Wed Jan 17 12:53:02 CET 2018 - jgonzalez@suse.com

- version 2.8.2.1-1
- Proxy: use query string in upstream HEAD requests (bsc#1036260)

-------------------------------------------------------------------
Tue Nov 28 14:36:51 CET 2017 - jgonzalez@suse.com

- version 2.7.1.5-1
- Try to resolve the proxy hostname even if the HTTP 'Host' header
  is an ip address (bsc#1057542)

-------------------------------------------------------------------
Tue Aug 08 11:30:23 CEST 2017 - fkobzik@suse.de

- version 2.7.1.4-1
- Proxy: use query string in upstream HEAD requests (bsc#1036260)

-------------------------------------------------------------------
Mon May 29 15:06:36 CEST 2017 - mc@suse.de

- version 2.7.1.3-1
- on pkg upgrade move mgrsshtunnel home to /var/lib/spacewalk
- change mgrsshtunnel user home to /var/lib/spacewalk
- fix starting/stopping services rhn-proxy (bsc#1038858)
- don't append to parent key response to authorized_keys on http err
  (bsc#724390)

-------------------------------------------------------------------
Wed May 03 15:59:41 CEST 2017 - michele.bologna@suse.com

- version 2.7.1.2-1
- Lower the use-file-instead-of-memory threshold (bsc#1030342)

-------------------------------------------------------------------
Fri Mar 31 09:36:25 CEST 2017 - mc@suse.de

- version 2.7.1.1-1
- pylint fixes - proxy

-------------------------------------------------------------------
Tue Mar 07 14:47:41 CET 2017 - mc@suse.de

- version 2.7.0.3-1
- Updated links to github in spec files
- use SUSE product names instead of spacewalk/rhn (bsc#1000110)

-------------------------------------------------------------------
Tue Feb 07 15:07:39 CET 2017 - michele.bologna@suse.com

- version 2.7.0.2-1
- Add rcsalt-broker script (bsc#1012787)

-------------------------------------------------------------------
Wed Jan 11 16:24:33 CET 2017 - michele.bologna@suse.com

- version 2.7.0.1-1
- Bumping package versions for 2.7.

-------------------------------------------------------------------
Fri Dec 16 12:09:08 CET 2016 - michele.bologna@suse.com

- version 2.5.1.5-1
- Add keepalive settings for ZeroMQ connections from broker to master
  (bsc#1012613)
- Revert "provide /usr/share/spacewalk in proxy" (bsc#1008221)

-------------------------------------------------------------------
Mon Nov 07 11:04:27 CET 2016 - michele.bologna@suse.com

- version 2.5.1.4-1
- Fix auth of traditional clients via proxy (bsc#1008221)

-------------------------------------------------------------------
Thu Oct 06 15:05:12 CEST 2016 - mc@suse.de

- version 2.5.1.3-1
- Support 'X-Mgr-Auth' headers in proxy for RedHat minions
- Fix for Proxy chains: we only use suseLib.accessible when auth
  token is present
- Check for the auth token in HEAD requests
- Renaming saltproxy to salt-broker. Using /etc/salt/ and /var/log/salt/
- make proxy aware of URLs with auth tokens
- Salt ZeroMQ proxy service

-------------------------------------------------------------------
Mon Mar 21 16:37:00 CET 2016 - mc@suse.de

- version 2.5.1.2-1
- fix file permissions (bsc#970550)

-------------------------------------------------------------------
Tue Dec 15 19:32:24 CET 2015 - mc@suse.de

- version 2.5.1.1-1
- remove old dependency

-------------------------------------------------------------------
Mon Nov 30 11:01:33 CET 2015 - mc@suse.de

- version 2.5.0.2-1
- remove deprecated DefaultType in apache proxy configuration.
- fix start of proxy services
- add module 'version' to apache configuration

-------------------------------------------------------------------
Wed Oct 07 14:35:40 CEST 2015 - mc@suse.de

- version 2.5.0.1-1
- Bumping package versions for 2.5.
- change permissions on config-default directory

-------------------------------------------------------------------
Mon Jun 22 16:19:47 CEST 2015 - jrenner@suse.de

- version 2.1.15.7-1
- disable WebUI redirecting (bsc#922923)
- make proxy able to understand (bad) requests from ubuntu clients

-------------------------------------------------------------------
Thu May 28 07:43:14 UTC 2015 - smoioli@suse.com

- disable pylint which is not compatible with upstream's version.
 We run it independently in Jenkins anyway

-------------------------------------------------------------------
Tue Mar 31 14:36:41 CEST 2015 - mc@suse.de

- version 2.1.15.6-1
- wsgi.input is only guaranteed to be readable once.
  Prevent to read it twice
- prevent squid 3.2 from detecting forwarding loops

-------------------------------------------------------------------
Thu Dec 04 13:25:58 CET 2014 - mc@suse.de

- version 2.1.15.5-1
- read systemid path from configuration
- configure proxy max memory file size separately from buffer_size

-------------------------------------------------------------------
Tue Jun 17 11:06:01 CEST 2014 - jrenner@suse.de

- version 2.1.15.4-1
- Add default path structure to proxy lookaside that avoids collisions
- Make rhnpush backwards-compatible with old spacewalk-proxy
- rhn_package_manager should not force md5; use package headers

-------------------------------------------------------------------
Tue May 06 15:14:05 CEST 2014 - mc@suse.de

- version 2.1.15.3-1
- Proxy should not make bogus fqdn:port DNS queries
- unified SLP service identifiers (FATE#316384)

-------------------------------------------------------------------
Thu Feb 27 15:22:41 CET 2014 - fcastelli@suse.com

- version 2.1.15.2-1
- advertise registration URL via SLP
- Add SLP activation to configure-proxy.sh; fix SLP registration file for proxy

-------------------------------------------------------------------
Fri Feb 07 13:49:36 CET 2014 - mc@suse.de

- version 2.1.15.1-1
- add SLP support
- Updating the copyright years info

-------------------------------------------------------------------
Mon Jan 13 09:40:30 CET 2014 - mc@suse.de

- version 2.1.14.1-1
- Fixing typo in message

-------------------------------------------------------------------
Wed Dec 18 13:50:32 CET 2013 - mc@suse.de

- version 2.1.12.2-1
- Fixed client registration via proxy [bnc#855610]

-------------------------------------------------------------------
Mon Dec 09 16:50:37 CET 2013 - mc@suse.de

- version 2.1.12.1-1
- switch to 2.1

-------------------------------------------------------------------
Thu Nov 28 16:21:54 CET 2013 - mc@suse.de

- version 1.7.12.13-1
- /etc/hosts doesn't work with proxies (bnc#850983)

-------------------------------------------------------------------
Fri Sep 27 09:58:15 CEST 2013 - mc@suse.de

- version 1.7.12.12-1
- Add redirect for bootstrap repositories (FATE#315138)

-------------------------------------------------------------------
Wed Aug 21 15:35:50 CEST 2013 - mc@suse.de

- version 1.7.12.11-1
- add comment for new timeout option (bnc#833685)

-------------------------------------------------------------------
Wed Jun 12 13:24:25 CEST 2013 - mc@suse.de

- version 1.7.12.10-1
- make Proxy timeouts configurable (bnc#815460)
- Do not read response data into memory (bnc#801151)
- do not read data into memory which should be
  send to the server (bnc#801151)

-------------------------------------------------------------------
Fri Feb 08 11:04:34 CET 2013 - mc@suse.de

- version 1.7.12.9-1
- raise NotLocalError if package is not in cache file (bnc#799684)
- Remove superfluous stuff from cobbler-proxy.conf (bnc#796581)

-------------------------------------------------------------------
Thu Nov 22 15:27:54 CET 2012 - jrenner@suse.de

- version 1.7.12.8-1
- keep the proxy from trying to auth as 127.0.0.1
  (bnc#794825)

-------------------------------------------------------------------
Fri Oct 05 10:58:13 CEST 2012 - mc@suse.de

- version 1.7.12.7-1

-------------------------------------------------------------------
Fri Sep 28 16:13:32 CEST 2012 - mc@suse.de

- version 1.7.12.6-1
- separate proxy auth error hostname into separate header
  (bnc#783667)
- Don't expect string to already be imported
- multi-tiered proxies don't update auth tokens correctly
  (bnc#783667)

-------------------------------------------------------------------
Thu Aug 02 16:22:20 CEST 2012 - mc@suse.de

- version 1.7.12.5-1
- fixed man page
- removed dead --no-cache option

-------------------------------------------------------------------
Mon Jun 25 10:23:51 CEST 2012 - mc@suse.de

- version 1.7.12.4-1
- fixed man page for rhn-package-manager

-------------------------------------------------------------------
Thu Jun 21 11:43:35 CEST 2012 - jrenner@suse.de

- version 1.7.12.3-1
- use session based authentication

-------------------------------------------------------------------
Fri Apr 20 15:33:19 CEST 2012 - mc@suse.de

- version 1.7.12.2-1
- refresh proxy auth cache for hostname changes

-------------------------------------------------------------------
Thu Apr 19 16:25:05 CEST 2012 - mantel@suse.de

- fix broken squid/http_proxy require

-------------------------------------------------------------------
Thu Apr 19 14:17:58 CEST 2012 - mantel@suse.de

- require http_proxy instead of squid to allow use of squid3

-------------------------------------------------------------------
Fri Mar 23 11:29:59 CET 2012 - mc@suse.de

- rotate logfiles as user wwwrun (bnc#681984) CVE-2011-1550

-------------------------------------------------------------------
Wed Mar 21 17:47:50 CET 2012 - mc@suse.de

- version 1.7.12.1-1
- Bumping package version

-------------------------------------------------------------------
Tue Feb  7 16:31:28 CET 2012 - mantel@suse.de

- enable option FollowSymLinks, else SLES10-SP4 clients will
  not work (bootstrap is symlinked to SP3)(bnc#742473)

-------------------------------------------------------------------
Tue Sep 20 17:38:11 CEST 2011 - iartarisi@suse.cz

- use pylint instead of python-pylint for %checks

-------------------------------------------------------------------
Thu Aug 11 15:08:53 CEST 2011 - iartarisi@suse.cz

- delete xxmlrpclib
- fix other imports after the move from spacewalk.common

-------------------------------------------------------------------
Wed Aug 10 11:29:34 CEST 2011 - iartarisi@suse.cz

- fix imports after module layout changes in spacewalk.common

-------------------------------------------------------------------
Fri May  6 11:15:30 CEST 2011 - mc@suse.de

- redirect all required xmlrpc calls (bnc#692212)

-------------------------------------------------------------------
Thu Mar 31 11:00:41 CEST 2011 - mantel@suse.de

- more debranding

-------------------------------------------------------------------
Thu Mar 31 10:09:02 CEST 2011 - mc@suse.de
 
- add symlink spacewalk-proxy to rhn-proxy and debrand
  the start script (bnc#684033)

-------------------------------------------------------------------
Thu Mar  3 17:47:26 CET 2011 - mc@suse.de

- enable SSL in apache 

-------------------------------------------------------------------
Thu Mar  3 15:27:34 CET 2011 - mc@suse.de

- allow directory listing of /pub/ (bnc#676684) 

-------------------------------------------------------------------
Thu Mar  3 13:44:03 CET 2011 - mantel@suse.de

- add apache modules in proxy-common package

-------------------------------------------------------------------
Thu Mar  3 12:44:09 CET 2011 - mantel@suse.de

- move apache module configuration to main package

-------------------------------------------------------------------
Thu Mar  3 10:48:18 CET 2011 - mantel@suse.de

- adapt rhn-proxy for SUSE Manager

-------------------------------------------------------------------
Sun Jan 30 15:29:27 CET 2011 - mc@suse.de

- backport upstream fixes

-------------------------------------------------------------------
Mon Nov 29 13:50:15 CET 2010 - mantel@suse.de

- use correct %{apache_user}

-------------------------------------------------------------------
Mon Nov 29 11:57:01 CET 2010 - mantel@suse.de

- ignore Requires: initscripts for now

-------------------------------------------------------------------
Mon Nov 29 08:36:24 CET 2010 - mantel@suse.de

- ignore Requires: sos for now

-------------------------------------------------------------------
Wed Nov 24 16:24:57 CET 2010 - mantel@suse.de

- fix Requires for SuSE

-------------------------------------------------------------------
Wed Sep 15 09:39:39 CEST 2010 - mantel@suse.de

- Initial release of spacewalk-proxy

0707010000003A000081B400000000000000000000000168DD3ED300000051000000000000000000000000000000000000004100000000spacewalk-proxy/spacewalk-proxy.changes.agraul.pylint-everything- Use startswith() or endswith() in Python code
- Allow existing pylint failures
0707010000003B000081B400000000000000000000000168DD3ED300000022000000000000000000000000000000000000003E00000000spacewalk-proxy/spacewalk-proxy.changes.agraul.reformat-black- Reformat Python code with black
0707010000003C000081B400000000000000000000000168DD3ED300000020000000000000000000000000000000000000004600000000spacewalk-proxy/spacewalk-proxy.changes.mcalmer.fix-changelog-formats- Fix syntax error in changelog
0707010000003D000081B400000000000000000000000168DD3ED300000041000000000000000000000000000000000000003F00000000spacewalk-proxy/spacewalk-proxy.changes.nadvornik.pxe-optimize- Handle large static files outside of wsgi script (bsc#1244424)
0707010000003E000081B400000000000000000000000168DD3ED300000124000000000000000000000000000000000000004200000000spacewalk-proxy/spacewalk-proxy.changes.oholecek.proxy_conf_reorg- Reorganize proxy apache configuration
  * remove unused access to pub dir
  * move cobbler configs from the uyuni-config to the proxy package
  * add max workers limit to 150 (bsc#1244552)
  * use proxypass instead of wsgi to pass API calls to the server
  and anonymous dirs (bsc#1241880)
0707010000003F000081B400000000000000000000000168DD3ED300003B8D000000000000000000000000000000000000002500000000spacewalk-proxy/spacewalk-proxy.spec#
# spec file for package spacewalk-proxy
#
# Copyright (c) 2025 SUSE LLC
# Copyright (c) 2008-2018 Red Hat, Inc.
#
# All modifications and additions to the file contributed by third parties
# remain the property of their copyright owners, unless otherwise agreed
# upon. The license for this file, and modifications and additions to the
# file, is the same license as for the pristine package itself (unless the
# license for the pristine package is not an Open Source License, in which
# case the license is the MIT License). An "Open Source License" is a
# license that conforms to the Open Source Definition (Version 1.9)
# published by the Open Source Initiative.

# Please submit bugfixes or comments via https://bugs.opensuse.org/
#

%{!?python3_sitelib: %global python3_sitelib %(%{__python3} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")}

Name:           spacewalk-proxy
Version:        5.2.0
Release:        0
Summary:        Spacewalk Proxy Server
License:        GPL-2.0-only
# FIXME: use correct group or remove it, see "https://en.opensuse.org/openSUSE:Package_group_guidelines"
Group:          Applications/Internet
URL:            https://github.com/uyuni-project/uyuni
Source0:        https://github.com/spacewalkproject/spacewalk/archive/%{name}-%{version}.tar.gz
BuildRequires:  python3
Requires:       httpd
Requires:       python3-uyuni-common-libs
Requires:       spacewalk-certs-tools
Requires:       spacewalk-ssl-cert-check
BuildRequires:  make
BuildRequires:  mgr-push >= 4.0.0
BuildRequires:  python3-mgr-push
BuildRequires:  spacewalk-backend >= 1.7.24

%define rhnroot %{_usr}/share/rhn
%define destdir %{rhnroot}/proxy
%define rhnconf %{_sysconfdir}/rhn
%define python3rhnroot %{python3_sitelib}/spacewalk
%if 0%{?suse_version}
%define httpdconf %{_sysconfdir}/apache2/conf.d
%define apache_user wwwrun
%define apache_group www
%else
%define httpdconf %{_sysconfdir}/httpd/conf.d
%define apache_user apache
%define apache_group apache
%endif
BuildArch:      noarch

%description
This package is never built.

%package management
Summary:        Packages required by the Spacewalk Management Proxy
# FIXME: use correct group or remove it, see "https://en.opensuse.org/openSUSE:Package_group_guidelines"
Group:          Applications/Internet
%if 0%{?suse_version}
Requires:       http_proxy
Requires:       openslp-server
%else
Requires:       squid
%endif
Requires:       %{name}-broker = %{version}
Requires:       %{name}-common >= %{version}
Requires:       %{name}-docs
Requires:       %{name}-html
Requires:       %{name}-redirect = %{version}
Requires:       httpd
Requires:       spacewalk-backend >= 1.7.24
%if 0%{?fedora} || 0%{?rhel}
Requires:       sos
Requires(preun): initscripts
%endif
BuildRequires:  /usr/bin/docbook2man

%description management
This package requires all needed packages for the Spacewalk Proxy Server.

%package broker
Summary:        The Broker component for the Spacewalk Proxy Server
# FIXME: use correct group or remove it, see "https://en.opensuse.org/openSUSE:Package_group_guidelines"
Group:          Applications/Internet
Requires:       httpd
Requires:       spacewalk-proxy-package-manager
%if 0%{?suse_version}
Requires:       apache2-mod_wsgi
Requires:       apache2-prefork
%else
Requires:       mod_ssl
Requires:       python3-mod_wsgi
%endif
Requires(post): %{name}-common
Conflicts:      %{name}-redirect < %{version}-%{release}
Conflicts:      %{name}-redirect > %{version}-%{release}

%description broker
The Spacewalk Proxy Server allows package caching
and local package delivery services for groups of local servers from
Spacewalk Server. This service adds flexibility and economy of
resources to package update and deployment.

This package includes the module that decides which requests are
cacheable and should be sent to Squid, and which should be sent
directly to the parent Spacewalk server.

%package redirect
Summary:        The SSL Redirect component for the Spacewalk Proxy Server
# FIXME: use correct group or remove it, see "https://en.opensuse.org/openSUSE:Package_group_guidelines"
Group:          Applications/Internet
Requires:       httpd
Requires:       spacewalk-proxy-broker = %{version}-%{release}

%description redirect
The Spacewalk Proxy Server allows package caching
and local package delivery services for groups of local servers from
Spacewalk Server. This service adds flexibility and economy of
resources to package update and deployment.

This package includes the module that handles requests passed through
squid and ensures a fully secure SSL connection is established and
maintained between a Spacewalk Proxy Server and its parent Spacewalk
server.

%package common
Summary:        Modules shared by Spacewalk Proxy components
# FIXME: use correct group or remove it, see "https://en.opensuse.org/openSUSE:Package_group_guidelines"
Group:          Applications/Internet
Requires(pre):  uyuni-base-common
BuildRequires:  uyuni-base-common
%if 0%{?suse_version}
BuildRequires:  apache2
Requires:       apache2-mod_wsgi
%else
BuildRequires:  httpd
Requires:       mod_ssl
Requires:       python3-mod_wsgi
%endif
Requires:       %{name}-broker >= %{version}
Requires:       curl
Requires:       spacewalk-backend >= 1.7.24
Requires(pre):  policycoreutils

# weakremover used on SUSE to get rid of orphan packages which are
# unsupported and do not have a dependency anymore
Provides:       weakremover(jabberd)
Provides:       weakremover(jabberd-db)
Provides:       weakremover(jabberd-sqlite)
Provides:       weakremover(spacewalk-setup-jabberd)

%description common
The Spacewalk Proxy Server allows package caching
and local package delivery services for groups of local servers from
Spacewalk Server. This service adds flexibility and economy of
resources to package update and deployment.

This package contains the files shared by various
Spacewalk Proxy components.

%package package-manager
Summary:        Custom Channel Package Manager for the Spacewalk Proxy Server
# FIXME: use correct group or remove it, see "https://en.opensuse.org/openSUSE:Package_group_guidelines"
Group:          Applications/Internet
Requires:       mgr-push >= 4.0.0
Requires:       python3
Requires:       python3-rhnlib >= 4.2.2
Requires:       spacewalk-backend >= 1.7.24
# Python 3 build of mgr-push is needed by rhn_package_manager
Requires:       python3-mgr-push
BuildRequires:  /usr/bin/docbook2man
BuildRequires:  python3-devel

%description package-manager
The Spacewalk Proxy Server allows package caching
and local package delivery services for groups of local servers from
Spacewalk Server. This service adds flexibility and economy of
resources to package update and deployment.

This package contains the command rhn_package_manager, which manages
a Spacewalk Proxy Server's custom channel.

%package salt
Summary:        A ZeroMQ Proxy for Salt Minions
# FIXME: use correct group or remove it, see "https://en.opensuse.org/openSUSE:Package_group_guidelines"
Group:          Applications/Internet
Requires:       systemd
Requires(pre):  salt
Requires(pre):  %{name}-common
%if 0%{?suse_version} >= 1210
BuildRequires:  systemd-rpm-macros
%endif
%{?systemd_requires}

%description salt
A ZeroMQ Proxy for Salt Minions

%prep
%setup -q

%build
make -f Makefile.proxy

# Fixing shebang for Python 3
for i in $(find . -type f);
do
    sed -i '1s=^#!/usr/bin/\(python\|env python\)[0-9.]*=#!/usr/bin/python3=' $i;
done

%install
install -d -m 755 %{buildroot}/%{_sysconfdir}/pki/tls/certs
install -d -m 755 %{buildroot}/%{_sysconfdir}/pki/tls/private

make -f Makefile.proxy install PREFIX=%{buildroot}
install -d -m 750 %{buildroot}/%{_var}/cache/rhn/proxy-auth
mkdir -p %{buildroot}/%{_sysconfdir}/slp.reg.d
install -m 0644 etc/slp.reg.d/susemanagerproxy.reg %{buildroot}/%{_sysconfdir}/slp.reg.d

mkdir -p %{buildroot}/%{_var}/spool/rhn-proxy/list

%if 0%{?suse_version}
mkdir -p %{buildroot}%{_sysconfdir}/apache2
mv %{buildroot}%{_sysconfdir}/httpd/conf.d %{buildroot}/%{httpdconf}
rm -rf %{buildroot}%{_sysconfdir}/httpd
%endif
touch %{buildroot}/%{httpdconf}/cobbler-proxy.conf

ln -sf rhn-proxy %{buildroot}%{_sbindir}/spacewalk-proxy

pushd %{buildroot}
%if 0%{?suse_version}
%py3_compile -O %{buildroot}
%else
%{py_byte_compile} %{python3} %{buildroot}
%endif
popd

install -m 0750 salt-broker/salt-broker %{buildroot}/%{_bindir}/
mkdir -p %{buildroot}/%{_sysconfdir}/salt/
install -m 0644 salt-broker/broker %{buildroot}/%{_sysconfdir}/salt/
install -d -m 755 %{buildroot}/%{_unitdir}/
install -D -m 444 salt-broker/salt-broker.service %{buildroot}/%{_unitdir}/salt-broker.service

ln -s %{_sbindir}/service %{buildroot}%{_sbindir}/rcsalt-broker

install -m 0755 mgr-proxy-ssh-push-init %{buildroot}/%{_sbindir}/mgr-proxy-ssh-push-init
install -m 0755 mgr-proxy-ssh-force-cmd %{buildroot}/%{_sbindir}/mgr-proxy-ssh-force-cmd
install -d -m 0755 %{buildroot}/%{_var}/lib/spacewalk

%check

%post broker
# The systemid file contains client authentication data; restrict it to
# root and the web server group so the broker (running as apache) can
# still read it.
if [ -f %{_sysconfdir}/sysconfig/rhn/systemid ]; then
    chown root.%{apache_group} %{_sysconfdir}/sysconfig/rhn/systemid
    chmod 0640 %{_sysconfdir}/sysconfig/rhn/systemid
fi
# Reload the web server so the broker configuration takes effect
# (best effort; failures are ignored).
%if 0%{?suse_version}
/sbin/service apache2 try-restart > /dev/null 2>&1 ||:
%else
/sbin/service httpd condrestart > /dev/null 2>&1
%endif

# In case of an upgrade, get the configured package list directory and clear it
# out.  Don't worry; it will be rebuilt by the proxy.

RHN_CONFIG_PY=%{python3rhnroot}/common/rhnConfig.py
RHN_PKG_DIR=%{_var}/spool/rhn-proxy

if [ -f $RHN_CONFIG_PY ] ; then

    # Check whether the config command supports the ability to retrieve a
    # config variable arbitrarily.  Versions of  < 4.0.6 (rhn) did not.

    CFG_RHN_PKG_DIR=$(%{__python3} $RHN_CONFIG_PY get proxy.broker pkg_dir)
    # Fall back to the default spool dir when the option is unset or the
    # config tool reports the literal string "None".
    if [ -n "$CFG_RHN_PKG_DIR" -a $CFG_RHN_PKG_DIR != "None" ]; then
        RHN_PKG_DIR=$CFG_RHN_PKG_DIR
    fi
fi

rm -rf $RHN_PKG_DIR/list/*

# Make sure the scriptlet returns with success
exit 0

%post common
%if 0%{?suse_version}
sysconf_addword %{_sysconfdir}/sysconfig/apache2 APACHE_MODULES wsgi
sysconf_addword %{_sysconfdir}/sysconfig/apache2 APACHE_MODULES proxy
sysconf_addword %{_sysconfdir}/sysconfig/apache2 APACHE_MODULES rewrite
sysconf_addword %{_sysconfdir}/sysconfig/apache2 APACHE_MODULES version
sysconf_addword %{_sysconfdir}/sysconfig/apache2 APACHE_SERVER_FLAGS SSL
%endif

%post redirect
%if 0%{?suse_version}
/sbin/service apache2 try-restart > /dev/null 2>&1 ||:
%else
/sbin/service httpd condrestart > /dev/null 2>&1
%endif
# Make sure the scriptlet returns with success
exit 0

%pre salt
%if !0%{?rhel}
%service_add_pre salt-broker.service
%endif

%post salt
%if 0%{?rhel}
%{systemd_post} salt-broker.service
%else
%service_add_post salt-broker.service
%endif
systemctl enable salt-broker.service > /dev/null 2>&1 || :
systemctl start salt-broker.service > /dev/null 2>&1 || :

%preun salt
%if 0%{?rhel}
%systemd_preun salt-broker.service
%else
%service_del_preun salt-broker.service
%endif

%postun salt
%if 0%{?rhel}
%{systemd_postun} salt-broker.service
%else
%service_del_postun salt-broker.service
%endif

%preun broker
if [ $1 -eq 0 ] ; then
    # nuke the cache
    rm -rf %{_var}/cache/rhn/*
fi

%preun
if [ $1 = 0 ] ; then
%if 0%{?suse_version}
    /sbin/service apache2 try-restart > /dev/null 2>&1 ||:
%else
    /sbin/service httpd condrestart >/dev/null 2>&1
%endif
fi

%posttrans common
if [ -n "$1" ] ; then # anything but uninstall
    mkdir %{_localstatedir}/cache/rhn/proxy-auth 2>/dev/null
    chown %{apache_user}:root %{_localstatedir}/cache/rhn/proxy-auth
    restorecon %{_localstatedir}/cache/rhn/proxy-auth
fi

%files salt
%defattr(-,root,root)
%{_bindir}/salt-broker
%{_unitdir}/salt-broker.service
%{_sbindir}/rcsalt-broker
%config(noreplace) %{_sysconfdir}/salt/broker

%files broker
%defattr(-,root,root)
%dir %{destdir}
%{destdir}/broker/__init__.py*
%{destdir}/broker/rhnBroker.py*
%{destdir}/broker/rhnRepository.py*
%attr(750,%{apache_user},%{apache_group}) %dir %{_var}/spool/rhn-proxy
%attr(750,%{apache_user},%{apache_group}) %dir %{_var}/spool/rhn-proxy/list
%if 0%{?rhel}
%dir %{_var}/log/rhn
%else
%attr(770,root,%{apache_group}) %dir %{_var}/log/rhn
%endif
%config(noreplace) %{_sysconfdir}/logrotate.d/rhn-proxy-broker
# config files
%attr(644,root,%{apache_group}) %{_datadir}/rhn/config-defaults/rhn_proxy_broker.conf
%dir %{destdir}/broker/__pycache__/
%{destdir}/broker/__pycache__/*

%files redirect
%defattr(-,root,root)
%dir %{destdir}
%{destdir}/redirect/__init__.py*
%{destdir}/redirect/rhnRedirect.py*
%if 0%{?rhel}
%dir %{_var}/log/rhn
%else
%attr(770,root,%{apache_group}) %dir %{_var}/log/rhn
%endif
%config(noreplace) %{_sysconfdir}/logrotate.d/rhn-proxy-redirect
# config files
%attr(644,root,%{apache_group}) %{_datadir}/rhn/config-defaults/rhn_proxy_redirect.conf
%dir %{destdir}/redirect
%dir %{destdir}/redirect/__pycache__/
%{destdir}/redirect/__pycache__/*

%files common
%defattr(-,root,root)
%dir %{destdir}
%{destdir}/__init__.py*
%{destdir}/apacheServer.py*
%{destdir}/apacheHandler.py*
%{destdir}/rhnShared.py*
%{destdir}/rhnConstants.py*
%{destdir}/responseContext.py*
%{destdir}/rhnAuthCacheClient.py*
%{destdir}/rhnProxyAuth.py*
%{destdir}/rhnAuthProtocol.py*
%attr(750,%{apache_user},%{apache_group}) %dir %{_var}/spool/rhn-proxy
%attr(750,%{apache_user},%{apache_group}) %dir %{_var}/spool/rhn-proxy/list
%if 0%{?rhel}
%dir %{_var}/log/rhn
%else
%attr(770,root,%{apache_group}) %dir %{_var}/log/rhn
%endif
# config files
%attr(640,root,%{apache_group}) %config(noreplace) %{rhnconf}/rhn.conf
%attr(644,root,%{apache_group}) %{_datadir}/rhn/config-defaults/rhn_proxy.conf
%attr(644,root,%{apache_group}) %config %{httpdconf}/spacewalk-proxy.conf
%attr(644,root,%{apache_group}) %config %{httpdconf}/smlm-proxy-forwards.conf
# this file is created by either cli or webui installer
%ghost %config %{httpdconf}/cobbler-proxy.conf
%attr(644,root,%{apache_group}) %config %{httpdconf}/spacewalk-proxy-wsgi.conf
%{rhnroot}/wsgi/xmlrpc.py*
%{rhnroot}/wsgi/xmlrpc_redirect.py*
# the cache
%attr(750,%{apache_user},root) %dir %{_var}/cache/rhn
%attr(750,%{apache_user},root) %dir %{_var}/cache/rhn/proxy-auth
%dir %{rhnroot}
%dir %{rhnroot}/wsgi
%{_sbindir}/mgr-proxy-ssh-push-init
%{_sbindir}/mgr-proxy-ssh-force-cmd
%attr(755,root,root) %dir %{_var}/lib/spacewalk
%dir %{rhnroot}/wsgi/__pycache__/
%{rhnroot}/wsgi/__pycache__/*
%dir %{destdir}/broker
%dir %{destdir}/__pycache__/
%{destdir}/__pycache__/*
%dir %{_sysconfdir}/pki/tls
%dir %{_sysconfdir}/pki/tls/certs
%dir %{_sysconfdir}/pki/tls/private

%files package-manager
%defattr(-,root,root)
# config files
%attr(644,root,%{apache_group}) %{_datadir}/rhn/config-defaults/rhn_proxy_package_manager.conf
%{_bindir}/rhn_package_manager
%{rhnroot}/PackageManager/rhn_package_manager.py*
%{rhnroot}/PackageManager/__init__.py*
%{_mandir}/man8/rhn_package_manager.8%{?ext_man}
%dir %{rhnroot}/PackageManager
%dir %{rhnroot}/PackageManager/__pycache__/
%{rhnroot}/PackageManager/__pycache__/*

%files management
%defattr(-,root,root)
# dirs
%dir %{destdir}
# start/stop script
%attr(755,root,root) %{_sbindir}/rhn-proxy
%{_sbindir}/spacewalk-proxy
# mans
%{_mandir}/man8/rhn-proxy.8%{?ext_man}
%dir %{_datadir}/rhn
%dir %{_sysconfdir}/slp.reg.d
%config %{_sysconfdir}/slp.reg.d/susemanagerproxy.reg

%changelog
07070100000040000041FD00000000000000000000000268DD3ED300000000000000000000000000000000000000000000001500000000spacewalk-proxy/wsgi07070100000041000081B400000000000000000000000168DD3ED300000073000000000000000000000000000000000000001E00000000spacewalk-proxy/wsgi/Makefile#
# Makefile for proxy/wsgi
#

# Path to the top of the source tree, where Makefile.defs lives.
TOP	= ..
# Install subdirectory under $(ROOT); consumed by Makefile.defs.
SUBDIR	= wsgi
# Python modules (without the .py suffix) installed by the shared rules.
FILES	= xmlrpc xmlrpc_redirect
# Shared variables plus all/install/clean rules for the whole project.
include $(TOP)/Makefile.defs

07070100000042000081B400000000000000000000000168DD3ED300000351000000000000000000000000000000000000001F00000000spacewalk-proxy/wsgi/xmlrpc.py# pylint: disable=missing-module-docstring
#
# Copyright (c) 2010--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#

from wsgi import wsgiHandler


def application(environ, start_response):
    """WSGI entry point: delegate the request to the proxy broker handler.

    :param environ: WSGI environment dictionary for the request
    :param start_response: WSGI response-start callable
    :return: whatever wsgiHandler.handle produces for the broker component
    """
    component_type = "broker"
    component_module = "proxy.broker"
    server_module = "proxy.apacheServer"
    return wsgiHandler.handle(
        environ, start_response, component_type, component_module, server_module
    )
07070100000043000081B400000000000000000000000168DD3ED300000355000000000000000000000000000000000000002800000000spacewalk-proxy/wsgi/xmlrpc_redirect.py# pylint: disable=missing-module-docstring
#
# Copyright (c) 2010--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#

from wsgi import wsgiHandler


def application(environ, start_response):
    """WSGI entry point: delegate the request to the SSL redirect handler.

    :param environ: WSGI environment dictionary for the request
    :param start_response: WSGI response-start callable
    :return: whatever wsgiHandler.handle produces for the redirect component
    """
    component_type = "redirect"
    component_module = "proxy.redirect"
    server_module = "proxy.apacheServer"
    return wsgiHandler.handle(
        environ, start_response, component_type, component_module, server_module
    )
07070100000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000B00000000TRAILER!!!
openSUSE Build Service is sponsored by