File container.obscpio of Package kvm-server-container

07070100000000000081a40000000000000000000000016538743500000161000000000000000000000000000000000000001e00000000container/default_network.xml<network>
  <name>default_network</name>
  <uuid>f243d94b-bd5b-415d-b4c7-ccb78ec3dc9e</uuid>
  <forward mode='nat'/>
  <bridge name='virbr5' stp='on' delay='0'/>
  <mac address='52:54:00:d0:61:e9'/>
  <ip address='192.168.10.1' netmask='255.255.255.0'>
    <dhcp>
      <range start='192.168.10.2' end='192.168.10.254'/>
    </dhcp>
  </ip>
</network>

07070100000001000081ed0000000000000000000000016538743500001846000000000000000000000000000000000000001c00000000container/kvm-server-manage#!/bin/bash

N='\033[0m'
G='\033[0;32m'
Y='\033[1;33m'
R='\033[0;31m'
SYM='\xe2\x97\x8f'
SUCCESS='\xe2\x9c\x85'
FAIL='\xe2\x9d\x8c'
COMMAND=$1
LIBVIRTD_STATE=$(systemctl is-active libvirtd.socket)
CONTAINER_STATE=$(systemctl is-active kvm-server-container.service)

if [ "$EUID" != 0 ]; then
   sudo -S "$0" "$@"
   exit $?
fi

show_help() {
   cat <<EOF

   Usage: kvm-server-manage <command>

   Commands:
      enable:  Disables and stops the monolithic libvirtd service if present
               Starts the KVM server container only if it is not already running
               Enables and (re)starts the modular libvirt services

      restart: Performs the same actions as 'enable' but also restarts
               the KVM server container if it is active
               Warning: Running VMs will be stopped before the container is restarted
      
      disable: Disables and stops the modular libvirt daemons
               Disables and stops the KVM server container
      
      stop:    Stops the KVM server container
               It will be started again on the next host boot unless 'disable' is called

      verify:  Reports whether the KVM server container and all required libvirt services are active
               Lists any inactive services that need to be addressed
      
      help:    Prints this help message

EOF
}
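# Example invocations (illustrative; the script re-executes itself via sudo when run unprivileged):
#   kvm-server-manage enable    # switch the host to the containerized modular libvirt daemons
#   kvm-server-manage verify    # check that the container and all required services are active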

if [[ "$COMMAND" = "enable" || "$COMMAND" = "restart" ]]; then
   # Disable the libvirtd monolithic daemon if present
   if [ "$LIBVIRTD_STATE" = "active" ]; then
      systemctl stop libvirtd.service && \
         echo -e "${G}${SYM} ${N}Stopped libvirtd.service" || \
         echo -e "${R}${SYM} ${N}Failed to stop libvirtd.service"
      systemctl stop libvirtd{,-ro,-admin,-tcp,-tls}.socket
      systemctl disable libvirtd.service
      systemctl disable libvirtd{,-ro,-admin,-tcp,-tls}.socket
   fi

   # (Re)Start the kvm server container
   if [[ "$CONTAINER_STATE" != "active" || "$COMMAND" = "restart" ]]; then
      echo -e "${Y}${SYM} ${N}Starting KVM Container"
      systemctl daemon-reload
      systemctl enable kvm-server-container.service
      systemctl restart kvm-server-container.service && \
         echo -e "${G}${SYM} ${N}KVM Container Started" || \
         echo -e "${R}${SYM} ${N}KVM Container Failed to Start"
   fi

   # Enable modular libvirt daemons on the host
   for drv in log lock
   do
      systemctl enable container-virt${drv}d.service
      systemctl enable virt${drv}d{,-admin}.socket
      systemctl restart virt${drv}d{,-admin}.socket
      systemctl restart container-virt${drv}d.service && \
         echo -e "${G}${SYM} ${N}Started container-virt${drv}d.service" || \
         echo -e "${R}${SYM} ${N}Failed to start container-virt${drv}d.service"
   done

   for drv in qemu network nodedev nwfilter proxy secret storage
   do
      systemctl unmask container-virt${drv}d.service
      systemctl unmask virt${drv}d{,-ro,-admin}.socket
      systemctl enable container-virt${drv}d.service
      systemctl enable virt${drv}d{,-ro,-admin}.socket
      systemctl restart virt${drv}d{,-ro,-admin}.socket
      systemctl restart container-virt${drv}d.service && \
         echo -e "${G}${SYM} ${N}Started container-virt${drv}d.service" || \
         echo -e "${R}${SYM} ${N}Failed to start container-virt${drv}d.service"
   done

elif [[ "$CONTAINER_STATE" = "active" && ( "$COMMAND" = "stop" || "$COMMAND" = "disable" ) ]]; then
   # Disable modular libvirt daemons on the host
   if [ "$COMMAND" = "disable" ]; then
      for drv in qemu network nodedev nwfilter proxy secret storage
      do
         systemctl stop container-virt${drv}d.service && \
            echo -e "${G}${SYM} ${N}Stopped container-virt${drv}d.service" || \
            echo -e "${R}${SYM} ${N}Failed to stop container-virt${drv}d.service"
         systemctl stop virt${drv}d{,-ro,-admin}.socket
         systemctl disable container-virt${drv}d.service
         systemctl disable virt${drv}d{,-ro,-admin}.socket
      done

      for drv in log lock
      do
         systemctl stop container-virt${drv}d.service && \
            echo -e "${G}${SYM} ${N}Stopped container-virt${drv}d.service" || \
            echo -e "${R}${SYM} ${N}Failed to stop container-virt${drv}d.service"
         systemctl stop virt${drv}d{,-admin}.socket
         systemctl disable container-virt${drv}d.service
         systemctl disable virt${drv}d{,-admin}.socket
      done

      # Disable container service
      systemctl disable kvm-server-container.service
   fi

   # Stop the KVM server container (done for both "stop" and "disable")
   echo -e "${Y}${SYM} ${N}Stopping KVM Container"
   systemctl stop kvm-server-container.service && \
      echo -e "${G}${SYM} ${N}KVM Container Stopped" || \
      echo -e "${R}${SYM} ${N}KVM Container Failed to Stop"

# Treat 'stop' and 'disable' as a no-op if the container is already stopped. Needed by the uninstall script
elif [[ "$CONTAINER_STATE" != "active" && ( "$COMMAND" = "stop" || "$COMMAND" = "disable" ) ]]; then
   echo "KVM Container already stopped. Nothing to do"

elif [ "$COMMAND" = "verify" ]; then
   # Count active services: the kvm-server-container service plus the modular libvirt daemons.
   # Start at -1 so the total equals ${#daemons[@]} only when everything, including the container, is active.
   count=-1
   daemons=(qemu network nodedev nwfilter proxy secret storage log lock)

   if [ "$(systemctl is-active kvm-server-container.service)" = "active" ]; then
      let "count++"
   else
      echo -e "${R}${SYM} ${N}kvm-server-container.service is not active. See 'journalctl -xeu kvm-server-container.service' for more info"
   fi

   for drv in "${daemons[@]}"
   do
      if [ "$(systemctl is-active container-virt${drv}d.service)" = "active" ]; then
         let "count++"
      else
         echo -e "${R}${SYM} ${N}container-virt${drv}d.service is not active. See 'journalctl -xeu container-virt${drv}d.service' for more info"
      fi
   done

   if [ "${count}" -eq "${#daemons[@]}" ]; then
         echo -e "${G}${SUCCESS} ${N}All required services are currently active"
   else
         echo -e "${R}${FAIL} ${N}One or more required services are inactive"
   fi
elif [[ "$COMMAND" = "help" || "$COMMAND" = "--help" ]]; then
   show_help
else
   echo "kvm-server-manage: Unknown command \"$COMMAND\""
   show_help
fi
07070100000002000081a40000000000000000000000016538743500000410000000000000000000000000000000000000001a00000000container/kvm-server.conf# CONTAINER 
CONTAINER_NAME=kvm-server
# OFFICIAL image
IMAGE=registry.opensuse.org/suse/alp/workloads/tumbleweed_containerfiles/suse/alp/workloads/kvm-server:latest

# For Dev builds
#IMAGE=registry.opensuse.org/virtualization/containerfile/suse/alp/workloads/kvm-server:latest
#IMAGE=localhost/kvmlocal:latest

# TODO: These vars should be brought in via EnvironmentFile= in the systemd units, but they never resolve correctly there
# VARS 
DATA=/var/lib/libvirt/images
ETCLIBVIRT=/etc/libvirt
VARRUNLIBVIRT=/var/run/libvirt
QEMUFIRMWARE=/usr/share/qemu

# Virtual Machine configuration
# Currently only the 'OpenStack' appliance images contain cloud-init
#https://download.opensuse.org/tumbleweed/appliances/openSUSE-Tumbleweed-Minimal-VM.x86_64-kvm-and-xen.qcow2
APPLIANCE_MIRROR=https://download.opensuse.org/tumbleweed/appliances
APPLIANCE=openSUSE-Tumbleweed-Minimal-VM.x86_64-kvm-and-xen
BACKING_DIR=${DATA}
BACKING_FORMAT=qcow2
BACKING_STORE=${BACKING_DIR}/${APPLIANCE}.${BACKING_FORMAT}
DOMAIN=Tumbleweed-JeOS
BRIDGEIF=virbr5
DISKSIZE=8
VMMEMORY=1024
VCPU=1
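# Per-host overrides are also possible via /etc/default/kvm-server, which
# virt-install-demo.sh sources after this file if present. Illustrative example:
#   VMMEMORY=2048
#   VCPU=2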

07070100000003000081a4000000000000000000000001653874350000098d000000000000000000000000000000000000001800000000container/label-install#!/bin/bash
# This is the install script for kvm when run in a privileged
# container.
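# It is normally invoked through the image's install label with the host root
# bind-mounted at /host, e.g. (illustrative invocation; the exact command comes
# from the label in the Containerfile):
#   podman container runlabel install <image>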

CONTAINER=kvm-server
# ETC
MAINCONF=${CONTAINER}.conf
NETCONF=libvirt/qemu/networks/default_network.xml
QEMUCONF=libvirt/qemu.conf
# BIN
VIRTINSTALLDEMO=virt-install-demo.sh
MANAGE=${CONTAINER}-manage

# Check for read only root filesystem
is_read_only() {
   [[ -n $(awk '$2 ~ /\/host$/ && $4 ~ /(^|,)ro($|,)/' /host/proc/mounts) ]]
}
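# The awk above matches a mount-table entry for /host whose options include 'ro',
# i.e. a line of the (illustrative) form: /dev/sda2 /host btrfs ro,relatime 0 0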

# Install/update scripts on the host 
BIN_INSTALL_PATH=$(is_read_only && echo "/host/usr/local/bin" || echo "/host/usr/bin")
SYSTEMD_INSTALL_PATH=$(is_read_only && echo "/host/usr/local/lib/systemd/system" || echo "/host/usr/lib/systemd/system")
QEMU_FIRM_PATH=$(is_read_only && echo "/host/usr/local/share/qemu" || echo "/host/usr/share/qemu")

install_common() {
   mkdir -p /host/etc/libvirt
   mkdir -p /host/var/lib/libvirt/images
   mkdir -p /host/etc/libvirt/qemu/networks
   mkdir -p ${QEMU_FIRM_PATH}
   mkdir -p ${SYSTEMD_INSTALL_PATH}
}

install_bin() {
   SCRIPT=$1
   cp -a /container/${SCRIPT} ${BIN_INSTALL_PATH}/
   # ensure the script is executable in bin dir
   chmod 755 ${BIN_INSTALL_PATH}/${SCRIPT}
}

# Install but don't update config files
install_config() {
   CONF=$1
   if [ ! -e /host/etc/${CONF} ]; then
      cp -a /container/$(basename ${CONF}) /host/etc/${CONF}
   else
      echo "/host/etc/${CONF} already exist, will not update it"
   fi
}

install_units() {
   cp -a /container/systemd/* ${SYSTEMD_INSTALL_PATH}/
}

# "Export" the QEMU firmware directory for use by the kvm-client container
# It would be nice to put this in a named volume but that would involve calling
# podman from inside the container
install_firmware() {
   cp -ra /usr/share/qemu/* ${QEMU_FIRM_PATH}/
}

## MAIN
echo "Running Install Label"
install_common
install_config ${MAINCONF}
install_config ${NETCONF}
install_config ${QEMUCONF}
install_bin ${VIRTINSTALLDEMO}
install_bin ${MANAGE}
install_units
install_firmware

# save the image path for the container that was used to run the install
# as the default container image to use for the libvirtd service. The
# image path to use should be available in the IMAGE environment variable.
INSTALL_IMAGE=${IMAGE}
source /host/etc/${MAINCONF}
# IMAGE is now exported from /host/etc/${MAINCONF} and potentially different
if [ "${INSTALL_IMAGE}" != "${IMAGE}" ]; then
   sed -i "s|^IMAGE=.*$|DEFAULT_IMAGE=${IMAGE}\nIMAGE=${INSTALL_IMAGE}|" host/etc/${MAINCONF}
fi
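# After the substitution, /host/etc/${MAINCONF} would contain lines such as
# (illustrative, assuming a local dev image was used to run the install):
#   DEFAULT_IMAGE=registry.opensuse.org/suse/alp/workloads/tumbleweed_containerfiles/suse/alp/workloads/kvm-server:latest
#   IMAGE=localhost/kvmlocal:latest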

07070100000004000081a400000000000000000000000165387435000008a0000000000000000000000000000000000000001a00000000container/label-uninstall#!/bin/bash

# This is the uninstall script for kvm when run in a privileged
# container.

CONTAINER=kvm-server

# Check for read only root filesystem
is_read_only() {
   [[ -n $(awk '$2 ~ /\/host$/ && $4 ~ /(^|,)ro($|,)/' /host/proc/mounts) ]]
}

delete_file() {
	# Use local variable names so the shell's PATH is not clobbered
	local DIR=$1
	local FILE=$2
	if [ -n "${DIR}" ] && [ -n "${FILE}" ]; then
		if [ ! -e "${DIR}/${FILE}" ]; then
			echo "${FILE} not present, nothing to remove"
		else
			/usr/bin/rm -f "${DIR}/${FILE}"
		fi
	fi
}

BIN_INSTALL_PATH=$(is_read_only && echo "/host/usr/local/bin" || echo "/host/usr/bin")
SYSTEMD_INSTALL_PATH=$(is_read_only && echo "/host/usr/local/lib/systemd/system" || echo "/host/usr/lib/systemd/system")
QEMU_FIRM_PATH=$(is_read_only && echo "/host/usr/local/share/qemu" || echo "/host/usr/share/qemu")

# removing installed files
echo "LABEL UNINSTALL: Removing all files"
delete_file /host/etc ${CONTAINER}.conf 
delete_file /host/etc/libvirt/qemu/networks default_network.xml 
delete_file ${BIN_INSTALL_PATH} virt-install-demo.sh
delete_file ${BIN_INSTALL_PATH} ${CONTAINER}-manage

# Remove systemd unit files from host
for drv in qemu network nodedev nwfilter proxy secret storage; do
   delete_file ${SYSTEMD_INSTALL_PATH} container-virt${drv}d.service
   delete_file ${SYSTEMD_INSTALL_PATH} virt${drv}d.socket
   delete_file ${SYSTEMD_INSTALL_PATH} virt${drv}d-ro.socket
   delete_file ${SYSTEMD_INSTALL_PATH} virt${drv}d-admin.socket
done

for drv in log lock; do
   delete_file ${SYSTEMD_INSTALL_PATH} container-virt${drv}d.service
   delete_file ${SYSTEMD_INSTALL_PATH} virt${drv}d.socket
   delete_file ${SYSTEMD_INSTALL_PATH} virt${drv}d-admin.socket
done

delete_file ${SYSTEMD_INSTALL_PATH} virtproxyd-tls.socket
delete_file ${SYSTEMD_INSTALL_PATH} virtproxyd-tcp.socket
delete_file ${SYSTEMD_INSTALL_PATH} libvirtd.socket
delete_file ${SYSTEMD_INSTALL_PATH} libvirtd-ro.socket
delete_file ${SYSTEMD_INSTALL_PATH} libvirtd-admin.socket
delete_file ${SYSTEMD_INSTALL_PATH} libvirtd-tls.socket
delete_file ${SYSTEMD_INSTALL_PATH} libvirtd-tcp.socket
delete_file ${SYSTEMD_INSTALL_PATH} kvm-server-container.service

# Remove installed libvirt configs
/usr/bin/rm -rf /host/etc/libvirt
# Remove qemu data dir
/usr/bin/rm -rf ${QEMU_FIRM_PATH}
07070100000005000081a40000000000000000000000016538743500000028000000000000000000000000000000000000001400000000container/qemu.confcgroup_controllers = []
namespaces = []
07070100000006000081a400000000000000000000000165387435000003f2000000000000000000000000000000000000002e00000000container/systemd/container-virtlockd.service[Unit]
Description=Virtual machine lock manager
Conflicts=libvirtd.service virtlockd.service
Requires=virtlockd.socket
Requires=virtlockd-admin.socket
Before=container-virtqemud.service
After=kvm-server-container.service
BindsTo=kvm-server-container.service
Documentation=man:virtlockd(8)
Documentation=https://libvirt.org

[Service]
Type=forking
Environment=VIRTLOCKD_ARGS=""
Environment=CONTAINER_NAME="kvm-server"
EnvironmentFile=-/etc/sysconfig/container-virtlockd
EnvironmentFile=-/etc/kvm-server.conf
ExecStart=/usr/bin/podman exec --detach --privileged ${CONTAINER_NAME} /usr/sbin/virtlockd $VIRTLOCKD_ARGS
Restart=on-failure
RestartSec=2
# Losing the locks is a really bad thing that will
# cause the machine to be fenced (rebooted), so make
# sure we discourage OOM killer
OOMScoreAdjust=-900
# Needs to allow for max guests * average disks per guest
# libvirtd.service written to expect 4096 guests, so if we
# allow for 10 disks per guest, we get:
LimitNOFILE=40960

[Install]
Also=virtlockd.socket
07070100000007000081a40000000000000000000000016538743500000476000000000000000000000000000000000000002d00000000container/systemd/container-virtlogd.service[Unit]
Description=Virtual machine log manager
Conflicts=libvirtd.service virtlogd.service 
Requires=virtlogd.socket
Requires=virtlogd-admin.socket
Before=container-virtqemud.service
After=kvm-server-container.service
BindsTo=kvm-server-container.service
Documentation=man:virtlogd(8)
Documentation=https://libvirt.org

[Service]
Type=forking
Environment=VIRTLOGD_ARGS=""
Environment=CONTAINER_NAME="kvm-server"
EnvironmentFile=-/etc/sysconfig/container-virtlogd
EnvironmentFile=-/etc/kvm-server.conf
ExecStart=/usr/bin/podman exec --detach --privileged ${CONTAINER_NAME} /usr/sbin/virtlogd $VIRTLOGD_ARGS
Restart=on-failure
RestartSec=2
# Losing the logs is a really bad thing that will
# cause the machine to be fenced (rebooted), so make
# sure we discourage OOM killer
OOMScoreAdjust=-900
# Need to have at least one file open per guest (eg QEMU
# stdio log), but might be more (eg serial console logs)
# A common case is OpenStack which often has up to 4 file
# handles per guest.
# libvirtd.service written to expect 4096 guests, so if we
# guess at 4 files per guest here that is 16k:
LimitNOFILE=16384

[Install]
Also=virtlogd.socket
07070100000008000081a400000000000000000000000165387435000003b3000000000000000000000000000000000000003100000000container/systemd/container-virtnetworkd.service[Unit]
Description=Virtualization network daemon
Conflicts=libvirtd.service virtnetworkd.service 
Requires=virtnetworkd.socket
Requires=virtnetworkd-ro.socket
Requires=virtnetworkd-admin.socket
After=network.target
After=firewalld.service
After=iptables.service
After=ip6tables.service
After=dbus.service
After=apparmor.service
After=local-fs.target
After=kvm-server-container.service
BindsTo=kvm-server-container.service
Documentation=man:virtnetworkd(8)
Documentation=https://libvirt.org

[Service]
Type=forking
Environment=VIRTNETWORKD_ARGS=""
Environment=CONTAINER_NAME="kvm-server"
EnvironmentFile=-/etc/kvm-server.conf
EnvironmentFile=-/etc/sysconfig/container-virtnetworkd
ExecStart=/usr/bin/podman exec --detach --privileged ${CONTAINER_NAME} /usr/sbin/virtnetworkd $VIRTNETWORKD_ARGS
Restart=on-failure
RestartSec=2

[Install]
WantedBy=multi-user.target
Also=virtnetworkd.socket
Also=virtnetworkd-ro.socket
Also=virtnetworkd-admin.socket
07070100000009000081a4000000000000000000000001653874350000036b000000000000000000000000000000000000003100000000container/systemd/container-virtnodedevd.service[Unit]
Description=Virtualization nodedev daemon
Conflicts=libvirtd.service virtnodedevd.service
Requires=virtnodedevd.socket
Requires=virtnodedevd-ro.socket
Requires=virtnodedevd-admin.socket
After=network.target
After=dbus.service
After=apparmor.service
After=local-fs.target
After=kvm-server-container.service
BindsTo=kvm-server-container.service
Documentation=man:virtnodedevd(8)
Documentation=https://libvirt.org

[Service]
Type=forking
Environment=VIRTNODEDEVD_ARGS=""
Environment=CONTAINER_NAME="kvm-server"
EnvironmentFile=-/etc/sysconfig/container-virtnodedevd
EnvironmentFile=-/etc/kvm-server.conf
ExecStart=/usr/bin/podman exec --detach --privileged ${CONTAINER_NAME} /usr/sbin/virtnodedevd $VIRTNODEDEVD_ARGS
Restart=on-failure
RestartSec=2

[Install]
WantedBy=multi-user.target
Also=virtnodedevd.socket
Also=virtnodedevd-ro.socket
Also=virtnodedevd-admin.socket
0707010000000a000081a40000000000000000000000016538743500000378000000000000000000000000000000000000003200000000container/systemd/container-virtnwfilterd.service[Unit]
Description=Virtualization nwfilter daemon
Conflicts=libvirtd.service virtnwfilterd.service
Requires=virtnwfilterd.socket
Requires=virtnwfilterd-ro.socket
Requires=virtnwfilterd-admin.socket
After=network.target
After=dbus.service
After=apparmor.service
After=local-fs.target
After=kvm-server-container.service
BindsTo=kvm-server-container.service
Documentation=man:virtnwfilterd(8)
Documentation=https://libvirt.org

[Service]
Type=forking
Environment=VIRTNWFILTERD_ARGS=""
Environment=CONTAINER_NAME="kvm-server"
EnvironmentFile=-/etc/sysconfig/container-virtnwfilterd
EnvironmentFile=-/etc/kvm-server.conf
ExecStart=/usr/bin/podman exec --detach --privileged ${CONTAINER_NAME} /usr/sbin/virtnwfilterd $VIRTNWFILTERD_ARGS
Restart=on-failure
RestartSec=2

[Install]
WantedBy=multi-user.target
Also=virtnwfilterd.socket
Also=virtnwfilterd-ro.socket
Also=virtnwfilterd-admin.socket
0707010000000b000081a4000000000000000000000001653874350000034b000000000000000000000000000000000000002f00000000container/systemd/container-virtproxyd.service[Unit]
Description=Virtualization daemon
Conflicts=libvirtd.service virtproxyd.service
Requires=virtproxyd.socket
Requires=virtproxyd-ro.socket
Requires=virtproxyd-admin.socket
After=network.target
After=dbus.service
After=apparmor.service
After=local-fs.target
After=kvm-server-container.service
BindsTo=kvm-server-container.service
Documentation=man:virtproxyd(8)
Documentation=https://libvirt.org

[Service]
Type=forking
Environment=VIRTPROXYD_ARGS=""
Environment=CONTAINER_NAME="kvm-server"
EnvironmentFile=-/etc/sysconfig/container-virtproxyd
EnvironmentFile=-/etc/kvm-server.conf
ExecStart=/usr/bin/podman exec --detach --privileged ${CONTAINER_NAME} /usr/sbin/virtproxyd $VIRTPROXYD_ARGS
Restart=on-failure
RestartSec=2

[Install]
WantedBy=multi-user.target
Also=virtproxyd.socket
Also=virtproxyd-ro.socket
Also=virtproxyd-admin.socket
0707010000000c000081a4000000000000000000000001653874350000070b000000000000000000000000000000000000002e00000000container/systemd/container-virtqemud.service[Unit]
Description=Virtualization qemu daemon
Conflicts=libvirtd.service virtqemud.service
Requires=virtlogd.socket
Requires=virtlockd.socket
Requires=virtqemud.socket
Requires=virtqemud-ro.socket
Requires=virtqemud-admin.socket
Wants=systemd-machined.service
Before=libvirt-guests.service
After=kvm-server-container.service
BindsTo=kvm-server-container.service
Documentation=man:virtqemud(8)
Documentation=https://libvirt.org

[Service]
Type=forking
Environment=VIRTQEMUD_ARGS=""
Environment=CONTAINER_NAME="kvm-server"
EnvironmentFile=-/etc/sysconfig/container-virtqemud
EnvironmentFile=-/etc/kvm-server.conf
ExecStart=/usr/bin/podman exec --detach --privileged ${CONTAINER_NAME} /usr/sbin/virtqemud $VIRTQEMUD_ARGS
Restart=on-failure
RestartSec=2
# At least 1 FD per guest, often 2 (eg qemu monitor + qemu agent).
# eg if we want to support 4096 guests, we'll typically need 8192 FDs
# If changing this, also consider container-virtlogd.service & container-virtlockd.service
# limits which are also related to number of guests
LimitNOFILE=8192
# The cgroups pids controller can limit the number of tasks started by
# the daemon, which can limit the number of domains for some hypervisors.
# A conservative default of 8 tasks per guest results in a TasksMax of
# 32k to support 4096 guests.
TasksMax=32768
# With cgroups v2 there is no devices controller anymore, we have to use
# eBPF to control access to devices.  In order to do that we create a eBPF
# hash MAP which locks memory.  The default map size for 64 devices together
# with program takes 12k per guest.  After rounding up we will get 64M to
# support 4096 guests.
LimitMEMLOCK=64M

[Install]
WantedBy=multi-user.target
Also=virtlogd.socket
Also=virtlockd.socket
Also=virtqemud.socket
Also=virtqemud-ro.socket
Also=virtqemud-admin.socket
0707010000000d000081a4000000000000000000000001653874350000035e000000000000000000000000000000000000003000000000container/systemd/container-virtsecretd.service[Unit]
Description=Virtualization secret daemon
Conflicts=libvirtd.service virtsecretd.service
Requires=virtsecretd.socket
Requires=virtsecretd-ro.socket
Requires=virtsecretd-admin.socket
After=network.target
After=dbus.service
After=apparmor.service
After=local-fs.target
After=kvm-server-container.service
BindsTo=kvm-server-container.service
Documentation=man:virtsecretd(8)
Documentation=https://libvirt.org

[Service]
Type=forking
Environment=VIRTSECRETD_ARGS=""
Environment=CONTAINER_NAME="kvm-server"
EnvironmentFile=-/etc/sysconfig/container-virtsecretd
EnvironmentFile=-/etc/kvm-server.conf
ExecStart=/usr/bin/podman exec --detach --privileged ${CONTAINER_NAME} /usr/sbin/virtsecretd $VIRTSECRETD_ARGS
Restart=on-failure
RestartSec=2

[Install]
WantedBy=multi-user.target
Also=virtsecretd.socket
Also=virtsecretd-ro.socket
Also=virtsecretd-admin.socket
0707010000000e000081a40000000000000000000000016538743500000397000000000000000000000000000000000000003100000000container/systemd/container-virtstoraged.service[Unit]
Description=Virtualization storage daemon
Conflicts=libvirtd.service virtstoraged.service
Requires=virtstoraged.socket
Requires=virtstoraged-ro.socket
Requires=virtstoraged-admin.socket
After=network.target
After=dbus.service
After=iscsid.service
After=apparmor.service
After=local-fs.target
After=remote-fs.target
After=kvm-server-container.service
BindsTo=kvm-server-container.service
Documentation=man:virtstoraged(8)
Documentation=https://libvirt.org

[Service]
Type=forking
Environment=VIRTSTORAGED_ARGS=""
Environment=CONTAINER_NAME="kvm-server"
EnvironmentFile=-/etc/sysconfig/container-virtstoraged
EnvironmentFile=-/etc/kvm-server.conf
ExecStart=/usr/bin/podman exec --detach --privileged ${CONTAINER_NAME} /usr/sbin/virtstoraged $VIRTSTORAGED_ARGS
Restart=on-failure
RestartSec=2

[Install]
WantedBy=multi-user.target
Also=virtstoraged.socket
Also=virtstoraged-ro.socket
Also=virtstoraged-admin.socket
0707010000000f000081a40000000000000000000000016538743500000693000000000000000000000000000000000000002f00000000container/systemd/kvm-server-container.service[Unit]
Description=Meta service for containerized virtualization daemon
Conflicts=libvirtd.service
Wants=systemd-machined.service
Before=container-virtlogd.service
Before=container-virtlockd.service
Before=container-virtqemud.service
Before=container-virtnetworkd.service
Before=container-virtnwfilterd.service
Before=container-virtnodedevd.service
Before=container-virtsecretd.service
Before=container-virtstoraged.service
After=network-online.target
After=dbus.service
After=apparmor.service
After=local-fs.target
After=remote-fs.target
After=systemd-logind.service
After=systemd-machined.service

[Service]
Type=notify
NotifyAccess=all
Environment=IMAGE="registry.opensuse.org/suse/alp/workloads/tumbleweed_containerfiles/suse/alp/workloads/kvm-server:latest"
Environment=CONTAINER_NAME="kvm-server"
EnvironmentFile=-/etc/sysconfig/kvm-server-container
EnvironmentFile=-/etc/kvm-server.conf
ExecStartPre=/bin/rm -f %t/%n.pid %t/%n.ctr-id
ExecStartPre=/usr/bin/mkdir -p /run/libvirt
ExecStart=/usr/bin/podman run --conmon-pidfile %t/%n.pid --cidfile %t/%n.ctr-id --cgroups=no-conmon --sdnotify=conmon --init --detach --replace --rm --net=host --privileged --cgroupns=host -e IMAGE=${IMAGE} -v /:/host -v /run/libvirt:/run/libvirt -v /etc/libvirt:/etc/libvirt -v /var/lib/libvirt/images:/var/lib/libvirt/images --name ${CONTAINER_NAME} ${IMAGE} /usr/bin/sleep infinity
ExecStop=-+virsh -c qemu:///system stop --all
ExecStop=/usr/bin/podman stop --ignore --cidfile %t/%n.ctr-id -t 10
ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/%n.ctr-id
KillMode=control-group
Restart=on-failure
TimeoutStartSec=70
TimeoutStopSec=120
RestartSec=1

[Install]
WantedBy=multi-user.target

07070100000010000081a40000000000000000000000016538743500000120000000000000000000000000000000000000002900000000container/systemd/virtlockd-admin.socket[Unit]
Description=Virtual machine lock manager admin socket
Before=container-virtqemud.service
BindsTo=virtlockd.socket
After=virtlockd.socket

[Socket]
ListenStream=/run/libvirt/virtlockd-admin-sock
Service=container-virtlockd.service
SocketMode=0600

[Install]
WantedBy=sockets.target
07070100000011000081a40000000000000000000000016538743500000107000000000000000000000000000000000000002300000000container/systemd/virtlockd.socket[Unit]
Description=Virtual machine lock manager socket
Before=container-virtqemud.service
Before=container-virtlockd.service

[Socket]
ListenStream=/run/libvirt/virtlockd-sock
Service=container-virtlockd.service
SocketMode=0600

[Install]
WantedBy=sockets.target
07070100000012000081a40000000000000000000000016538743500000115000000000000000000000000000000000000002800000000container/systemd/virtlogd-admin.socket[Unit]
Description=Virtual machine log manager socket
Before=container-virtqemud.service
BindsTo=virtlogd.socket
After=virtlogd.socket

[Socket]
ListenStream=/run/libvirt/virtlogd-admin-sock
Service=container-virtlogd.service
SocketMode=0600

[Install]
WantedBy=sockets.target
07070100000013000081a40000000000000000000000016538743500000103000000000000000000000000000000000000002200000000container/systemd/virtlogd.socket[Unit]
Description=Virtual machine log manager socket
Before=container-virtqemud.service
Before=container-virtlogd.service

[Socket]
ListenStream=/run/libvirt/virtlogd-sock
Service=container-virtlogd.service
SocketMode=0600

[Install]
WantedBy=sockets.target
07070100000014000081a40000000000000000000000016538743500000123000000000000000000000000000000000000002c00000000container/systemd/virtnetworkd-admin.socket[Unit]
Description=Libvirt network admin socket
Before=container-virtnetworkd.service
BindsTo=virtnetworkd.socket
After=virtnetworkd.socket


[Socket]
ListenStream=/run/libvirt/virtnetworkd-admin-sock
Service=container-virtnetworkd.service
SocketMode=0600

[Install]
WantedBy=sockets.target
07070100000015000081a4000000000000000000000001653874350000012a000000000000000000000000000000000000002900000000container/systemd/virtnetworkd-ro.socket[Unit]
Description=Libvirt network local read-only socket
Before=container-virtnetworkd.service
BindsTo=virtnetworkd.socket
After=virtnetworkd.socket


[Socket]
ListenStream=/run/libvirt/virtnetworkd-sock-ro
Service=container-virtnetworkd.service
SocketMode=0666

[Install]
WantedBy=sockets.target
07070100000016000081a400000000000000000000000165387435000000f8000000000000000000000000000000000000002600000000container/systemd/virtnetworkd.socket[Unit]
Description=Libvirt network local socket
Before=container-virtnetworkd.service


[Socket]
ListenStream=/run/libvirt/virtnetworkd-sock
Service=container-virtnetworkd.service
SocketMode=0666
RemoveOnStop=yes

[Install]
WantedBy=sockets.target
07070100000017000081a40000000000000000000000016538743500000123000000000000000000000000000000000000002c00000000container/systemd/virtnodedevd-admin.socket[Unit]
Description=Libvirt nodedev admin socket
Before=container-virtnodedevd.service
BindsTo=virtnodedevd.socket
After=virtnodedevd.socket


[Socket]
ListenStream=/run/libvirt/virtnodedevd-admin-sock
Service=container-virtnodedevd.service
SocketMode=0600

[Install]
WantedBy=sockets.target
07070100000018000081a4000000000000000000000001653874350000012a000000000000000000000000000000000000002900000000container/systemd/virtnodedevd-ro.socket[Unit]
Description=Libvirt nodedev local read-only socket
Before=container-virtnodedevd.service
BindsTo=virtnodedevd.socket
After=virtnodedevd.socket


[Socket]
ListenStream=/run/libvirt/virtnodedevd-sock-ro
Service=container-virtnodedevd.service
SocketMode=0666

[Install]
WantedBy=sockets.target
07070100000019000081a400000000000000000000000165387435000000f8000000000000000000000000000000000000002600000000container/systemd/virtnodedevd.socket[Unit]
Description=Libvirt nodedev local socket
Before=container-virtnodedevd.service


[Socket]
ListenStream=/run/libvirt/virtnodedevd-sock
Service=container-virtnodedevd.service
SocketMode=0666
RemoveOnStop=yes

[Install]
WantedBy=sockets.target
0707010000001a000081a40000000000000000000000016538743500000129000000000000000000000000000000000000002d00000000container/systemd/virtnwfilterd-admin.socket[Unit]
Description=Libvirt nwfilter admin socket
Before=container-virtnwfilterd.service
BindsTo=virtnwfilterd.socket
After=virtnwfilterd.socket


[Socket]
ListenStream=/run/libvirt/virtnwfilterd-admin-sock
Service=container-virtnwfilterd.service
SocketMode=0600

[Install]
WantedBy=sockets.target
0707010000001b000081a40000000000000000000000016538743500000130000000000000000000000000000000000000002a00000000container/systemd/virtnwfilterd-ro.socket[Unit]
Description=Libvirt nwfilter local read-only socket
Before=container-virtnwfilterd.service
BindsTo=virtnwfilterd.socket
After=virtnwfilterd.socket


[Socket]
ListenStream=/run/libvirt/virtnwfilterd-sock-ro
Service=container-virtnwfilterd.service
SocketMode=0666

[Install]
WantedBy=sockets.target
0707010000001c000081a400000000000000000000000165387435000000fc000000000000000000000000000000000000002700000000container/systemd/virtnwfilterd.socket[Unit]
Description=Libvirt nwfilter local socket
Before=container-virtnwfilterd.service


[Socket]
ListenStream=/run/libvirt/virtnwfilterd-sock
Service=container-virtnwfilterd.service
SocketMode=0666
RemoveOnStop=yes

[Install]
WantedBy=sockets.target
0707010000001d000081a40000000000000000000000016538743500000181000000000000000000000000000000000000002a00000000container/systemd/virtproxyd-admin.socket[Unit]
Description=Libvirt proxy admin socket
Before=container-virtproxyd.service
BindsTo=virtproxyd.socket
After=virtproxyd.socket
Conflicts=libvirtd.socket libvirtd-ro.socket libvirtd-admin.socket libvirtd-tcp.socket libvirtd-tls.socket

[Socket]
ListenStream=/run/libvirt/virtproxyd-admin-sock
Service=container-virtproxyd.service
SocketMode=0600

[Install]
WantedBy=sockets.target
0707010000001e000081a40000000000000000000000016538743500000188000000000000000000000000000000000000002700000000container/systemd/virtproxyd-ro.socket[Unit]
Description=Libvirt proxy local read-only socket
Before=container-virtproxyd.service
BindsTo=virtproxyd.socket
After=virtproxyd.socket
Conflicts=libvirtd.socket libvirtd-ro.socket libvirtd-admin.socket libvirtd-tcp.socket libvirtd-tls.socket

[Socket]
ListenStream=/run/libvirt/virtproxyd-sock-ro
Service=container-virtproxyd.service
SocketMode=0666

[Install]
WantedBy=sockets.target
0707010000001f000081a40000000000000000000000016538743500000159000000000000000000000000000000000000002800000000container/systemd/virtproxyd-tcp.socket[Unit]
Description=Libvirt proxy non-TLS IP socket
Before=container-virtproxyd.service
BindsTo=virtproxyd.socket
After=virtproxyd.socket
Conflicts=libvirtd.socket libvirtd-ro.socket libvirtd-admin.socket libvirtd-tcp.socket libvirtd-tls.socket

[Socket]
ListenStream=16509
Service=container-virtproxyd.service

[Install]
WantedBy=sockets.target
07070100000020000081a40000000000000000000000016538743500000155000000000000000000000000000000000000002800000000container/systemd/virtproxyd-tls.socket[Unit]
Description=Libvirt proxy TLS IP socket
Before=container-virtproxyd.service
BindsTo=virtproxyd.socket
After=virtproxyd.socket
Conflicts=libvirtd.socket libvirtd-ro.socket libvirtd-admin.socket libvirtd-tcp.socket libvirtd-tls.socket

[Socket]
ListenStream=16514
Service=container-virtproxyd.service

[Install]
WantedBy=sockets.target
07070100000021000081a4000000000000000000000001653874350000015a000000000000000000000000000000000000002400000000container/systemd/virtproxyd.socket[Unit]
Description=Libvirt proxy local socket
Before=container-virtproxyd.service
Conflicts=libvirtd.socket libvirtd-ro.socket libvirtd-admin.socket libvirtd-tcp.socket libvirtd-tls.socket

[Socket]
ListenStream=/run/libvirt/virtproxyd-sock
Service=container-virtproxyd.service
SocketMode=0666
RemoveOnStop=yes

[Install]
WantedBy=sockets.target
07070100000022000081a40000000000000000000000016538743500000111000000000000000000000000000000000000002900000000container/systemd/virtqemud-admin.socket[Unit]
Description=Libvirt qemu admin socket
Before=container-virtqemud.service
BindsTo=virtqemud.socket
After=virtqemud.socket


[Socket]
ListenStream=/run/libvirt/virtqemud-admin-sock
Service=container-virtqemud.service
SocketMode=0600

[Install]
WantedBy=sockets.target
07070100000023000081a40000000000000000000000016538743500000118000000000000000000000000000000000000002600000000container/systemd/virtqemud-ro.socket[Unit]
Description=Libvirt qemu local read-only socket
Before=container-virtqemud.service
BindsTo=virtqemud.socket
After=virtqemud.socket


[Socket]
ListenStream=/run/libvirt/virtqemud-sock-ro
Service=container-virtqemud.service
SocketMode=0666

[Install]
WantedBy=sockets.target
07070100000024000081a400000000000000000000000165387435000000ec000000000000000000000000000000000000002300000000container/systemd/virtqemud.socket[Unit]
Description=Libvirt qemu local socket
Before=container-virtqemud.service


[Socket]
ListenStream=/run/libvirt/virtqemud-sock
Service=container-virtqemud.service
SocketMode=0666
RemoveOnStop=yes

[Install]
WantedBy=sockets.target
07070100000025000081a4000000000000000000000001653874350000011d000000000000000000000000000000000000002b00000000container/systemd/virtsecretd-admin.socket[Unit]
Description=Libvirt secret admin socket
Before=container-virtsecretd.service
BindsTo=virtsecretd.socket
After=virtsecretd.socket


[Socket]
ListenStream=/run/libvirt/virtsecretd-admin-sock
Service=container-virtsecretd.service
SocketMode=0600

[Install]
WantedBy=sockets.target
07070100000026000081a40000000000000000000000016538743500000124000000000000000000000000000000000000002800000000container/systemd/virtsecretd-ro.socket[Unit]
Description=Libvirt secret local read-only socket
Before=container-virtsecretd.service
BindsTo=virtsecretd.socket
After=virtsecretd.socket


[Socket]
ListenStream=/run/libvirt/virtsecretd-sock-ro
Service=container-virtsecretd.service
SocketMode=0666

[Install]
WantedBy=sockets.target
07070100000027000081a400000000000000000000000165387435000000f4000000000000000000000000000000000000002500000000container/systemd/virtsecretd.socket[Unit]
Description=Libvirt secret local socket
Before=container-virtsecretd.service


[Socket]
ListenStream=/run/libvirt/virtsecretd-sock
Service=container-virtsecretd.service
SocketMode=0666
RemoveOnStop=yes

[Install]
WantedBy=sockets.target
07070100000028000081a40000000000000000000000016538743500000123000000000000000000000000000000000000002c00000000container/systemd/virtstoraged-admin.socket[Unit]
Description=Libvirt storage admin socket
Before=container-virtstoraged.service
BindsTo=virtstoraged.socket
After=virtstoraged.socket


[Socket]
ListenStream=/run/libvirt/virtstoraged-admin-sock
Service=container-virtstoraged.service
SocketMode=0600

[Install]
WantedBy=sockets.target
07070100000029000081a4000000000000000000000001653874350000012a000000000000000000000000000000000000002900000000container/systemd/virtstoraged-ro.socket[Unit]
Description=Libvirt storage local read-only socket
Before=container-virtstoraged.service
BindsTo=virtstoraged.socket
After=virtstoraged.socket


[Socket]
ListenStream=/run/libvirt/virtstoraged-sock-ro
Service=container-virtstoraged.service
SocketMode=0666

[Install]
WantedBy=sockets.target
0707010000002a000081a400000000000000000000000165387435000000f7000000000000000000000000000000000000002600000000container/systemd/virtstoraged.socket[Unit]
Description=Libvirt storage local socket
Before=container-virtstoraged.service

[Socket]
ListenStream=/run/libvirt/virtstoraged-sock
Service=container-virtstoraged.service
SocketMode=0666
RemoveOnStop=yes

[Install]
WantedBy=sockets.target
0707010000002b000041ed0000000000000000000000016538743500000000000000000000000000000000000000000000001200000000container/systemd0707010000002c000081a40000000000000000000000016538743500000788000000000000000000000000000000000000001f00000000container/virt-install-demo.sh#!/bin/bash

set -eo pipefail

if [ -z "${CONF}" ]; then CONF=/etc/kvm-server.conf; fi
if [ -z "${DEFAULT_CONF}" ]; then DEFAULT_CONF=/etc/default/kvm-server; fi

echo "using ${CONF} as configuration file"

# Check for read only root filesystem
BIN_INSTALL_PATH=$([[ -n $(awk '$2 ~ /\/$/ && $4 ~ /(^|,)ro($|,)/' /proc/mounts) ]] && echo "/usr/local/bin" || echo "/usr/bin")

check_load_config_file() {
if [ -f "${CONF}" ]; then
    source "${CONF}"
else
    echo "!! ${CONF} not found !!"
    exit 1
fi
if [ -e "${DEFAULT_CONF}" ]; then
    source "${DEFAULT_CONF}"
fi
}

get_disk_image() {
if [ ! -f ${DATA}/${APPLIANCE}.${BACKING_FORMAT} ]; then
	pushd ${DATA}
	curl -L -o ${DATA}/${APPLIANCE}.${BACKING_FORMAT} ${APPLIANCE_MIRROR}/${APPLIANCE}.${BACKING_FORMAT}
	popd
fi
}

start_default_network() {
   ${BIN_INSTALL_PATH}/virsh net-list --inactive --name | grep -qF "default_network" && ${BIN_INSTALL_PATH}/virsh net-start default_network || echo "default_network already started"
}

get_vm_name() {
   RANDOMSTRING=$(openssl rand -hex 5)
   VMNAME=${DOMAIN}_${RANDOMSTRING}
}

# ignition is not used right now
#cp -v VM_config.ign ${DATA}

create_vm() {
${BIN_INSTALL_PATH}/virt-install \
    --connect qemu:///system \
    --import \
    --name ${VMNAME} \
    --osinfo opensusetumbleweed \
    --virt-type kvm --hvm \
    --machine q35 --boot uefi \
    --cpu host-passthrough \
    --video vga \
    --console pty,target.type=virtio \
    --autoconsole text \
    --network network=default_network \
    --rng /dev/urandom \
    --vcpu ${VCPU} --memory ${VMMEMORY} \
    --cloud-init \
    --disk path=${BACKING_STORE},bus=virtio,cache=none \
    --graphics vnc,listen=0.0.0.0,port=5950

# ignition needs another variant of image
#    --sysinfo type=fwcfg,entry0.name="opt/com.coreos/config",entry0.file="${BACKING_DIR}/VM_config.ign" \
}

check_load_config_file
get_disk_image
get_vm_name
start_default_network
create_vm
0707010000002d000041ed0000000000000000000000016538743500000000000000000000000000000000000000000000000a00000000container07070100000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000b00000000TRAILER!!!