File virt-create-rootfs.patch of Package libvirt

Index: libvirt-1.2.5/tools/Makefile.am
===================================================================
--- libvirt-1.2.5.orig/tools/Makefile.am
+++ libvirt-1.2.5/tools/Makefile.am
@@ -44,6 +44,7 @@ EXTRA_DIST = \
 	virt-sanlock-cleanup.in				\
 	virt-sanlock-cleanup.8				\
 	virt-login-shell.pod				\
+	virt-create-rootfs.pod				\
 	virsh.pod					\
 	libvirt-guests.sysconf				\
 	virsh-edit.c					\
@@ -74,6 +75,7 @@ endif WITH_SANLOCK
 if WITH_LXC
 conf_DATA += virt-login-shell.conf
 bin_PROGRAMS += virt-login-shell
+bin_SCRIPTS += virt-create-rootfs
 else ! WITH_LXC
 EXTRA_DIST += virt-login-shell.conf
 endif ! WITH_LXC
@@ -85,9 +87,9 @@ dist_man1_MANS = \
 		virt-xml-validate.1 \
 		virsh.1
 if WITH_LXC
-dist_man1_MANS += virt-login-shell.1
+dist_man1_MANS += virt-login-shell.1 virt-create-rootfs.1
 else ! WITH_LXC
-EXTRA_DIST += virt-login-shell.1
+EXTRA_DIST += virt-login-shell.1 virt-create-rootfs.1
 endif ! WITH_LXC
 if WITH_SANLOCK
 dist_man8_MANS = virt-sanlock-cleanup.8
@@ -251,6 +253,11 @@ virsh.1: virsh.pod $(top_srcdir)/configu
 	$(AM_V_GEN)$(POD2MAN) $< $(srcdir)/$@ \
 	    && if grep 'POD ERROR' $(srcdir)/$@ ; then \
 		rm $(srcdir)/$@; exit 1; fi
+
+virt-create-rootfs.1: virt-create-rootfs.pod $(top_srcdir)/configure.ac
+	$(AM_V_GEN)$(POD2MAN) $< $(srcdir)/$@ \
+	    && if grep 'POD ERROR' $(srcdir)/$@ ; then \
+		rm $(srcdir)/$@; exit 1; fi
 
 install-data-local: install-init install-systemd
 
Index: libvirt-1.2.5/tools/Makefile.in
===================================================================
--- libvirt-1.2.5.orig/tools/Makefile.in
+++ libvirt-1.2.5/tools/Makefile.in
@@ -84,9 +84,10 @@ bin_PROGRAMS = virsh$(EXEEXT) virt-host-
 @WITH_SANLOCK_TRUE@am__append_1 = virt-sanlock-cleanup
 @WITH_LXC_TRUE@am__append_2 = virt-login-shell.conf
 @WITH_LXC_TRUE@am__append_3 = virt-login-shell
+@WITH_LXC_TRUE@am__append_3_1 = virt-create-rootfs
 @WITH_LXC_FALSE@am__append_4 = virt-login-shell.conf \
-@WITH_LXC_FALSE@	virt-login-shell.1
-@WITH_LXC_TRUE@am__append_5 = virt-login-shell.1
+@WITH_LXC_FALSE@	virt-login-shell.1 virt-create-rootfs.1
+@WITH_LXC_TRUE@am__append_5 = virt-login-shell.1 virt-create-rootfs.1
 @WITH_WIN_ICON_TRUE@am__append_6 = virsh_win_icon.$(OBJEXT)
 @LIBVIRT_INIT_SCRIPT_RED_HAT_TRUE@am__append_7 = libvirt-guests.init
 @LIBVIRT_INIT_SCRIPT_SYSTEMD_TRUE@am__append_8 = libvirt-guests.service
@@ -1872,11 +1873,11 @@ EXTRA_DIST = $(ICON_FILES) $(conf_DATA)
 	virsh-network.c virsh-nodedev.c virsh-nwfilter.c virsh-pool.c \
 	virsh-secret.c virsh-snapshot.c virsh-volume.c $(am__append_4) \
 	libvirt-guests.sh.in libvirt-guests.init.in \
-	libvirt-guests.service.in
+	libvirt-guests.service.in virt-create-rootfs.pod
 DISTCLEANFILES = $(am__append_1) $(BUILT_SOURCES)
 confdir = $(sysconfdir)/libvirt
 conf_DATA = $(am__append_2)
-bin_SCRIPTS = virt-xml-validate virt-pki-validate
+bin_SCRIPTS = virt-xml-validate virt-pki-validate $(am__append_3_1)
 libexec_SCRIPTS = libvirt-guests.sh
 @WITH_SANLOCK_TRUE@sbin_SCRIPTS = virt-sanlock-cleanup
 dist_man1_MANS = virt-host-validate.1 virt-pki-validate.1 \
@@ -2872,6 +2873,11 @@ virsh.1: virsh.pod $(top_srcdir)/configu
 	$(AM_V_GEN)$(POD2MAN) $< $(srcdir)/$@ \
 	    && if grep 'POD ERROR' $(srcdir)/$@ ; then \
 		rm $(srcdir)/$@; exit 1; fi
+
+virt-create-rootfs.1: virt-create-rootfs.pod $(top_srcdir)/configure.ac
+	$(AM_V_GEN)$(POD2MAN) $< $(srcdir)/$@ \
+	    && if grep 'POD ERROR' $(srcdir)/$@ ; then \
+		rm $(srcdir)/$@; exit 1; fi
 
 install-data-local: install-init install-systemd
 
Index: libvirt-1.2.5/tools/virt-create-rootfs
===================================================================
--- /dev/null
+++ libvirt-1.2.5/tools/virt-create-rootfs
@@ -0,0 +1,197 @@
+#!/bin/sh
+
+function fail
+{
+    echo "$1"
+    exit 1
+}
+
+function print_help
+{
+cat << EOF
+virt-create-rootfs --root /path/to/rootfs [ARGS]
+
+Create a new root file system to use for distribution containers.
+
+ARGUMENTS
+
+    -h, --help          print this help and exit
+    -r, --root          path where to create the root FS
+    -d, --distro        distribution to install
+    -a, --arch          target architecture
+    -c, --regcode       registration code for the product
+    -p, --root-pass     the root password to set in the root FS
+    --dry-run           don't actually run it
+EOF
+}
+
+ARCH=$(uname -i)
+ROOT=
+DISTRO=
+REG_CODE=
+ROOT_PASS=
+DRY_RUN=
+
+while test $# -gt 0
+do
+    case $1 in
+
+    -h | --help)
+        # usage and help
+        print_help
+        ;;
+
+    -r | --root)
+        if test $# -lt 2; then
+            fail "$1 needs a value"
+        fi
+        ROOT="$2"
+        shift
+        ;;
+
+    -a | --arch)
+        if test $# -lt 2; then
+            fail "$1 needs a value"
+        fi
+        case "$2" in
+            i586 | x86_64)
+                ARCH=$2
+                shift
+                ;;
+            *)
+                fail "$1 valid values are 'i586', 'x86_64'"
+        esac
+        # Sanity checks for the arch
+        HOST_ARCH=$(uname -i)
+        case "$HOST_ARCH" in
+            i?86)
+                if test $ARCH = "x86_64"; then
+                    fail "Host won't run x86_64 container"
+                fi
+            ;;
+        esac
+        ;;
+
+    -d | --distro)
+        if test $# -lt 2; then
+            fail "$1 needs a value"
+        fi
+        case "$2" in
+            SLED-* | SLES-* | openSUSE-*)
+                DISTRO=$2
+                shift
+                ;;
+            *)
+                fail "$1 valid values are 'SLED-*', 'SLES-*', 'openSUSE-*'"
+        esac
+        ;;
+
+    -c | --regcode)
+        if test $# -lt 2; then
+            fail "$1 needs a value"
+        fi
+        REG_CODE=$2
+        shift
+        ;;
+
+    -p | --root-pass)
+        if test $# -lt 2; then
+            fail "$1 needs a value"
+        fi
+        ROOT_PASS=$2
+        shift
+        ;;
+
+    --dry-run)
+        DRY_RUN="yes"
+        ;;
+
+    *)
+        fail "Unknown option: $1"
+        ;;
+    esac
+
+    shift
+done
+
+if test -z "$ROOT"; then
+    fail "--root argument needs to be provided"
+fi
+
+RUN=
+if test "$DRY_RUN" = "yes"; then
+    RUN="echo"
+fi
+
+function call_zypper
+{
+    $RUN zypper --root "$ROOT" "$@"
+}
+
+function install_sle
+{
+    PRODUCT="$1"
+    VERSION="$2"
+
+    case "$VERSION" in
+        12.0)
+            # Transform into zypper internal version scheme
+            VERSION="12"
+            ;;
+        *)
+            fail "Unhandled SLE version: $VERSION"
+            ;;
+    esac
+
+    if test -z "$REG_CODE"; then
+        fail "Registration code is needed"
+    fi
+
+    # First copy the SUSE GPG keys from the host to the new root
+    rpm -qa gpg-pubkey\* --qf "%{name}-%{version}-%{release}: %{summary}\n" | \
+    grep 'gpg(SuSE Package Signing Key <build@suse.de>)' | \
+    while read -r line; do
+        key=$(echo $line | cut -d ':' -f 1)
+        tmpkey=$(mktemp)
+        rpm -qi $key | sed -n '/BEGIN/,/END/p' > "$tmpkey"
+        rpm --root "$ROOT" --import "$tmpkey"
+        rm "$tmpkey"
+    done
+
+    # SUSE Connect adds the repositories, and refreshes them,
+    # but requires the GPG key to be already imported
+    $RUN SUSEConnect -p "$PRODUCT/$VERSION/$ARCH" --root "$ROOT" -r "$REG_CODE"
+
+    # Then we install what we need
+    call_zypper in -t pattern Minimal
+}
+
+case "$DISTRO" in
+    SLED-*)
+        install_sle "SLED" "${DISTRO:5}"
+        ;;
+    SLES-*)
+        install_sle "SLES" "${DISTRO:5}"
+        ;;
+
+    openSUSE-*)
+        VERSION=${DISTRO:9}
+        case "$VERSION" in
+            13.1)
+                REPO="http://download.opensuse.org/distribution/13.1/repo/oss/"
+                UPDATE_REPO="http://download.opensuse.org/update/13.1/"
+                ;;
+            *)
+                fail "Unhandled openSUSE version: $VERSION"
+                ;;
+        esac
+        call_zypper ar "$REPO" "openSUSE"
+        call_zypper ar "$UPDATE_REPO" "openSUSE update"
+        call_zypper in --no-recommends -t pattern base
+        ;;
+esac
+
+if test "$DRY_RUN" != "yes"; then
+    echo "pts/0" >> "$ROOT/etc/securetty"
+    if test -n "$ROOT_PASS"; then echo "root:$ROOT_PASS" | chroot "$ROOT" /usr/sbin/chpasswd; else chroot "$ROOT" /usr/bin/passwd; fi
+fi
Index: libvirt-1.2.5/tools/virt-create-rootfs.pod
===================================================================
--- /dev/null
+++ libvirt-1.2.5/tools/virt-create-rootfs.pod
@@ -0,0 +1,72 @@
+=head1 NAME
+
+virt-create-rootfs - tool to create a root file system for distro containers.
+
+=head1 SYNOPSIS
+
+B<virt-create-rootfs> [I<OPTION>]
+
+=head1 DESCRIPTION
+
+The B<virt-create-rootfs> program is a shell script setting up a root file
+system for a distribution container.
+
+The basic structure of most virt-create-rootfs usage is:
+
+  virt-create-rootfs -r /path/to/root -d distro-name
+
+=head1 OPTIONS
+
+=over
+
+=item B<-h, --help>
+
+Display command line help usage then exit.
+
+=item B<-r, --root>
+
+Set the path where to create the new root file system.
+
+=item B<-d, --distro>
+
+Set the name of distribution to use for the root file system.
+
+As of now, only SLED-I<XXX>, SLES-I<XXX> and openSUSE-I<XXX> are implemented
+where I<XXX> is the version number. Note that SUSEConnect is required to
+handle SLE distributions.
+
+=item B<-a, --arch>
+
+Set the target architecture of the root file system to either i586 or x86_64.
+
+=item B<-c, --regcode>
+
+Set the registration code for the product to install in the root file system.
+For SLE distributions, use a registration code from SUSE Customer Center.
+
+=item B<-p, --root-pass>
+
+If defined, set the root password for the new root file system.
+
+=item B<--dry-run>
+
+Don't do anything, just report what would be done.
+
+=back
+
+=head1 COPYRIGHT
+
+Copyright (C) 2014 SUSE LINUX Products GmbH, Nuernberg, Germany.
+
+=head1 LICENSE
+
+virt-create-rootfs is distributed under the terms of the GNU LGPL v2+.
+This is free software; see the source for copying conditions. There
+is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR
+PURPOSE.
+
+=head1 SEE ALSO
+
+L<virsh(1)>, L<http://www.libvirt.org/>
+
+=cut