File mgradm.obscpio of Package uyuni-tools
07070100000000000081a400000000000000000000000168ed21dd000013a1000000000000000000000000000000000000001c00000000mgradm/cmd/backup/backup.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package backup
import (
"errors"
"fmt"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/backup/create"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/backup/restore"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/backup/shared"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
func newCreateCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[shared.Flagpole]) *cobra.Command {
var flags shared.Flagpole
createCmd := &cobra.Command{
Use: "create output-directory",
Args: cobra.ExactArgs(1),
Short: L("Create backup"),
Long: L("Create backup of the already configured Uyuni system"),
RunE: func(cmd *cobra.Command, args []string) error {
return utils.CommandHelper(globalFlags, cmd, args, &flags, nil, run)
},
}
createCmd.Flags().StringSlice("skipvolumes", []string{}, L("Skip backup of selected volumes"))
createCmd.Flags().StringSlice("extravolumes", []string{}, L("Backup additional volumes to the build-in ones"))
createCmd.Flags().Bool("skipdatabase", false, L("Do not backup database volume, allow online backup."))
createCmd.Flags().Bool("skipimages", false, L("Do not backup container images"))
createCmd.Flags().Bool("skipconfig", false, L("Do not backup podman configuration. On restore defaults will be used"))
createCmd.Flags().Bool("norestart", false, L("Do not restart services after backup is done"))
createCmd.Flags().Bool("dryrun", false, L("Print expected actions, but no action is done"))
return createCmd
}
func newRestoreCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[shared.Flagpole]) *cobra.Command {
var flags shared.Flagpole
restoreCmd := &cobra.Command{
Use: "restore directory",
Args: cobra.ExactArgs(1),
Short: L("Restore backup from the directory"),
Long: L("Restore backup of the previously configured Uyuni system from a specified directory"),
RunE: func(cmd *cobra.Command, args []string) error {
return utils.CommandHelper(globalFlags, cmd, args, &flags, nil, run)
},
}
restoreCmd.Flags().StringSlice("skipvolumes", []string{}, L("Skip restore of selected volumes"))
restoreCmd.Flags().Bool("skipdatabase", false, L("Do not restore database volume"))
restoreCmd.Flags().Bool("skipimages", false, L("Skip restore of container images"))
restoreCmd.Flags().Bool("skipconfig", false, L("Do not restore podman configuration. Defaults will be used"))
restoreCmd.Flags().Bool("restart", false, L("Restart service after restore is done"))
restoreCmd.Flags().Bool("dryRun", false, L("Print expected actions, but no action is done"))
restoreCmd.Flags().Bool("force", false, L("Force overwrite of existing items"))
restoreCmd.Flags().Bool("continue", false, L("Skip existing items and restore the rest"))
restoreCmd.Flags().Bool("skipverify", false, L("Skip verification of the backup files"))
return restoreCmd
}
// NewCommand returns the backup management command with its create and restore subcommands.
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
backupCmd := &cobra.Command{
Use: "backup",
GroupID: "tool",
Short: L("Backup solution"),
Long: L("Tools for local backup management"),
}
backupCmd.AddCommand(newCreateCmd(globalFlags, doBackup))
backupCmd.AddCommand(newRestoreCmd(globalFlags, doRestore))
return backupCmd
}
// doBackup runs the backup and converts backup errors into a unified error message.
func doBackup(
global *types.GlobalFlags,
flags *shared.Flagpole,
cmd *cobra.Command,
args []string,
) error {
outputDirectory := args[0]
err := create.Create(global, flags, cmd, args)
if err != nil {
var backupError *shared.BackupError
ok := errors.As(err, &backupError)
if ok {
// l10n-ignore
log.Error().Msgf("%s", backupError.Err.Error())
if backupError.Abort && backupError.DataRemains {
return fmt.Errorf(L("Backup aborted, partially backed up files remains in '%s'"), outputDirectory)
}
if !backupError.Abort {
// nolint:lll
return errors.New(L("Important data were backed up successfully, but errors were present. Restore will use default values where needed"))
}
}
return err
}
return nil
}
// doRestore runs the restore and converts restore errors into a unified error message.
func doRestore(
global *types.GlobalFlags,
flags *shared.Flagpole,
cmd *cobra.Command,
args []string,
) error {
err := restore.Restore(global, flags, cmd, args)
if err != nil {
var backupError *shared.BackupError
ok := errors.As(err, &backupError)
if ok {
log.Warn().Err(backupError).Msgf(L("Encountered problems:"))
if backupError.Abort && backupError.DataRemains {
return errors.New(L("Restore aborted with partially restored files. Resolve the error and try again"))
}
if !backupError.Abort {
return errors.New(L("Important data were restored successfully, but with warnings"))
}
}
return err
}
return nil
}
07070100000001000081a400000000000000000000000168ed21dd00001c7a000000000000000000000000000000000000002300000000mgradm/cmd/backup/create/create.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package create
import (
"errors"
"fmt"
"os"
"path"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/backup/shared"
podman_mgradm "github.com/uyuni-project/uyuni-tools/mgradm/shared/podman"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
var runCmdOutput = utils.RunCmdOutput
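// Create backs up the container volumes, container images, systemd units and podman
// configuration of an existing deployment into the given output directory.
// The server services are stopped while the database volumes are backed up and restarted
// afterwards unless --norestart is used.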
func Create(
_ *types.GlobalFlags,
flags *shared.Flagpole,
_ *cobra.Command,
args []string,
) error {
dryRun := flags.DryRun
outputDirectory := args[0]
printIntro(outputDirectory, flags)
if err := SanityChecks(outputDirectory); err != nil {
return shared.AbortError(err, false)
}
volumesBackupPath := path.Join(outputDirectory, shared.VolumesSubdir)
imagesBackupPath := path.Join(outputDirectory, shared.ImagesSubdir)
if err := prepareOuputDirs([]string{outputDirectory, volumesBackupPath, imagesBackupPath}, dryRun); err != nil {
return shared.AbortError(err, false)
}
volumes := gatherVolumesToBackup(flags.ExtraVolumes, flags.SkipVolumes, flags.SkipDatabase)
images := gatherContainerImagesToBackup(flags.SkipImages)
if !dryRun {
if err := shared.StorageCheck(volumes, images, outputDirectory); err != nil {
return shared.AbortError(err, false)
}
}
// stop service if database is to be backed up. Otherwise do a live backup
serviceStopped := false
if !flags.SkipDatabase && !dryRun {
log.Info().Msg(L("Stopping server service"))
if err := podman_mgradm.StopServices(); err != nil {
return shared.AbortError(err, false)
}
serviceStopped = true
}
if err := backupVolumes(volumes, volumesBackupPath, dryRun); err != nil {
return shared.AbortError(err, true)
}
// Remaining backups are not critical, restore can create default values
// so let's only track if there was an error
hasError := backupContainerImages(images, imagesBackupPath, dryRun)
// systemd configuration backup is optional as we have defaults to use
hasError = utils.JoinErrors(hasError, backupSystemdServices(outputDirectory, dryRun))
// podman configuration backup is optional as we have defaults to use
hasError = utils.JoinErrors(hasError, backupPodmanConfiguration(outputDirectory, dryRun))
// start service if it was stopped before
if serviceStopped && !flags.NoRestart && !dryRun {
log.Info().Msg(L("Restarting server service"))
hasError = utils.JoinErrors(hasError, podman_mgradm.StartServices())
}
log.Info().Msgf(L("Backup finished into %s"), outputDirectory)
return shared.ReportError(hasError)
}
func printIntro(outputDir string, flags *shared.Flagpole) {
log.Debug().Msg("Creating backup with options:")
log.Debug().Msgf("output directory: %s", outputDir)
log.Debug().Msgf("dry run: %t", flags.DryRun)
log.Debug().Msgf("skip database: %t", flags.SkipDatabase)
log.Debug().Msgf("skip config: %t", flags.SkipConfig)
log.Debug().Msgf("skip restart: %t", flags.NoRestart)
log.Debug().Msgf("skip images: %t", flags.SkipImages)
log.Debug().Msgf("skip volumes: %s", flags.SkipVolumes)
log.Debug().Msgf("extra volumes: %s", flags.ExtraVolumes)
}
func prepareOuputDirs(outputDirs []string, dryRun bool) error {
for _, d := range outputDirs {
if dryRun {
log.Info().Msgf(L("Would create '%s' directory"), d)
} else {
// Use MkdirAll with owner-only permissions so an already existing empty output directory is accepted
if err := os.MkdirAll(d, 0700); err != nil {
return fmt.Errorf(L("unable to create target output directory: %w"), err)
}
}
}
return nil
}
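// gatherVolumesToBackup builds the list of volumes to back up: the extra volumes, the
// database volumes unless skipped, and the server volumes not listed in skipVolumes.
// Duplicates are removed from the resulting list.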
func gatherVolumesToBackup(extraVolumes []string, skipVolumes []string, skipDatabase bool) []string {
// Construct work volume list, start with extra volumes
volumes := extraVolumes
// First add the database volumes
if !skipDatabase {
for _, volume := range utils.PgsqlRequiredVolumeMounts {
volumes = append(volumes, volume.Name)
}
}
// Special handling to skip all other volumes
if len(skipVolumes) == 1 && skipVolumes[0] == "all" {
return volumes
}
// Add other server volumes and skip if needed
for _, volume := range utils.ServerVolumeMounts {
if !utils.Contains(skipVolumes, volume.Name) {
volumes = append(volumes, volume.Name)
}
}
// Remove duplicates
var uniqueVolumes []string
for _, volume := range volumes {
if !utils.Contains(uniqueVolumes, volume) {
uniqueVolumes = append(uniqueVolumes, volume)
}
}
return uniqueVolumes
}
func backupVolumes(volumes []string, outputDirectory string, dryRun bool) error {
log.Info().Msg(L("Backing up container volumes"))
for _, volume := range volumes {
log.Debug().Msgf("Backing up %s volume", volume)
if err := podman.ExportVolume(volume, outputDirectory, dryRun); err != nil {
return err
}
}
return nil
}
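// gatherContainerImagesToBackup returns the images of the Uyuni services that are present on the host.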
func gatherContainerImagesToBackup(skipImages bool) []string {
images := []string{}
if !skipImages {
for _, service := range utils.UyuniServices {
serviceName, skip := findService(service.Name)
if skip {
continue
}
image := podman.GetServiceImage(serviceName)
if image != "" {
present, err := podman.IsImagePresent(image)
if err == nil && len(present) > 0 {
images = append(images, image)
}
}
}
}
return images
}
func backupContainerImages(images []string, outputDirectory string, dryRun bool) error {
log.Info().Msg(L("Backing up container images"))
var hasError error
for _, image := range images {
log.Debug().Msgf("Backing up image %s", image)
if err := podman.ExportImage(image, outputDirectory, dryRun); err != nil {
log.Warn().Err(err).Msgf(L("Not backing up image %s"), image)
hasError = utils.JoinErrors(hasError, err)
}
}
return hasError
}
func backupSystemdServices(outputDirectory string, dryRun bool) error {
errorMessage := L("Systemd services and configuration was not backed up")
log.Info().Msg(L("Backing up Systemd services"))
if err := exportSystemdConfiguration(outputDirectory, dryRun); err != nil {
log.Warn().Err(err).Msg(errorMessage)
return err
}
if dryRun {
return nil
}
if err := utils.CreateChecksum(path.Join(outputDirectory, shared.SystemdConfBackupFile)); err != nil {
log.Warn().Err(err).Msg(errorMessage)
return err
}
return nil
}
func backupPodmanConfiguration(outputDirectory string, dryRun bool) error {
errorMessage := L("Podman configuration was not backed up")
log.Info().Msg(L("Backing up podman configuration"))
if err := exportPodmanConfiguration(outputDirectory, dryRun); err != nil {
log.Warn().Err(err).Msg(errorMessage)
return err
}
if dryRun {
return nil
}
if err := utils.CreateChecksum(path.Join(outputDirectory, shared.PodmanConfBackupFile)); err != nil {
log.Warn().Err(err).Msg(errorMessage)
return err
}
return nil
}
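// SanityChecks verifies that podman is installed, that the output directory is missing or empty,
// and that a Uyuni server is initialized on the host.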
func SanityChecks(outputDirectory string) error {
if err := shared.SanityChecks(); err != nil {
return err
}
if utils.FileExists(outputDirectory) {
if !utils.IsEmptyDirectory(outputDirectory) {
return fmt.Errorf(L("output directory %s already exists and is not empty"), outputDirectory)
}
}
hostData, err := podman.InspectHost()
if err != nil {
return err
}
if !hostData.HasUyuniServer {
return errors.New(L("server is not initialized."))
}
return nil
}
07070100000002000081a400000000000000000000000168ed21dd00001048000000000000000000000000000000000000002900000000mgradm/cmd/backup/create/podman_utils.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package create
import (
"archive/tar"
"encoding/json"
"fmt"
"os"
"path"
"strings"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
backup "github.com/uyuni-project/uyuni-tools/mgradm/cmd/backup/shared"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
// exportPodmanConfiguration creates a tarball with the podman network and secrets configuration.
// If dryRun is true, only messages explaining what would be done are logged.
func exportPodmanConfiguration(outputDir string, dryRun bool) error {
network, errNetwork := backupPodmanNetwork(dryRun)
secrets, errPodman := backupPodmanSecrets(dryRun)
if dryRun {
return nil
}
// Create output file
out, err := os.Create(path.Join(outputDir, backup.PodmanConfBackupFile))
if err != nil {
return fmt.Errorf(L("failed to create podman backup tarball: %w"), err)
}
defer out.Close()
// Prepare tar buffer
tw := tar.NewWriter(out)
defer tw.Close()
var hasError error
if errNetwork != nil {
log.Warn().Msg(L("Network was not backed up"))
} else {
header := &tar.Header{
Name: backup.NetworkOutputFile,
Mode: 0622,
Size: int64(len(network)),
}
if err := tw.WriteHeader(header); err != nil {
hasError = err
}
if _, err := tw.Write(network); err != nil {
hasError = utils.JoinErrors(hasError, err)
}
}
if errPodman != nil {
log.Warn().Msg(L("Podman secrets were not backed up"))
} else {
header := &tar.Header{
Name: backup.SecretBackupFile,
Mode: 0600,
Size: int64(len(secrets)),
}
if err := tw.WriteHeader(header); err != nil {
hasError = utils.JoinErrors(hasError, err)
}
if _, err := tw.Write(secrets); err != nil {
hasError = utils.JoinErrors(hasError, err)
}
}
return hasError
}
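// backupPodmanNetwork returns the output of podman network inspect for the Uyuni network.
// In dry run mode only the command that would be run is logged.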
func backupPodmanNetwork(dryRun bool) ([]byte, error) {
networkExportCommand := []string{"podman", "network", "inspect", podman.UyuniNetwork}
if dryRun {
log.Info().Msgf(L("Would run %s"), strings.Join(networkExportCommand, " "))
return nil, nil
}
output, err := runCmdOutput(zerolog.DebugLevel, networkExportCommand[0], networkExportCommand[1:]...)
if err != nil {
log.Warn().Err(err).Msg(L("Failed to export network data"))
return nil, err
}
return output, nil
}
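// backupPodmanSecrets maps the podman secret names to their IDs, reads the corresponding
// values from the file driver storage and returns them encoded as JSON.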
func backupPodmanSecrets(dryRun bool) ([]byte, error) {
const secretFile = "/var/lib/containers/storage/secrets/filedriver/secretsdata.json"
secretListCommand := []string{"podman", "secret", "ls", "--format", "{{range .}}{{.Name}}:{{.ID}},{{end}}"}
if dryRun {
log.Info().Msgf(L("Would run %s"), strings.Join(secretListCommand, " "))
return nil, nil
}
output, err := runCmdOutput(zerolog.DebugLevel, secretListCommand[0], secretListCommand[1:]...)
if err != nil {
log.Warn().Err(err).Msg(L("Failed to export secrets data"))
return nil, err
}
type SecretMap struct {
Name string
ID string
}
secretMappings := []SecretMap{}
for _, v := range strings.Split(string(output), ",") {
tmp := strings.SplitN(v, ":", 2)
// Ignore entries of a different length, usually the trailing empty string
if len(tmp) == 2 {
secretMappings = append(secretMappings, SecretMap{Name: tmp[0], ID: tmp[1]})
}
}
// load secretFile as json
output, err = os.ReadFile(secretFile)
if err != nil {
log.Warn().Err(err).Msg(L("Failed to read secrets data"))
return nil, err
}
var podmanSecrets map[string]string
if err := json.Unmarshal(output, &podmanSecrets); err != nil {
log.Warn().Err(err).Msg(L("Unable to decode podman secrets"))
return nil, err
}
// store id -> secret file in tar ball location
backupSecretMap := []backup.BackupSecretMap{}
for _, secretMap := range secretMappings {
for secretID, secretValue := range podmanSecrets {
if secretMap.ID == secretID {
backupSecretMap = append(backupSecretMap, backup.BackupSecretMap{Name: secretMap.Name, Secret: secretValue})
}
}
}
output, err = json.Marshal(backupSecretMap)
if err != nil {
log.Warn().Err(err).Msg(L("Unable to encode secrets backup"))
return nil, err
}
return output, nil
}
07070100000003000081a400000000000000000000000168ed21dd00000b2c000000000000000000000000000000000000002a00000000mgradm/cmd/backup/create/systemd_utils.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package create
import (
"archive/tar"
"fmt"
"io"
"os"
"path"
"path/filepath"
"strings"
"github.com/rs/zerolog/log"
backup "github.com/uyuni-project/uyuni-tools/mgradm/cmd/backup/shared"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
var systemd podman.Systemd = podman.NewSystemd()
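// exportSystemdConfiguration writes the systemd unit files and drop-in directories of the
// Uyuni services into a tarball in outputDir.
// In dry run mode only the files that would be backed up are logged.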
func exportSystemdConfiguration(outputDir string, dryRun bool) error {
filesToBackup := gatherSystemdItems()
if dryRun {
log.Info().Msgf(L("Would backup %s"), filesToBackup)
return nil
}
// Create output file
out, err := os.Create(path.Join(outputDir, backup.SystemdConfBackupFile))
if err != nil {
return fmt.Errorf(L("failed to create Systemd backup tarball: %w"), err)
}
defer out.Close()
// Prepare tar buffer
tw := tar.NewWriter(out)
defer tw.Close()
for _, fileToBackup := range filesToBackup {
f, err := os.Open(fileToBackup)
if err != nil {
return err
}
fstat, _ := f.Stat()
h, err := tar.FileInfoHeader(fstat, "")
if err != nil {
return err
}
// Produced header does not have full path, overwrite it
h.Name = fileToBackup
if fstat.IsDir() {
h.Name += "/"
}
if err := tw.WriteHeader(h); err != nil {
return err
}
if fstat.IsDir() {
continue
}
if _, err := io.Copy(tw, f); err != nil {
return err
}
}
return nil
}
// gatherSystemdItems returns, for each container service, the unit file, its service.d directory and its contents.
func gatherSystemdItems() []string {
result := []string{}
for _, service := range utils.UyuniServices {
serviceName, skip := findService(service.Name)
if skip {
continue
}
servicePath, err := systemd.GetServiceProperty(serviceName, podman.FragmentPath)
if err != nil {
log.Debug().Err(err).Msgf("failed to get the path to the %s service file", serviceName)
// Skipping the dropins since we would likely get a similar error.
continue
}
result = append(result, servicePath)
// Get the drop in files
dropIns, err := systemd.GetServiceProperty(serviceName, podman.DropInPaths)
if err != nil {
log.Debug().Err(err).Msgf("failed to get the path to the %s service configuration files", serviceName)
} else {
dropIns := strings.Split(dropIns, " ")
result = append(result, filepath.Dir(dropIns[0]))
result = append(result, dropIns[:]...)
}
}
return result
}
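// findService resolves the systemd unit name for a service, falling back to the template
// unit (name@). skip is true when neither variant exists.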
func findService(name string) (serviceName string, skip bool) {
skip = false
serviceName = name
if !systemd.HasService(serviceName) {
// With optional services or multiple replicas we have a service template; check if that unit exists at all
serviceName = name + "@"
if !systemd.HasService(serviceName) {
log.Debug().Msgf("No service found for %s, skipping", name)
skip = true
}
}
return
}
07070100000004000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001900000000mgradm/cmd/backup/create07070100000005000081a400000000000000000000000168ed21dd000017e3000000000000000000000000000000000000002a00000000mgradm/cmd/backup/restore/podman_utils.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package restore
import (
"archive/tar"
"encoding/base64"
"encoding/json"
"errors"
"io"
"os"
"github.com/rs/zerolog/log"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/backup/shared"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
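// restorePodmanConfiguration reads the podman configuration tarball and restores the
// network and secrets entries it contains.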
func restorePodmanConfiguration(podmanBackupFile string, flags *shared.Flagpole) error {
// Read tarball
backupFile, err := os.Open(podmanBackupFile)
if err != nil {
return err
}
defer backupFile.Close()
var hasError error
tr := tar.NewReader(backupFile)
for {
header, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
switch header.Name {
case shared.NetworkOutputFile:
hasError = utils.JoinErrors(hasError, restorePodmanNetwork(header, tr, flags))
case shared.SecretBackupFile:
hasError = utils.JoinErrors(hasError, restorePodmanSecrets(header, tr, flags))
default:
log.Warn().Msgf(L("Ignoring unexpected file in the podman backup %s"), header.Name)
}
}
return hasError
}
// parseNetworkData decodes the stored podman network inspect result.
// We are not interested in all the data, so only the interesting bits are decoded.
func parseNetworkData(data []byte) (networkDetails shared.PodmanNetworkConfigData, err error) {
var networkData []map[string]json.RawMessage
if err = json.Unmarshal(data, &networkData); err != nil {
log.Warn().Msg(L("Unable to decode network data backup"))
return
}
err = errors.New(L("Incorrect network data backup"))
if len(networkData) != 1 {
return
}
if _, ok := networkData[0]["subnets"]; !ok {
return
}
if _, ok := networkData[0]["network_interface"]; !ok {
return
}
// Optional
if _, ok := networkData[0]["network_dns_servers"]; ok {
if err = json.Unmarshal(networkData[0]["network_dns_servers"], &networkDetails.NetworkDNSServers); err != nil {
return
}
}
if err = json.Unmarshal(networkData[0]["subnets"], &networkDetails.Subnets); err != nil {
return
}
if err = json.Unmarshal(networkData[0]["network_interface"], &networkDetails.NetworkInsterface); err != nil {
return
}
return networkDetails, nil
}
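// defaultPodmanNetwork creates the Uyuni podman network with default settings,
// replacing an existing network only when --force is used.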
func defaultPodmanNetwork(flags *shared.Flagpole) error {
if podman.IsNetworkPresent(podman.UyuniNetwork) {
if flags.ForceRestore {
podman.DeleteNetwork(false)
} else {
return errors.New(L("podman network already exists"))
}
}
if err := podman.SetupNetwork(false); err != nil {
log.Error().
Msg(L("Unable to create podman network! Check the error and create network manually before starting the service"))
return err
}
return nil
}
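// restorePodmanNetwork recreates the Uyuni podman network from the backed up configuration,
// falling back to the default network when the backup cannot be read or decoded.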
func restorePodmanNetwork(header *tar.Header, tr *tar.Reader, flags *shared.Flagpole) error {
if flags.DryRun {
log.Info().Msgf(L("Would restore network configuration"))
return nil
}
data := make([]byte, header.Size)
if _, err := tr.Read(data); err != io.EOF {
log.Warn().Msg(L("Failed to read backed up network configuration, trying default"))
return utils.JoinErrors(err, defaultPodmanNetwork(flags))
}
log.Trace().Msgf("Loaded network data: %s", data)
networkDetails, err := parseNetworkData(data)
if err != nil {
log.Warn().Err(err).Msg(L("Failed to decode backed up network configuration, trying default"))
return utils.JoinErrors(err, defaultPodmanNetwork(flags))
}
if podman.IsNetworkPresent(podman.UyuniNetwork) {
if flags.ForceRestore {
podman.DeleteNetwork(false)
} else {
log.Warn().Msg(L("Podman network already exists, not restoring unless forced"))
return errors.New(L("podman network already exists"))
}
}
command := []string{"podman", "network", "create", "--interface-name", networkDetails.NetworkInsterface}
for _, v := range networkDetails.Subnets {
command = append(command, "--subnet", v.Subnet, "--gateway", v.Gateway)
}
for _, v := range networkDetails.NetworkDNSServers {
command = append(command, "--dns", v)
}
command = append(command, podman.UyuniNetwork)
log.Info().Msg(L("Restoring podman network"))
if err := runCmd(command[0], command[1:]...); err != nil {
log.Error().Err(err).Msg(L("Unlable to create podman network"))
return err
}
return nil
}
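// parseSecretsData decodes the backed up secrets JSON and base64-decodes each secret value.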
func parseSecretsData(data []byte) ([]shared.BackupSecretMap, error) {
secrets := []shared.BackupSecretMap{}
if err := json.Unmarshal(data, &secrets); err != nil {
log.Warn().Err(err).Msg(L("Unable to decode podman secrets"))
return nil, err
}
decodedSecrets := make([]shared.BackupSecretMap, len(secrets))
for i, v := range secrets {
decoded, err := base64.StdEncoding.DecodeString(v.Secret)
if err != nil {
log.Warn().Msgf(L("Unable to decode secret %s, using as is"), v.Name)
// Keep the secret value as stored in the backup when it is not valid base64
decodedSecrets[i] = v
} else {
decodedSecrets[i] = shared.BackupSecretMap{
Name: v.Name,
Secret: string(decoded),
}
}
}
return decodedSecrets, nil
}
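// restorePodmanSecrets recreates the podman secrets from the backup, replacing existing
// secrets only when --force is used.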
func restorePodmanSecrets(header *tar.Header, tr *tar.Reader, flags *shared.Flagpole) error {
if flags.DryRun {
log.Info().Msgf(L("Would restore podman secrets"))
return nil
}
data := make([]byte, header.Size)
if _, err := tr.Read(data); err != io.EOF {
log.Warn().Msg(L("Failed to read backed up podman secrets, no secrets were restored"))
return err
}
secrets, err := parseSecretsData(data)
if err != nil {
log.Warn().Msg(L("Failed to decode backed up podman secrets, no secrets were restored"))
return err
}
var hasError error
log.Info().Msg(L("Restoring podman secrets"))
baseCommand := []string{"podman", "secret", "create"}
for _, v := range secrets {
if podman.IsSecretPresent(v.Name) {
if flags.ForceRestore {
baseCommand = append(baseCommand, "--replace")
} else {
log.Error().Msgf(L("Podman secret %s is already present, not restoring unless forced"), v.Name)
continue
}
}
command := append(baseCommand, v.Name, "-")
if err := runCmdInput(command[0], v.Secret, command[1:]...); err != nil {
log.Error().Msg(L("Unable to create podman secret"))
hasError = utils.JoinErrors(hasError, err)
}
}
return hasError
}
07070100000006000081a400000000000000000000000168ed21dd00001f6f000000000000000000000000000000000000002500000000mgradm/cmd/backup/restore/restore.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package restore
import (
"errors"
"fmt"
"os"
"path"
"strings"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/backup/shared"
podman_mgradm "github.com/uyuni-project/uyuni-tools/mgradm/shared/podman"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
var runCmdInput = utils.RunCmdInput
var runCmd = utils.RunCmd
var systemd = podman.NewSystemd()
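// Restore imports the container volumes, container images, podman configuration and
// systemd units from a backup directory created by the backup create command.
// The services are started afterwards only when --restart is used.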
func Restore(
_ *types.GlobalFlags,
flags *shared.Flagpole,
_ *cobra.Command,
args []string,
) error {
inputDirectory := args[0]
printIntro(inputDirectory, flags)
dryRun := flags.DryRun
// SanityCheck
if err := sanityChecks(inputDirectory, flags); err != nil {
return shared.AbortError(err, false)
}
// Gather the list of volumes and images from the backup location
// Both parses provided flags and the produced list has volumes or images
// already skipped over if needed.
volumes, err := gatherVolumesToRestore(inputDirectory, flags)
if err != nil {
return shared.AbortError(err, false)
}
images, err := gatherImagesToRestore(inputDirectory, flags)
if err != nil {
return shared.AbortError(err, false)
}
// Restore provided volumes
// An error during volume restore is considered serious, so we abort.
// --continue can be used to skip over already restored volumes once the error
// is resolved.
if err := restoreVolumes(volumes, flags, dryRun); err != nil {
return shared.AbortError(err, true)
}
// Everything below is not considered a serious error as it can be recreated from
// defaults, but there may be data loss
var hasError error
if err := restoreImages(images, dryRun); err != nil {
hasError = err
}
// Restore podman config or generate defaults
if err := restorePodmanConfig(inputDirectory, flags); err != nil {
hasError = utils.JoinErrors(hasError, err)
}
// Restore systemd config or generate defaults
if err := restoreSystemdConfig(inputDirectory, flags); err != nil {
hasError = utils.JoinErrors(hasError, err)
}
if flags.Restart {
hasError = utils.JoinErrors(hasError, podman_mgradm.StartServices())
}
return shared.ReportError(hasError)
}
func printIntro(dir string, flags *shared.Flagpole) {
log.Debug().Msg("Restoring backup with options:")
log.Debug().Msgf("input directory: %s", dir)
log.Debug().Msgf("dry run: %t", flags.DryRun)
log.Debug().Msgf("skip database: %t", flags.SkipDatabase)
log.Debug().Msgf("skip config: %t", flags.SkipConfig)
log.Debug().Msgf("skip restart: %t", flags.NoRestart)
log.Debug().Msgf("skip images: %t", flags.SkipImages)
log.Debug().Msgf("skip volumes: %s", flags.SkipVolumes)
log.Debug().Msgf("extra volumes: %s", flags.ExtraVolumes)
log.Debug().Msgf("skip existing: %t", flags.SkipExisting)
}
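// sanityChecks verifies that podman is installed, that the input directory exists and
// that no Uyuni server is already initialized unless --force is used.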
func sanityChecks(inputDirectory string, flags *shared.Flagpole) error {
if err := shared.SanityChecks(); err != nil {
return err
}
if !utils.FileExists(inputDirectory) {
return fmt.Errorf(L("input directory %s does not exists"), inputDirectory)
}
hostData, err := podman.InspectHost()
if err != nil {
return err
}
if hostData.HasUyuniServer {
if flags.ForceRestore {
log.Warn().Msg(L("Restoring over already initialized server"))
} else {
return errors.New(L("server is already initialized. Use force to overwrite"))
}
}
return nil
}
// gatherVolumesToRestore produces the list of volumes to be imported.
// It takes the list from the backup source and checks whether each volume already exists
// or is to be skipped.
// The special `--skipvolumes all` handling returns an empty list.
func gatherVolumesToRestore(source string, flags *shared.Flagpole) ([]string, error) {
skipVolumes := flags.SkipVolumes
if len(skipVolumes) == 1 && skipVolumes[0] == "all" {
log.Debug().Msg("Skipping restoring of volumes")
return []string{}, nil
}
volumeDir := path.Join(source, "volumes")
if !utils.FileExists(volumeDir) {
return []string{}, errors.New(L("No volumes found in the backup"))
}
volumes, err := os.ReadDir(volumeDir)
if err != nil {
return nil, errors.New(L("Unable to read directory with the volumes"))
}
output := []string{}
for _, v := range volumes {
if strings.HasSuffix(v.Name(), "sha256sum") {
// This is checksum file, ignore
continue
}
volName := strings.TrimSuffix(v.Name(), ".tar")
// Skip volumes set as skipvolume option
if utils.Contains(skipVolumes, volName) {
log.Info().Msgf(L("Skipping volume %s"), volName)
continue
}
// Skip database volumes if the skipdatabase option is used
if flags.SkipDatabase {
isDatabaseVolume := false
for _, v := range utils.PgsqlRequiredVolumeMounts {
if volName == v.Name {
isDatabaseVolume = true
break
}
}
if isDatabaseVolume {
log.Info().Msgf(L("Skipping database volume %s"), volName)
continue
}
}
if podman.IsVolumePresent(volName) {
if flags.SkipExisting {
log.Info().Msgf(L("Not restoring existing volume %s"), volName)
continue
}
if !flags.ForceRestore {
return nil, fmt.Errorf(L("Not restoring existing volume %s unless forced"), volName)
}
log.Info().Msgf(L("Volume %s will be overwriten"), volName)
}
output = append(output, path.Join(volumeDir, v.Name()))
}
return output, nil
}
// gatherImagesToRestore produces the list of images to be imported.
// It returns an empty list when image restore is skipped.
func gatherImagesToRestore(source string, flags *shared.Flagpole) ([]string, error) {
if flags.SkipImages {
log.Debug().Msg("Skipping restoring of images")
return []string{}, nil
}
imagesDir := path.Join(source, "images")
if !utils.FileExists(imagesDir) {
return []string{}, errors.New(L("No images found in the backup"))
}
images, err := os.ReadDir(imagesDir)
if err != nil {
return nil, errors.New(L("Unable to read directory with the images"))
}
output := []string{}
for _, image := range images {
if !strings.HasSuffix(image.Name(), ".tar") {
continue
}
output = append(output, path.Join(imagesDir, image.Name()))
}
return output, nil
}
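// restoreVolumes imports the given volume tarballs, deriving each volume name from the archive file name.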
func restoreVolumes(volumes []string, flags *shared.Flagpole, dryRun bool) error {
for _, volume := range volumes {
volName := strings.TrimSuffix(volume, ".tar")
_, volName = path.Split(volName)
if err := podman.ImportVolume(volName, volume, flags.SkipVerify, dryRun); err != nil {
return err
}
}
return nil
}
func restoreImages(images []string, dryRun bool) error {
var hasErrors error
for _, image := range images {
if err := podman.RestoreImage(image, dryRun); err != nil {
hasErrors = utils.JoinErrors(hasErrors, err)
}
}
return hasErrors
}
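// restorePodmanConfig validates and restores the podman configuration backup,
// falling back to a default network when the backup file is missing.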
func restorePodmanConfig(inputDirectory string, flags *shared.Flagpole) error {
podmanConfigFile := path.Join(inputDirectory, shared.PodmanConfBackupFile)
if !utils.FileExists(podmanConfigFile) {
log.Warn().Msg(L("podman config backup not found in the backup location, trying defaults"))
return defaultPodmanNetwork(flags)
}
if !flags.SkipVerify {
if err := utils.ValidateChecksum(podmanConfigFile); err != nil {
return utils.JoinErrors(err, errors.New(L("Unable to validate podman backup file")))
}
}
return restorePodmanConfiguration(podmanConfigFile, flags)
}
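// restoreSystemdConfig validates and restores the systemd configuration backup,
// generating default services when the backup file is missing, and reloads the systemd daemon.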
func restoreSystemdConfig(inputDirectory string, flags *shared.Flagpole) error {
log.Info().Msgf(L("Restoring systemd configuration"))
systemdConfigFile := path.Join(inputDirectory, shared.SystemdConfBackupFile)
if !utils.FileExists(systemdConfigFile) {
log.Warn().Msg(L("systemd backup not found in the backup location, generating defaults"))
return generateDefaltSystemdServices(flags)
}
if !flags.SkipVerify {
if err := utils.ValidateChecksum(systemdConfigFile); err != nil {
return utils.JoinErrors(err, errors.New(L("Unable to validate systemd backup file")))
}
}
if err := restoreSystemdConfiguration(systemdConfigFile, flags); err != nil {
return err
}
return systemd.ReloadDaemon(flags.DryRun)
}
07070100000007000081a400000000000000000000000168ed21dd00000d08000000000000000000000000000000000000002b00000000mgradm/cmd/backup/restore/systemd_utils.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package restore
import (
"archive/tar"
"fmt"
"io"
"os"
"path/filepath"
"github.com/rs/zerolog/log"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/backup/shared"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/pgsql"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/podman"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
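// restoreSystemdConfiguration extracts the systemd configuration tarball, recreating
// directories and files with their original attributes.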
func restoreSystemdConfiguration(backupSource string, flags *shared.Flagpole) error {
backupFile, err := os.Open(backupSource)
if err != nil {
return err
}
defer backupFile.Close()
var hasError error
tr := tar.NewReader(backupFile)
for {
header, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
if flags.DryRun {
log.Info().Msgf(L("Would restore %s"), header.Name)
continue
}
log.Debug().Msgf("Restoring systemd file %s", header.Name)
switch header.Typeflag {
case tar.TypeDir:
if err := os.MkdirAll(header.Name, header.FileInfo().Mode()); err != nil {
log.Warn().Msgf(L("Unable to create directory %s"), header.Name)
hasError = utils.JoinErrors(hasError, err)
continue
}
case tar.TypeReg:
if err := restoreSystemdFile(header, tr); err != nil {
hasError = utils.JoinErrors(hasError, err)
continue
}
default:
log.Warn().Msgf(L("Unknown filetype of %s"), header.Name)
continue
}
if err := restoreFileAttributes(header.Name, header); err != nil {
log.Warn().Err(err).Msgf(L("Unable to restore file details for %s"), header.Name)
hasError = utils.JoinErrors(hasError, err)
}
}
return hasError
}
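// restoreSystemdFile writes the content of a regular tar entry to its original path,
// creating the parent directories when needed.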
func restoreSystemdFile(header *tar.Header, tr *tar.Reader) error {
// The systemd backup may not package directories anymore, so make sure they are present
if err := os.MkdirAll(filepath.Dir(header.Name), 0750); err != nil {
log.Warn().Msgf(L("Unable to create directories for %s"), header.Name)
}
fh, err := os.Create(header.Name)
if err != nil {
log.Warn().Err(err).Msgf(L("Unable to create %s"), header.Name)
return err
}
if _, err := io.Copy(fh, tr); err != nil {
log.Warn().Err(err).Msgf(L("Unable to restore content of %s"), header.Name)
fh.Close()
os.Remove(header.Name)
return err
}
fh.Close()
return nil
}
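// restoreFileAttributes restores the mode, ownership and timestamps of a file from its tar header.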
func restoreFileAttributes(filename string, th *tar.Header) error {
var e error
e = utils.JoinErrors(e, os.Chmod(filename, th.FileInfo().Mode()))
e = utils.JoinErrors(e, os.Chown(filename, th.Uid, th.Gid))
e = utils.JoinErrors(e, os.Chtimes(filename, th.AccessTime, th.ModTime))
return e
}
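// generateDefaltSystemdServices generates the default uyuni-server and uyuni-db systemd
// services, as done on a fresh installation, and reloads the systemd daemon.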
func generateDefaltSystemdServices(flags *shared.Flagpole) error {
if flags.DryRun {
log.Info().Msg(L("Would generate default systemd services"))
return nil
}
// Generate the minimal set of services (uyuni-db and uyuni-server), like on a default installation
serverImage := fmt.Sprintf("%s%s:%s", utils.ServerImage.Registry, utils.ServerImage.Name, utils.ServerImage.Tag)
dbImage := fmt.Sprintf("%s%s:%s",
utils.PostgreSQLImage.Registry,
utils.PostgreSQLImage.Name,
utils.PostgreSQLImage.Tag)
return utils.JoinErrors(
podman.GenerateSystemdService(systemd, "", serverImage, false, "", []string{}),
pgsql.GeneratePgsqlSystemdService(systemd, dbImage),
systemd.ReloadDaemon(false),
)
}
07070100000008000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001a00000000mgradm/cmd/backup/restore07070100000009000081a400000000000000000000000168ed21dd00000749000000000000000000000000000000000000002200000000mgradm/cmd/backup/shared/types.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package shared
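// Flagpole holds the command line flags shared by the backup create and restore commands.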
type Flagpole struct {
SkipVolumes []string `mapstructure:"skipvolumes"`
ExtraVolumes []string `mapstructure:"extravolumes"`
SkipDatabase bool `mapstructure:"skipdatabase"`
SkipImages bool `mapstructure:"skipimages"`
SkipConfig bool `mapstructure:"skipconfig"`
NoRestart bool `mapstructure:"norestart"`
Restart bool `mapstructure:"restart"`
DryRun bool `mapstructure:"dryrun"`
ForceRestore bool `mapstructure:"force"`
SkipExisting bool `mapstructure:"continue"`
SkipVerify bool `mapstructure:"skipverify"`
}
// BackupError indicates whether some data was already backed up (or restored) and whether the job was aborted.
type BackupError struct {
Err error
DataRemains bool
Abort bool
}
func (e *BackupError) Error() string {
return e.Err.Error()
}
func (e *BackupError) Unwrap() error {
return e.Err
}
// AbortError wraps an error with metadata indicating the error was fatal and the job was aborted.
func AbortError(err error, dataRemains bool) error {
if err == nil {
return nil
}
return &BackupError{
Err: err,
DataRemains: dataRemains,
Abort: true,
}
}
// ReportError wraps an error with metadata indicating the error was not fatal.
func ReportError(err error) error {
if err == nil {
return nil
}
return &BackupError{
Err: err,
DataRemains: true,
Abort: false,
}
}
// BackupSecretMap maps a podman secret name to its value.
type BackupSecretMap struct {
Name string
Secret string
}
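// NetworkSubnet describes a subnet and its gateway in the backed up podman network configuration.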
type NetworkSubnet struct {
Subnet string
Gateway string
}
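// PodmanNetworkConfigData holds the subset of the podman network configuration stored in the backup.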
type PodmanNetworkConfigData struct {
Subnets []NetworkSubnet `mapstructure:"subnets"`
NetworkInsterface string `mapstructure:"network_interface"`
NetworkDNSServers []string `mapstructure:"network_dns_servers"`
}
0707010000000a000081a400000000000000000000000168ed21dd00000891000000000000000000000000000000000000002200000000mgradm/cmd/backup/shared/utils.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package shared
import (
"errors"
"os"
"os/exec"
"path/filepath"
"github.com/rs/zerolog/log"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"golang.org/x/sys/unix"
)
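// Names of the files and subdirectories making up the backup layout.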
const PodmanConfBackupFile = "podmanBackup.tar"
const SystemdConfBackupFile = "systemdBackup.tar"
const NetworkOutputFile = "uyuniNetwork.json"
const SecretBackupFile = "secrets.json"
const VolumesSubdir = "volumes"
const ImagesSubdir = "images"
func StorageCheck(volumes []string, images []string, outputDirectory string) error {
// check disk space availability based on volume work list and container image list
var outStat unix.Statfs_t
if err := unix.Statfs(outputDirectory, &outStat); err != nil {
log.Warn().Err(err).Msgf(L("unable to determine target %s storage size"), outputDirectory)
}
freeSpace := outStat.Bavail * uint64(outStat.Bsize)
var spaceRequired int64
// calculate required space
for _, volume := range volumes {
mountPoint, err := podman.GetVolumeMountPoint(volume)
if err != nil {
return err
}
volumeSize, err := dirSize(mountPoint)
if err != nil {
return err
}
spaceRequired += volumeSize
}
// Calculate the size of the images
for _, image := range images {
// This overestimates the actual size on disk since layers can be shared,
// but requiring more disk space than strictly needed does no harm.
size, err := podman.GetImageVirtualSize(image)
if err != nil {
return err
}
spaceRequired += size
}
if freeSpace < uint64(spaceRequired) {
return errors.New(L("insufficient space on target device"))
}
return nil
}
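// SanityChecks verifies that podman is available on the host.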
func SanityChecks() error {
if _, err := exec.LookPath("podman"); err != nil {
return errors.New(L("install podman before running this command"))
}
return nil
}
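// dirSize returns the cumulative size in bytes of all files under path.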
func dirSize(path string) (int64, error) {
var size int64
err := filepath.WalkDir(path, func(_ string, entry os.DirEntry, err error) error {
if err != nil {
return err
}
info, err := entry.Info()
if err != nil {
return err
}
size += info.Size()
return nil
})
return size, err
}
0707010000000b000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001900000000mgradm/cmd/backup/shared0707010000000c000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001200000000mgradm/cmd/backup0707010000000d000081a400000000000000000000000168ed21dd00000f06000000000000000000000000000000000000001200000000mgradm/cmd/cmd.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package cmd
import (
"os"
"path"
"strings"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared/completion"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/backup"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/distro"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/gpg"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/hub"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/inspect"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/install"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/migrate"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/restart"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/scale"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/server"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/start"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/status"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/stop"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/support"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/uninstall"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/upgrade"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
)
// NewUyuniadmCommand returns a new cobra.Command implementing the root command for mgradm.
func NewUyuniadmCommand() (*cobra.Command, error) {
globalFlags := &types.GlobalFlags{}
name := path.Base(os.Args[0])
rootCmd := &cobra.Command{
Use: name,
Short: L("Uyuni administration tool"),
Long: L("Tool to help administering Uyuni servers in containers"),
Version: utils.Version,
SilenceUsage: true, // Don't show usage help on errors
}
rootCmd.AddGroup(&cobra.Group{
ID: "deploy",
Title: L("Server Deployment:"),
})
rootCmd.AddGroup(&cobra.Group{
ID: "management",
Title: L("Server Management:"),
})
rootCmd.AddGroup(&cobra.Group{
ID: "tool",
Title: L("Administrator tools:"),
})
rootCmd.SetUsageTemplate(utils.GetLocalizedUsageTemplate())
rootCmd.PersistentPreRun = func(cmd *cobra.Command, _ []string) {
// do not log if running the completion cmd as the output is redirected to create a file to source
if cmd.Name() != "completion" {
utils.LogInit(true)
utils.SetLogLevel(globalFlags.LogLevel)
log.Info().Msgf(L("Starting %s"), strings.Join(os.Args, " "))
log.Info().Msgf(L("Use of this software implies acceptance of the End User License Agreement."))
}
}
rootCmd.PersistentFlags().StringVarP(&globalFlags.ConfigPath, "config", "c", "", L("configuration file path"))
utils.AddLogLevelFlags(rootCmd, &globalFlags.LogLevel)
migrateCmd := migrate.NewCommand(globalFlags)
rootCmd.AddCommand(migrateCmd)
installCmd := install.NewCommand(globalFlags)
rootCmd.AddCommand(installCmd)
rootCmd.AddCommand(uninstall.NewCommand(globalFlags))
distroCmd, err := distro.NewCommand(globalFlags)
if err != nil {
return rootCmd, err
}
rootCmd.AddCommand(distroCmd)
rootCmd.AddCommand(completion.NewCommand(globalFlags))
rootCmd.AddCommand(support.NewCommand(globalFlags))
rootCmd.AddCommand(start.NewCommand(globalFlags))
rootCmd.AddCommand(scale.NewCommand(globalFlags))
rootCmd.AddCommand(hub.NewCommand(globalFlags))
rootCmd.AddCommand(restart.NewCommand(globalFlags))
rootCmd.AddCommand(stop.NewCommand(globalFlags))
rootCmd.AddCommand(status.NewCommand(globalFlags))
rootCmd.AddCommand(inspect.NewCommand(globalFlags))
rootCmd.AddCommand(upgrade.NewCommand(globalFlags))
rootCmd.AddCommand(gpg.NewCommand(globalFlags))
rootCmd.AddCommand(backup.NewCommand(globalFlags))
rootCmd.AddCommand(server.NewCommand(globalFlags))
rootCmd.AddCommand(utils.GetConfigHelpCommand())
return rootCmd, err
}
0707010000000e000081a400000000000000000000000168ed21dd00001776000000000000000000000000000000000000001800000000mgradm/cmd/distro/cp.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package distro
import (
"errors"
"fmt"
"os"
"strings"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared"
"github.com/uyuni-project/uyuni-tools/shared/api"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
func isRoot() bool {
return os.Geteuid() == 0
}
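// umount unmounts the given mountpoint, using sudo when not running as root.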
func umount(mountpoint string) {
umountCmd := []string{}
if !isRoot() {
umountCmd = []string{"/usr/bin/sudo"}
}
umountCmd = append(umountCmd, "/usr/bin/umount", mountpoint)
if err := utils.RunCmd(umountCmd[0], umountCmd[1:]...); err != nil {
log.Error().Err(err).Msgf(L("Unable to unmount ISO image, leaving %s intact"), mountpoint)
}
}
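// registerDistro registers the copied distribution on the server using the kickstart/tree/create API endpoint.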
func registerDistro(connection *api.ConnectionDetails, distro *types.Distribution, flags *flagpole) error {
// Fill server FQDN if not provided, ignore error, will be handled later
if flags.ConnectionDetails.Server == "" {
flags.ConnectionDetails.Server, _ = getServerFqdn(flags)
log.Debug().Msgf("Using api-server FQDN '%s'", flags.ConnectionDetails.Server)
}
client, err := api.Init(connection)
if err == nil {
err = client.Login()
}
if err != nil {
return utils.Errorf(err, L("unable to login and register the distribution. Manual distro registration is required"))
}
data := map[string]interface{}{
"treeLabel": distro.TreeLabel,
"basePath": distro.BasePath,
"channelLabel": distro.ChannelLabel,
"installType": distro.InstallType,
}
_, err = client.Post("kickstart/tree/create", data)
if err != nil {
return utils.Errorf(err, L("unable to register the distribution. Manual distro registration is required"))
}
log.Info().Msgf(L("Distribution %s successfully registered"), distro.TreeLabel)
return nil
}
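// prepareSource returns a directory containing the distribution content.
// ISO sources are mounted into a temporary directory; the returned cleanup function removes it.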
func prepareSource(source string) (string, func(), error) {
srcdir := source
if !utils.FileExists(source) {
return "", nil, fmt.Errorf(L("source %s does not exists"), source)
}
var cleaner func()
if strings.HasSuffix(source, ".iso") {
log.Debug().Msg("Source is an ISO image")
var err error
srcdir, cleaner, err = utils.TempDir()
if err != nil {
return "", nil, err
}
mountCmd := []string{}
if !isRoot() {
mountCmd = []string{"/usr/bin/sudo"}
}
mountCmd = append(mountCmd, "/usr/bin/mount", "-o", "ro,loop", source, srcdir)
if out, err := utils.RunCmdOutput(zerolog.DebugLevel, mountCmd[0], mountCmd[1:]...); err != nil {
log.Debug().Msgf("Error mounting ISO image: '%s'", out)
return "", cleaner, fmt.Errorf(L("unable to mount ISO image: %s"), out)
}
// Not sure why, but cleaner is not setup once we leave if statement :/
return srcdir, cleaner, nil
}
return srcdir, cleaner, nil
}
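// copyDistro copies the distribution files into the server container under /srv/www/distributions/<TreeLabel>.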
func copyDistro(srcdir string, distro *types.Distribution, flags *flagpole) error {
if len(distro.TreeLabel) == 0 {
return errors.New(L("Missing TreeLabel. Please specify distribution name"))
}
cnx := shared.NewConnection(flags.Backend, podman.ServerContainerName, kubernetes.ServerFilter)
const distrosPath = "/srv/www/distributions/"
dstpath := distrosPath + distro.TreeLabel
distro.BasePath = dstpath
if cnx.TestExistenceInPod(dstpath) {
return fmt.Errorf(L("distribution with same name already exists: %s"), dstpath)
}
if _, err := cnx.Exec("sh", "-c", "mkdir -p "+distrosPath); err != nil {
return utils.Errorf(err, L("cannot create %s path in container"), distrosPath)
}
log.Info().Msgf(L("Copying distribution %s"), distro.TreeLabel)
if err := cnx.Copy(srcdir, "server:"+dstpath, "tomcat", "susemanager"); err != nil {
return utils.Errorf(err, L("cannot copy %s"), dstpath)
}
log.Info().Msgf(L("Distribution has been copied into %s"), distro.BasePath)
return nil
}
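// getServerFqdn reads the java.hostname value from /etc/rhn/rhn.conf inside the server container.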
func getServerFqdn(flags *flagpole) (string, error) {
cnx := shared.NewConnection(flags.Backend, podman.ServerContainerName, kubernetes.ServerFilter)
fqdn, err := cnx.Exec("sh", "-c", "cat /etc/rhn/rhn.conf 2>/dev/null | grep 'java.hostname' | cut -d' ' -f3")
return strings.TrimSuffix(string(fqdn), "\n"), err
}
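// distroCp prepares the source, detects the distribution, copies it into the container
// and registers it via the API when connection details are provided.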
func distroCp(
_ *types.GlobalFlags,
flags *flagpole,
_ *cobra.Command,
args []string,
) error {
source := args[0]
distroDetails := types.DistributionDetails{}
if len(args) >= 2 {
distroDetails.Name = args[1]
if len(args) > 3 {
distroDetails.Version = args[2]
distroDetails.Arch = types.GetArch(args[3])
}
}
attemptRegistration := false
if flags.ConnectionDetails.User != "" && flags.ConnectionDetails.Password != "" {
attemptRegistration = true
}
srcdir, cleaner, err := prepareSource(source)
// A non-nil cleaner means the temporary directory was created even if the mount failed, so we need to defer the cleanup
if cleaner != nil {
defer func() {
// If err is nil at this point, everything was successful and the image needs to be unmounted
if err == nil {
umount(srcdir)
}
cleaner()
}()
}
if err != nil {
return err
}
distribution := types.Distribution{}
if err := detectDistro(srcdir, distroDetails, flags, &distribution); err != nil {
// If we are not registering the distribution, we do not need all the details for a mere copy, just the name
if attemptRegistration {
return err
}
log.Debug().Msg("Would not be able to auto register")
if len(distroDetails.Name) == 0 {
// If there is no hint, just use ISO/dir name
distroDetails.Name = getNameFromSource(source)
}
distribution.TreeLabel = distroDetails.Name
}
if len(args) == 1 {
log.Info().Msgf(L("Auto-detected distribution %s"), distribution.TreeLabel)
}
if err := copyDistro(srcdir, &distribution, flags); err != nil {
return err
}
if attemptRegistration {
return registerDistro(&flags.ConnectionDetails, &distribution, flags)
}
log.Info().Msgf(L("Continue by registering autoinstallation distribution"))
return nil
}
0707010000000f000081a400000000000000000000000168ed21dd0000140d000000000000000000000000000000000000001c00000000mgradm/cmd/distro/detect.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package distro
import (
"errors"
"fmt"
"path"
"path/filepath"
"strings"
"github.com/rs/zerolog/log"
"github.com/spf13/viper"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
// Viper unmarshals keys in lowercase, so we need to keep our bundled product map lowercase as well.
var defaultProductMap = types.ProductMap{
"suse linux enterprise": {
"15 sp4": {
types.AMD64: {
TreeLabel: "SLES15SP4",
InstallType: "sles15generic",
ChannelLabel: "sle-product-sles15-sp4-pool-x86_64",
},
},
"15 sp5": {
types.AMD64: {
TreeLabel: "SLES15SP5",
InstallType: "sles15generic",
ChannelLabel: "sle-product-sles15-sp5-pool-x86_64",
},
},
"15 sp6": {
types.AMD64: {
TreeLabel: "SLES15SP6",
InstallType: "sles15generic",
ChannelLabel: "sle-product-sles15-sp6-pool-x86_64",
},
types.AArch64: {
TreeLabel: "SLES15SP6",
InstallType: "sles15generic",
ChannelLabel: "sle-product-sles15-sp6-pool-aarch64",
},
},
"15 sp7": {
types.AMD64: {
TreeLabel: "SLES15SP7",
InstallType: "sles15generic",
ChannelLabel: "sle-product-sles15-sp7-pool-x86_64",
},
types.AArch64: {
TreeLabel: "SLES15SP7",
InstallType: "sles15generic",
ChannelLabel: "sle-product-sles15-sp7-pool-aarch64",
},
},
"12 sp5": {
types.AMD64: {
TreeLabel: "SLES12SP5",
InstallType: "sles12generic",
ChannelLabel: "sles12-sp5-pool-x86_64",
},
},
},
"red hat enterprise linux": {
"7": {
types.AMD64: {
TreeLabel: "RHEL7",
InstallType: "rhel_7",
ChannelLabel: "rhel7-pool-x86_64",
},
},
"8": {
types.AMD64: {
TreeLabel: "RHEL8",
InstallType: "rhel_8",
ChannelLabel: "rhel8-pool-x86_64",
},
},
"9": {
types.AMD64: {
TreeLabel: "RHEL9",
InstallType: "rhel_9",
ChannelLabel: "rhel9-pool-x86_64",
},
},
},
}
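// getDistroFromDetails looks up the distribution in the product map from the configuration
// first and in the bundled product map otherwise.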
func getDistroFromDetails(distro string, version string, arch types.Arch, flags *flagpole) (types.Distribution, error) {
productFromConfig := flags.ProductMap
var distribution types.Distribution
var ok bool
// The product map is all lowercase; make sure the lookup key matches since Go map lookups are case sensitive
distro = strings.ToLower(distro)
version = strings.ToLower(version)
if productFromConfig[distro] != nil {
distribution, ok = productFromConfig[distro][version][arch]
} else if defaultProductMap[distro] != nil {
distribution, ok = defaultProductMap[distro][version][arch]
}
if !ok {
return types.Distribution{}, errors.New(
L("distribution not found in product map. Please update productmap or provide channel label"),
)
}
return distribution, nil
}
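// getDistroFromTreeinfo reads the distribution name, version and architecture from the
// media '.treeinfo' file and resolves them through the product map.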
func getDistroFromTreeinfo(path string, flags *flagpole) (types.Distribution, error) {
treeinfopath := filepath.Join(path, ".treeinfo")
log.Debug().Msgf("Reading .treeinfo %s", treeinfopath)
treeInfoViper := viper.New()
treeInfoViper.SetConfigType("ini")
treeInfoViper.SetConfigName(".treeinfo")
treeInfoViper.AddConfigPath(path)
if err := treeInfoViper.ReadInConfig(); err != nil {
return types.Distribution{}, errors.New(
L("unable to read distribution treeinfo. Please provide distribution details and/or channel label"),
)
}
dname := treeInfoViper.GetString("release.name")
dversion := treeInfoViper.GetString("release.version")
darch := treeInfoViper.GetString("general.arch")
log.Debug().Msgf("Detected distribution %s, version %s. arch %s", dname, dversion, darch)
return getDistroFromDetails(dname, dversion, types.GetArch(darch), flags)
}
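// detectDistro fills distro using the provided distribution details, the channel override
// or the media '.treeinfo' file.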
func detectDistro(
path string,
distroDetails types.DistributionDetails,
flags *flagpole,
distro *types.Distribution,
) error {
treeinfopath := filepath.Join(path, ".treeinfo")
channelLabel := flags.ChannelLabel
if distroDetails.Name != "" {
if channelLabel != "" {
log.Debug().Msg("Using channel override")
*distro = types.Distribution{
InstallType: "suse",
TreeLabel: distroDetails.Name,
ChannelLabel: channelLabel,
}
return nil
} else if distroDetails.Version != "" && distroDetails.Arch != types.UnknownArch {
log.Debug().Msg("Using distro details override")
var err error
*distro, err = getDistroFromDetails(distroDetails.Name, distroDetails.Version, distroDetails.Arch, flags)
return err
}
} else if utils.FileExists(treeinfopath) {
log.Debug().Msgf("Using .treeinfo %s", treeinfopath)
} else {
return fmt.Errorf(
L("distribution treeinfo %s does not exists. Please provide distribution details and/or channel label"),
treeinfopath,
)
}
var err error
*distro, err = getDistroFromTreeinfo(path, flags)
if err != nil {
return err
}
// Overrides from the command line
if distroDetails.Name != "" {
distro.TreeLabel = distroDetails.Name
}
if channelLabel != "" {
distro.ChannelLabel = channelLabel
}
return nil
}
func getNameFromSource(source string) string {
return strings.TrimSuffix(path.Base(source), ".iso")
}
07070100000010000081a400000000000000000000000168ed21dd00001644000000000000000000000000000000000000001c00000000mgradm/cmd/distro/distro.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package distro
import (
"strings"
"text/template"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared/api"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
"gopkg.in/yaml.v2"
)
type flagpole struct {
Backend string
ChannelLabel string `mapstructure:"channel"`
ProductMap types.ProductMap `mapstructure:"ProductMap"`
ConnectionDetails api.ConnectionDetails `mapstructure:"api"`
}
type productMapTemplateData struct {
DefaultProductMapRender string
ProductMapRender string
}
func prettyPrint(productMap types.ProductMap) string {
if prettyPrintedProductMapBytes, err := yaml.Marshal(map[string]interface{}{"ProductMap": productMap}); err == nil {
return string(prettyPrintedProductMapBytes)
}
return ""
}
func getProductMapHelp() string {
return L(`Auto installation distribution product mapping.
For a distribution to be registered by the server it is important to map the distribution to the correct software channel.
Software channels can be named freely, without any correlation to the distribution name,
so it must be possible to map a custom distribution name to a software channel.
One way to set the software channel is the --channel flag of the distribution copy command.
For frequent usage it is possible to write a custom product mapping to the mgradm configuration file as follows:
ProductMap:
  <distribution name>:
    <distribution version>:
      <distribution architecture>:
        ChannelLabel: <channel label>
        InstallType: <one of rhel_7|rhel_8|rhel_9|sles12generic|sles15generic|generic_rpm>
        TreeLabel: <custom distribution name>
Where
* <distribution name> is the name of the distribution, by default taken from the '.treeinfo' file on the media.
  If '.treeinfo' is not found or available, the command line option is required and used.
* <distribution version> is the version of the distribution, by default taken from the '.treeinfo' file on the media.
  If '.treeinfo' is not found, the command line option is required and used.
* <distribution architecture> is the distribution architecture, by default taken from the '.treeinfo' file on the media.
  If '.treeinfo' is not found, the command line option is required and used.
* ChannelLabel is the channel label from the Uyuni server to be used for this distribution;
  it can be overridden by the command line flag.
* InstallType is used when the installer is known (autoyast or kickstart); otherwise use 'generic_rpm'.
* TreeLabel is how the distribution will be presented in the Uyuni server UI. If not set, <distribution name> is used.
Built-in product map:
{{ .DefaultProductMapRender }}
`)
}
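// Illustrative sketch only: a hypothetical ProductMap entry as it could appear in the mgradm
// configuration file, following the structure documented in the help text above. The
// distribution name, version, architecture and channel label are examples, not shipped defaults.
//
//   ProductMap:
//     exampleos:
//       "15.6":
//         x86_64:
//           ChannelLabel: exampleos-15.6-pool-x86_64
//           InstallType: sles15generic
//           TreeLabel: ExampleOS 15.6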
func showHelp(_ *types.GlobalFlags,
flags *flagpole,
_ *cobra.Command,
_ []string,
) error {
mergedMaps := make(types.ProductMap, len(defaultProductMap))
for k, v := range defaultProductMap {
mergedMaps[k] = v
}
for distro, versions := range flags.ProductMap {
if _, ok := mergedMaps[distro]; ok {
for version, archs := range versions {
if _, ok := mergedMaps[distro][version]; ok {
for arch, distroDetail := range archs {
// the product map from the config file has priority, overwrite the default entry
mergedMaps[distro][version][arch] = distroDetail
}
} else {
mergedMaps[distro][version] = archs
}
}
} else {
mergedMaps[distro] = versions
}
}
print(prettyPrint(mergedMaps))
return nil
}
func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[flagpole]) (*cobra.Command, error) {
var flags flagpole
distroCmd := &cobra.Command{
Use: "distribution",
GroupID: "tool",
Short: L("Distributions management"),
Long: L("Tools for autoinstallation distributions management"),
Aliases: []string{"distro"},
}
cpCmd := &cobra.Command{
Use: "copy path-to-source [distribution-name [version arch]]",
Short: L("Copy distribution files from iso to the container"),
Long: L(`Takes a path to a source ISO file or a directory with a mounted ISO and copies it into the container.
Optional parameters 'distribution-name', 'version' and 'arch' specify a custom distribution.
If not set, the distribution name is autodetected:
- use the name from the '.treeinfo' file if it exists
- otherwise use the name of the ISO or of the passed directory
Note: API details are required for auto registration.`),
Aliases: []string{"cp"},
RunE: func(cmd *cobra.Command, args []string) error {
return utils.CommandHelper(globalFlags, cmd, args, &flags, nil, run)
},
}
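// A minimal usage sketch (paths, names and channel label below are hypothetical):
//
//   mgradm distribution copy /path/to/distro.iso
//   mgradm distribution copy /mnt/distro MyDistro 15.6 x86_64 --channel my-parent-channel
//
// The first form relies on autodetection from '.treeinfo' or the ISO name; the second one
// overrides the distribution details and the parent channel explicitly.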
cpCmd.Flags().String("channel", "", L("Set parent channel for the distribution."))
cpCmdHelp := &cobra.Command{
Use: "productmap",
Short: L("Show distribution product map"),
RunE: func(cmd *cobra.Command, args []string) error {
return utils.CommandHelper(globalFlags, cmd, args, &flags, nil, showHelp)
},
}
t := template.Must(template.New("help").Parse(getProductMapHelp()))
var helpBuilder strings.Builder
if err := t.Execute(&helpBuilder, productMapTemplateData{
DefaultProductMapRender: prettyPrint(defaultProductMap),
ProductMapRender: prettyPrint(flags.ProductMap),
}); err != nil {
log.Fatal().Err(err).Msg(L("failed to compute config help command"))
}
cpCmdHelp.SetHelpTemplate(helpBuilder.String())
api.AddAPIFlags(distroCmd)
distroCmd.AddCommand(cpCmd)
distroCmd.AddCommand(cpCmdHelp)
return distroCmd, nil
}
// NewCommand command for distribution management.
func NewCommand(globalFlags *types.GlobalFlags) (*cobra.Command, error) {
return newCmd(globalFlags, distroCp)
}
07070100000011000081a400000000000000000000000168ed21dd00000412000000000000000000000000000000000000002100000000mgradm/cmd/distro/distro_test.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package distro
import (
"testing"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared/testutils"
"github.com/uyuni-project/uyuni-tools/shared/testutils/flagstests"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
func TestParamsParsing(t *testing.T) {
args := []string{
"copy",
"--channel", "parent-channel",
}
args = append(args, flagstests.APIFlagsTestArgs...)
// Test function asserting that the args are properly parsed
tester := func(_ *types.GlobalFlags, flags *flagpole, _ *cobra.Command, _ []string) error {
testutils.AssertEquals(t, "Error parsing --channel", "parent-channel", flags.ChannelLabel)
flagstests.AssertAPIFlags(t, &flags.ConnectionDetails)
return nil
}
globalFlags := types.GlobalFlags{}
cmd, _ := newCmd(&globalFlags, tester)
testutils.AssertHasAllFlags(t, cmd, args)
cmd.SetArgs(args)
if err := cmd.Execute(); err != nil {
t.Errorf("command failed with error: %s", err)
}
}
07070100000012000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001200000000mgradm/cmd/distro07070100000013000081a400000000000000000000000168ed21dd0000119f000000000000000000000000000000000000001a00000000mgradm/cmd/gpg/add/gpg.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package gpgadd
import (
"net/url"
"os"
"path"
"path/filepath"
"strings"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
"github.com/uyuni-project/uyuni-tools/shared"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
const customKeyringPath = "/var/spacewalk/gpg/customer-build-keys.gpg"
type gpgAddFlags struct {
Backend string
Force bool
}
func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[gpgAddFlags]) *cobra.Command {
gpgAddKeyCmd := &cobra.Command{
Use: "add [URL]...",
Short: L("Add GPG keys for 3rd party repositories"),
Args: cobra.MinimumNArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
var flags gpgAddFlags
return utils.CommandHelper(globalFlags, cmd, args, &flags, nil, run)
},
}
gpgAddKeyCmd.Flags().BoolP("force", "f", false, L("Import without asking confirmation"))
utils.AddBackendFlag(gpgAddKeyCmd)
return gpgAddKeyCmd
}
// NewCommand import gpg keys from 3rd party repository.
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
return newCmd(globalFlags, gpgAddKeys)
}
func gpgAddKeys(_ *types.GlobalFlags, flags *gpgAddFlags, _ *cobra.Command, args []string) error {
cnx := shared.NewConnection(flags.Backend, podman.ServerContainerName, kubernetes.ServerFilter)
if !cnx.TestExistenceInPod(customKeyringPath) {
if err := adm_utils.ExecCommand(
zerolog.InfoLevel, cnx, "mkdir", "-m", "700", "-p", filepath.Dir(customKeyringPath),
); err != nil {
return utils.Errorf(err, L("failed to create folder %s"), filepath.Dir(customKeyringPath))
}
if err := adm_utils.ExecCommand(
zerolog.InfoLevel, cnx, "gpg", "--no-default-keyring", "--keyring", customKeyringPath, "--fingerprint",
); err != nil {
return utils.Errorf(err, L("failed to create keyring %s"), customKeyringPath)
}
}
gpgAddCmd := []string{"gpg", "--no-default-keyring", "--import", "--import-options", "import-minimal"}
gpgAddCmd = append(gpgAddCmd, "--keyring", customKeyringPath)
tmpDir, cleaner, err := utils.TempDir()
if err != nil {
return err
}
defer cleaner()
for _, keyURL := range args {
var hostKeyPath string
var keyname string
if _, err := os.Stat(keyURL); err == nil {
// gpg passed in a local file
hostKeyPath = keyURL
keyname = filepath.Base(hostKeyPath)
} else {
// Parse the URL
parsedURL, err := url.Parse(keyURL)
if err != nil {
log.Error().Err(err).Msgf(L("failed to parse %s"), keyURL)
continue
}
keyname = path.Base(parsedURL.Path)
hostKeyPath = filepath.Join(tmpDir, keyname)
if err := utils.DownloadFile(hostKeyPath, keyURL); err != nil {
log.Error().Err(err).Msgf(L("failed to download %s"), keyURL)
continue
}
}
if err := utils.RunCmdStdMapping(zerolog.InfoLevel, "gpg", "--show-key", hostKeyPath); err != nil {
log.Error().Err(err).Msgf(L("failed to show key %s"), hostKeyPath)
continue
}
if !flags.Force {
ret, err := utils.YesNo(L("Do you really want to trust this key"))
if err != nil {
return err
}
if !ret {
return nil
}
}
containerKeyPath := filepath.Join(filepath.Dir(customKeyringPath), keyname)
if err := cnx.Copy(hostKeyPath, "server:"+containerKeyPath, "", ""); err != nil {
log.Error().Err(err).Msgf(L("failed to copy %[1]s to %[2]s"), hostKeyPath, containerKeyPath)
continue
}
defer func() {
_ = adm_utils.ExecCommand(zerolog.Disabled, cnx, "rm", containerKeyPath)
}()
gpgAddCmd = append(gpgAddCmd, containerKeyPath)
}
log.Info().Msgf(L("Running %s"), strings.Join(gpgAddCmd, " "))
if err := adm_utils.ExecCommand(zerolog.InfoLevel, cnx, gpgAddCmd...); err != nil {
return utils.Errorf(err, L("failed to run import key"))
}
// this is for running import-suma-build-keys, which imports customer-build-keys.gpg
uyuniUpdateCmd := []string{"systemctl", "restart", "uyuni-update-config"}
log.Info().Msgf(L("Running %s"), strings.Join(uyuniUpdateCmd, " "))
if err := adm_utils.ExecCommand(zerolog.InfoLevel, cnx, uyuniUpdateCmd...); err != nil {
return utils.Errorf(err, L("failed to restart uyuni-update-config"))
}
return nil
}
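// For illustration, with two keys the import command executed in the server container looks
// roughly like this (key file names are hypothetical; the files are copied next to the custom
// keyring before the import and removed afterwards):
//
//   gpg --no-default-keyring --import --import-options import-minimal \
//       --keyring /var/spacewalk/gpg/customer-build-keys.gpg \
//       /var/spacewalk/gpg/repo1.gpg /var/spacewalk/gpg/repo2.gpg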
07070100000014000081a400000000000000000000000168ed21dd000003a0000000000000000000000000000000000000001f00000000mgradm/cmd/gpg/add/gpg_test.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package gpgadd
import (
"testing"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared/testutils"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
func TestParamsParsing(t *testing.T) {
args := []string{
"--force",
"--backend", "kubectl",
"path/to/key",
}
// Test function asserting that the args are properly parsed
tester := func(_ *types.GlobalFlags, flags *gpgAddFlags, _ *cobra.Command, _ []string) error {
testutils.AssertTrue(t, "Error parsing --force", flags.Force)
testutils.AssertEquals(t, "Error parsing --backend", "kubectl", flags.Backend)
return nil
}
globalFlags := types.GlobalFlags{}
cmd := newCmd(&globalFlags, tester)
testutils.AssertHasAllFlags(t, cmd, args)
cmd.SetArgs(args)
if err := cmd.Execute(); err != nil {
t.Errorf("command failed with error: %s", err)
}
}
07070100000015000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001300000000mgradm/cmd/gpg/add07070100000016000081a400000000000000000000000168ed21dd00000316000000000000000000000000000000000000001600000000mgradm/cmd/gpg/gpg.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package gpg
import (
"github.com/spf13/cobra"
gpgadd "github.com/uyuni-project/uyuni-tools/mgradm/cmd/gpg/add"
gpglist "github.com/uyuni-project/uyuni-tools/mgradm/cmd/gpg/list"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
// NewCommand import gpg keys from 3rd party repository.
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
gpgKeyCmd := &cobra.Command{
Use: "gpg",
GroupID: "tool",
Short: L("Manage GPG keys for 3rd party repositories"),
Args: cobra.ExactArgs(1),
}
gpgKeyCmd.AddCommand(gpgadd.NewCommand(globalFlags))
gpgKeyCmd.AddCommand(gpglist.NewCommand(globalFlags))
return gpgKeyCmd
}
07070100000017000081a400000000000000000000000168ed21dd00000897000000000000000000000000000000000000001b00000000mgradm/cmd/gpg/list/gpg.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package gpglist
import (
"strings"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
"github.com/uyuni-project/uyuni-tools/shared"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
const customKeyringPath = "/var/spacewalk/gpg/customer-build-keys.gpg"
const systemKeyringPath = "/usr/lib/susemanager/susemanager-build-keys.gpg"
type gpgListFlags struct {
Backend string
System bool
}
func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[gpgListFlags]) *cobra.Command {
gpgListKeyCmd := &cobra.Command{
Use: "list",
Short: L("List GPG keys"),
Long: L("List GPG keys from custom keyring (default) or system keyring"),
RunE: func(cmd *cobra.Command, args []string) error {
var flags gpgListFlags
return utils.CommandHelper(globalFlags, cmd, args, &flags, nil, run)
},
}
gpgListKeyCmd.Flags().BoolP("system", "s", false, L("List keys from system keyring"))
utils.AddBackendFlag(gpgListKeyCmd)
return gpgListKeyCmd
}
// NewCommand lists imported gpg keys.
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
return newCmd(globalFlags, gpgListKeys)
}
func gpgListKeys(_ *types.GlobalFlags, flags *gpgListFlags, _ *cobra.Command, _ []string) error {
cnx := shared.NewConnection(flags.Backend, podman.ServerContainerName, kubernetes.ServerFilter)
gpgListCmd := []string{"gpg", "--no-default-keyring", "--keyring"}
if flags.System {
gpgListCmd = append(gpgListCmd, systemKeyringPath, "--list-keys")
} else {
gpgListCmd = append(gpgListCmd, customKeyringPath, "--list-keys")
}
log.Info().Msgf(L("Running %s"), strings.Join(gpgListCmd, " "))
if err := adm_utils.ExecCommand(
zerolog.InfoLevel, cnx, gpgListCmd...,
); err != nil {
return utils.Errorf(err, L("failed to list keys in selected keyring"))
}
return nil
}
07070100000018000081a400000000000000000000000168ed21dd00000394000000000000000000000000000000000000002000000000mgradm/cmd/gpg/list/gpg_test.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package gpglist
import (
"testing"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared/testutils"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
func TestParamsParsing(t *testing.T) {
args := []string{
"--system",
"--backend", "kubectl",
}
// Test function asserting that the args are properly parsed
tester := func(_ *types.GlobalFlags, flags *gpgListFlags, _ *cobra.Command, _ []string) error {
testutils.AssertTrue(t, "Error parsing --system", flags.System)
testutils.AssertEquals(t, "Error parsing --backend", "kubectl", flags.Backend)
return nil
}
globalFlags := types.GlobalFlags{}
cmd := newCmd(&globalFlags, tester)
testutils.AssertHasAllFlags(t, cmd, args)
cmd.SetArgs(args)
if err := cmd.Execute(); err != nil {
t.Errorf("command failed with error: %s", err)
}
}
07070100000019000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001400000000mgradm/cmd/gpg/list0707010000001a000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000000f00000000mgradm/cmd/gpg0707010000001b000081a400000000000000000000000168ed21dd000002d2000000000000000000000000000000000000001600000000mgradm/cmd/hub/hub.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package hub
import (
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/hub/register"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
// NewCommand command for Hub management.
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
hubCmd := &cobra.Command{
Use: "hub",
GroupID: "management",
Short: L("Hub management"),
Long: L("Tools and utilities for Hub management"),
Aliases: []string{"hub"},
}
hubCmd.SetUsageTemplate(hubCmd.UsageTemplate())
hubCmd.AddCommand(register.NewCommand(globalFlags))
return hubCmd
}
0707010000001c000081a400000000000000000000000168ed21dd00000fe7000000000000000000000000000000000000002400000000mgradm/cmd/hub/register/register.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package register
import (
"fmt"
"strings"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared"
"github.com/uyuni-project/uyuni-tools/shared/api"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
type registerFlags struct {
Backend string
ConnectionDetails api.ConnectionDetails `mapstructure:"api"`
}
func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[registerFlags]) *cobra.Command {
registerCmd := &cobra.Command{
Use: "register",
Short: L("Register"),
Long: L("Register this peripheral server to Hub API"),
Args: cobra.MaximumNArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
var flags registerFlags
return utils.CommandHelper(globalFlags, cmd, args, &flags, nil, run)
},
}
registerCmd.SetUsageTemplate(registerCmd.UsageTemplate())
if utils.KubernetesBuilt {
utils.AddBackendFlag(registerCmd)
}
api.AddAPIFlags(registerCmd)
return registerCmd
}
// NewCommand command for registering peripheral server to hub.
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
return newCmd(globalFlags, register)
}
func register(_ *types.GlobalFlags, flags *registerFlags, _ *cobra.Command, _ []string) error {
cnx := shared.NewConnection(flags.Backend, podman.ServerContainerName, kubernetes.ServerFilter)
config, err := getRhnConfig(cnx)
if err != nil {
return err
}
err = registerToHub(config, &flags.ConnectionDetails)
return err
}
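// getRhnConfig reads /etc/rhn/rhn.conf from the server container and returns it as a key/value
// map. A sketch of the format it expects (values are hypothetical):
//
//   # comments and blank lines are ignored
//   java.hostname = server.example.com
//   report_db_name = reportdb
//
// Any other line without a '=' separator makes the function return an error.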
func getRhnConfig(cnx *shared.Connection) (map[string]string, error) {
out, err := cnx.Exec("/bin/cat", "/etc/rhn/rhn.conf")
if err != nil {
return nil, err
}
config := make(map[string]string)
lines := strings.Split(string(out), "\n")
for _, line := range lines {
if strings.TrimSpace(line) == "" || strings.HasPrefix(line, "#") {
continue
}
log.Trace().Msgf("Config: %s", line)
parts := strings.SplitN(line, "=", 2)
if len(parts) != 2 {
return nil, fmt.Errorf(L("invalid line format: %s"), line)
}
key := strings.TrimSpace(parts[0])
value := strings.TrimSpace(parts[1])
config[key] = value
}
return config, nil
}
func registerToHub(config map[string]string, cnxDetails *api.ConnectionDetails) error {
keys := []string{"java.hostname", "report_db_name", "report_db_port", "report_db_user", "report_db_password"}
for _, key := range keys {
if _, ok := config[key]; !ok {
return fmt.Errorf(L("mandatory %s entry missing in config"), key)
}
}
log.Info().Msgf(L("Hub API server: %s"), cnxDetails.Server)
client, err := api.Init(cnxDetails)
if err == nil {
err = client.Login()
}
if err != nil {
return utils.Errorf(err, L("failed to connect to the Hub server"))
}
data := map[string]interface{}{
"fqdn": config["java.hostname"],
}
ret, err := api.Post[int](client, "system/registerPeripheralServer", data)
if err != nil {
return utils.Errorf(err, L("failed to register this peripheral server"))
}
if !ret.Success {
return fmt.Errorf(L("failed to register this peripheral server: %s"), ret.Message)
}
id := ret.Result
data = map[string]interface{}{
"sid": id,
"reportDbName": config["report_db_name"],
"reportDbHost": config["java.hostname"],
"reportDbPort": config["report_db_port"],
"reportDbUser": config["report_db_user"],
"reportDbPassword": config["report_db_password"],
}
ret, err = api.Post[int](client, "system/updatePeripheralServerInfo", data)
if err != nil {
return utils.Errorf(err, L("failed to update peripheral server info"))
}
if !ret.Success {
return fmt.Errorf(L("failed to update peripheral server info: %s"), ret.Message)
}
log.Info().Msgf(L("Registered peripheral server: %[1]s, ID: %[2]d"), config["java.hostname"], id)
return nil
}
0707010000001d000081a400000000000000000000000168ed21dd00000481000000000000000000000000000000000000002900000000mgradm/cmd/hub/register/register_test.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package register
import (
"testing"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared/testutils"
"github.com/uyuni-project/uyuni-tools/shared/testutils/flagstests"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
func TestParamsParsing(t *testing.T) {
args := []string{}
if utils.KubernetesBuilt {
args = append(args, "--backend", "kubectl")
}
args = append(args, flagstests.APIFlagsTestArgs...)
// Test function asserting that the args are properly parsed
tester := func(_ *types.GlobalFlags, flags *registerFlags, _ *cobra.Command, _ []string) error {
if utils.KubernetesBuilt {
testutils.AssertEquals(t, "Error parsing --backend", "kubectl", flags.Backend)
}
flagstests.AssertAPIFlags(t, &flags.ConnectionDetails)
return nil
}
globalFlags := types.GlobalFlags{}
cmd := newCmd(&globalFlags, tester)
testutils.AssertHasAllFlags(t, cmd, args)
cmd.SetArgs(args)
if err := cmd.Execute(); err != nil {
t.Errorf("command failed with error: %s", err)
}
}
0707010000001e000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001800000000mgradm/cmd/hub/register0707010000001f000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000000f00000000mgradm/cmd/hub07070100000020000081a400000000000000000000000168ed21dd000006fb000000000000000000000000000000000000001e00000000mgradm/cmd/inspect/inspect.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package inspect
import (
"github.com/spf13/cobra"
cmd_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
"github.com/uyuni-project/uyuni-tools/shared"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
// InspectFlags are the flags used by inspect commands.
type inspectFlags struct {
Image types.ImageFlags `mapstructure:",squash"`
Pgsql types.PgsqlFlags
SCC types.SCCCredentials
Backend string
}
func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[inspectFlags]) *cobra.Command {
inspectCmd := &cobra.Command{
Use: "inspect",
GroupID: "deploy",
Short: L("Inspect"),
Long: L("Extract information from image and deployment"),
Args: cobra.MaximumNArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
var flags inspectFlags
return utils.CommandHelper(globalFlags, cmd, args, &flags, nil, run)
},
}
inspectCmd.SetUsageTemplate(inspectCmd.UsageTemplate())
cmd_utils.AddSCCFlag(inspectCmd)
cmd_utils.AddImageFlag(inspectCmd)
cmd_utils.AddPgsqlFlags(inspectCmd)
if utils.KubernetesBuilt {
utils.AddBackendFlag(inspectCmd)
}
return inspectCmd
}
// NewCommand for extracting information from image and deployment.
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
return newCmd(globalFlags, inspect)
}
func inspect(globalFlags *types.GlobalFlags, flags *inspectFlags, cmd *cobra.Command, args []string) error {
fn, err := shared.ChoosePodmanOrKubernetes(cmd.Flags(), podmanInspect, kuberneteInspect)
if err != nil {
return err
}
return fn(globalFlags, flags, cmd, args)
}
07070100000021000081a400000000000000000000000168ed21dd00000539000000000000000000000000000000000000002300000000mgradm/cmd/inspect/inspect_test.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package inspect
import (
"testing"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared/testutils"
"github.com/uyuni-project/uyuni-tools/shared/testutils/flagstests"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
func TestParamsParsing(t *testing.T) {
args := []string{}
if utils.KubernetesBuilt {
args = append(args, "--backend", "kubectl")
}
args = append(args, flagstests.ImageFlagsTestArgs...)
args = append(args, flagstests.SCCFlagTestArgs...)
args = append(args, flagstests.PgsqlFlagsTestArgs...)
// Test function asserting that the args are properly parsed
tester := func(_ *types.GlobalFlags, flags *inspectFlags, _ *cobra.Command, _ []string) error {
flagstests.AssertImageFlag(t, &flags.Image)
flagstests.AssertSCCFlag(t, &flags.SCC)
flagstests.AssertPgsqlFlag(t, &flags.Pgsql)
if utils.KubernetesBuilt {
testutils.AssertEquals(t, "Error parsing --backend", "kubectl", flags.Backend)
}
return nil
}
globalFlags := types.GlobalFlags{}
cmd := newCmd(&globalFlags, tester)
testutils.AssertHasAllFlags(t, cmd, args)
cmd.SetArgs(args)
if err := cmd.Execute(); err != nil {
t.Errorf("command failed with error: %s", err)
}
}
07070100000022000081a400000000000000000000000168ed21dd0000072d000000000000000000000000000000000000002100000000mgradm/cmd/inspect/kubernetes.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package inspect
import (
"encoding/json"
"errors"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
func kuberneteInspect(
_ *types.GlobalFlags,
flags *inspectFlags,
_ *cobra.Command,
_ []string,
) error {
serverImage, err := utils.ComputeImage("", utils.DefaultTag, flags.Image)
if err != nil && len(serverImage) > 0 {
return utils.Errorf(err, L("failed to determine image"))
}
cnx := shared.NewConnection("kubectl", "", kubernetes.ServerFilter)
if len(serverImage) <= 0 {
log.Debug().Msg("Use deployed image")
serverImage, err = kubernetes.GetRunningImage("uyuni")
if err != nil {
return errors.New(L("failed to find the image of the currently running server container: %s"))
}
}
namespace, err := cnx.GetNamespace("")
if err != nil {
return utils.Errorf(err, L("failed retrieving namespace"))
}
// Get the SCC credentials secret if existing
pullSecret, err := kubernetes.GetRegistrySecret(namespace, &types.SCCCredentials{}, kubernetes.ServerApp)
if err != nil {
return err
}
inspectResult, err := kubernetes.InspectServer(namespace, serverImage, flags.Image.PullPolicy, pullSecret)
if err != nil {
return utils.Errorf(err, L("inspect command failed"))
}
prettyInspectOutput, err := json.MarshalIndent(inspectResult, "", " ")
if err != nil {
return utils.Errorf(err, L("cannot print inspect result"))
}
outputString := "\n" + string(prettyInspectOutput)
log.Info().Msg(outputString)
return nil
}
07070100000023000081a400000000000000000000000168ed21dd0000014a000000000000000000000000000000000000001e00000000mgradm/cmd/inspect/nobuild.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build nok8s
package inspect
import (
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
func kuberneteInspect(
_ *types.GlobalFlags,
_ *inspectFlags,
_ *cobra.Command,
_ []string,
) error {
return nil
}
07070100000024000081a400000000000000000000000168ed21dd00000ac0000000000000000000000000000000000000001d00000000mgradm/cmd/inspect/podman.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package inspect
import (
"encoding/json"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
func podmanInspect(
_ *types.GlobalFlags,
flags *inspectFlags,
_ *cobra.Command,
_ []string,
) error {
serverImage, err := utils.ComputeImage("", utils.DefaultTag, flags.Image)
if err != nil && len(serverImage) > 0 {
return utils.Errorf(err, L("failed to determine server image"))
}
if len(serverImage) <= 0 {
log.Debug().Msg("Use already deployed server image")
serverImage, err = podman.GetRunningImage(podman.ServerContainerName)
if err != nil {
return utils.Errorf(err, L("failed to find the image of the currently running server container"))
}
}
log.Debug().Msgf("Wanted database image %[1]s", flags.Pgsql.Image.Name)
pgsqlImage, err := utils.ComputeImage("", utils.DefaultTag, flags.Pgsql.Image)
if err != nil && len(pgsqlImage) > 0 {
return utils.Errorf(err, L("failed to determine pgsql image"))
}
if len(pgsqlImage) <= 0 {
log.Debug().Msg("Use already deployed database image")
pgsqlImage, err = podman.GetRunningImage(podman.DBContainerName)
if err != nil {
return utils.Errorf(err, L("failed to find the image of the currently running db container"))
}
}
preparedServerImage, preparedDBImage, err := prepareImages(serverImage, pgsqlImage, flags.Image.PullPolicy, flags.SCC)
if err != nil {
return err
}
inspectResult, err := podman.Inspect(preparedServerImage, preparedDBImage)
if err != nil {
return utils.Errorf(err, L("inspect command failed"))
}
prettyInspectOutput, err := json.MarshalIndent(inspectResult, "", " ")
if err != nil {
return utils.Errorf(err, L("cannot print inspect result"))
}
outputString := "\n" + string(prettyInspectOutput)
log.Info().Msg(outputString)
return nil
}
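// prepareImages logs in to the registry using the given SCC credentials and prepares the server
// and database images according to the pull policy, returning the resolved image references.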
func prepareImages(
server string, pgsql string, pullPolicy string, scc types.SCCCredentials,
) (serverImage string, dbImage string, err error) {
hostData, err := podman.InspectHost()
if err != nil {
return "", "", err
}
authFile, cleaner, err := podman.PodmanLogin(hostData, scc)
if err != nil {
return "", "", utils.Errorf(err, L("failed to login to registry.suse.com"))
}
defer cleaner()
serverImage, err = podman.PrepareImage(authFile, server, pullPolicy, true)
if err != nil {
return "", "", err
}
dbImage, err = podman.PrepareImage(authFile, pgsql, pullPolicy, true)
if err != nil {
return serverImage, "", err
}
return serverImage, dbImage, nil
}
07070100000025000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001300000000mgradm/cmd/inspect07070100000026000081a400000000000000000000000168ed21dd00000340000000000000000000000000000000000000001e00000000mgradm/cmd/install/install.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package install
import (
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/install/kubernetes"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/install/podman"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
// NewCommand for installation.
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
installCmd := &cobra.Command{
Use: "install",
GroupID: "deploy",
Short: L("Install a new server"),
Long: L("Install a new server"),
}
installCmd.AddCommand(podman.NewCommand(globalFlags))
if kubernetesCmd := kubernetes.NewCommand(globalFlags); kubernetesCmd != nil {
installCmd.AddCommand(kubernetesCmd)
}
return installCmd
}
07070100000027000081a400000000000000000000000168ed21dd000008ce000000000000000000000000000000000000002c00000000mgradm/cmd/install/kubernetes/kubernetes.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/install/shared"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes"
cmd_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[kubernetes.KubernetesServerFlags]) *cobra.Command {
cmd := &cobra.Command{
Use: "kubernetes [fqdn]",
Short: L("Install a new server on a kubernetes cluster"),
Long: L(`Install a new server on a kubernetes cluster
The install command assumes the following:
* kubectl and helm are installed locally
* a working kubectl configuration is set up to connect to the cluster to deploy to
The helm values file will be overridden with the values from the command parameters or configuration.
NOTE: installing on a remote cluster is not supported yet!
`),
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
var flags kubernetes.KubernetesServerFlags
flagsUpdater := func(v *viper.Viper) {
flags.ServerFlags.Coco.IsChanged = v.IsSet("coco.replicas")
flags.ServerFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas")
flags.ServerFlags.Saline.IsChanged = v.IsSet("saline.replicas") || v.IsSet("saline.port")
flags.ServerFlags.Pgsql.IsChanged = v.IsSet("pgsql.replicas")
}
return utils.CommandHelper(globalFlags, cmd, args, &flags, flagsUpdater, run)
},
}
shared.AddInstallFlags(cmd)
cmd_utils.AddHelmInstallFlag(cmd)
cmd_utils.AddVolumesFlags(cmd)
return cmd
}
// NewCommand for kubernetes installation.
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
return newCmd(globalFlags, installForKubernetes)
}
func installForKubernetes(
_ *types.GlobalFlags,
flags *kubernetes.KubernetesServerFlags,
cmd *cobra.Command,
args []string,
) error {
flags.Installation.CheckParameters(cmd, "kubectl")
return kubernetes.Reconcile(flags, args[0])
}
07070100000028000081a400000000000000000000000168ed21dd00000594000000000000000000000000000000000000003100000000mgradm/cmd/install/kubernetes/kubernetes_test.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"testing"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes"
"github.com/uyuni-project/uyuni-tools/shared/testutils"
"github.com/uyuni-project/uyuni-tools/shared/testutils/flagstests"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
func TestParamsParsing(t *testing.T) {
args := flagstests.InstallFlagsTestArgs()
args = append(args, flagstests.ServerKubernetesFlagsTestArgs...)
args = append(args, flagstests.VolumesFlagsTestExpected...)
args = append(args, flagstests.PgsqlFlagsTestArgs...)
args = append(args, "srv.fq.dn")
// Test function asserting that the args are properly parsed
tester := func(_ *types.GlobalFlags, flags *kubernetes.KubernetesServerFlags,
_ *cobra.Command, args []string,
) error {
flagstests.AssertInstallFlags(t, &flags.ServerFlags)
flagstests.AssertServerKubernetesFlags(t, &flags.Kubernetes)
flagstests.AssertVolumesFlags(t, &flags.Volumes)
flagstests.AssertPgsqlFlag(t, &flags.Pgsql)
testutils.AssertEquals(t, "Wrong FQDN", "srv.fq.dn", args[0])
return nil
}
globalFlags := types.GlobalFlags{}
cmd := newCmd(&globalFlags, tester)
testutils.AssertHasAllFlags(t, cmd, args)
cmd.SetArgs(args)
if err := cmd.Execute(); err != nil {
t.Errorf("command failed with error: %s", err)
}
}
07070100000029000081a400000000000000000000000168ed21dd0000011a000000000000000000000000000000000000002900000000mgradm/cmd/install/kubernetes/nobuild.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build nok8s
package kubernetes
import (
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
func NewCommand(_ *types.GlobalFlags) *cobra.Command {
return nil
}
0707010000002a000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001e00000000mgradm/cmd/install/kubernetes0707010000002b000081a400000000000000000000000168ed21dd000008d7000000000000000000000000000000000000002400000000mgradm/cmd/install/podman/podman.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package podman
import (
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/install/shared"
adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
type podmanInstallFlags struct {
adm_utils.ServerFlags `mapstructure:",squash"`
Podman podman.PodmanFlags
}
func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[podmanInstallFlags]) *cobra.Command {
cmd := &cobra.Command{
Use: "podman [fqdn]",
Short: L("Install a new server on podman"),
Long: L(`Install a new server on podman
The install podman command assumes podman is installed locally.
NOTE: installing on a remote podman host is not supported yet!
`),
Args: cobra.MaximumNArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
var flags podmanInstallFlags
flagsUpdater := func(v *viper.Viper) {
flags.ServerFlags.Coco.IsChanged = v.IsSet("coco.replicas")
flags.ServerFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas")
flags.ServerFlags.Saline.IsChanged = v.IsSet("saline.replicas") || v.IsSet("saline.port")
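// Reuse the server CA and certificate for the database when no dedicated DB SSL material is given.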
if flags.Installation.SSL.Ca.IsThirdParty() && !flags.Installation.SSL.DB.CA.IsThirdParty() {
flags.Installation.SSL.DB.CA.Root = flags.Installation.SSL.Ca.Root
flags.Installation.SSL.DB.CA.Intermediate = flags.Installation.SSL.Ca.Intermediate
}
if flags.Installation.SSL.Server.IsDefined() && !flags.Installation.SSL.DB.IsDefined() {
flags.Installation.SSL.DB.Cert = flags.Installation.SSL.Server.Cert
flags.Installation.SSL.DB.Key = flags.Installation.SSL.Server.Key
}
}
return utils.CommandHelper(globalFlags, cmd, args, &flags, flagsUpdater, run)
},
}
adm_utils.AddMirrorFlag(cmd)
shared.AddInstallFlags(cmd)
podman.AddPodmanArgFlag(cmd)
return cmd
}
// NewCommand for podman installation.
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
return newCmd(globalFlags, installForPodman)
}
0707010000002c000081a400000000000000000000000168ed21dd000011cb000000000000000000000000000000000000002900000000mgradm/cmd/install/podman/podman_test.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package podman
import (
"os"
"path"
"testing"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared/testutils"
"github.com/uyuni-project/uyuni-tools/shared/testutils/flagstests"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
func TestParamsParsing(t *testing.T) {
args := flagstests.InstallFlagsTestArgs()
args = append(args, flagstests.MirrorFlagTestArgs...)
args = append(args, flagstests.PodmanFlagsTestArgs...)
args = append(args, "srv.fq.dn")
// Test function asserting that the args are properly parsed
tester := func(_ *types.GlobalFlags, flags *podmanInstallFlags,
_ *cobra.Command, args []string,
) error {
flagstests.AssertMirrorFlag(t, flags.Mirror)
flagstests.AssertInstallFlags(t, &flags.ServerFlags)
flagstests.AssertPodmanInstallFlags(t, &flags.Podman)
testutils.AssertEquals(t, "Wrong FQDN", "srv.fq.dn", args[0])
return nil
}
globalFlags := types.GlobalFlags{}
cmd := newCmd(&globalFlags, tester)
testutils.AssertHasAllFlags(t, cmd, args)
cmd.SetArgs(args)
if err := cmd.Execute(); err != nil {
t.Errorf("command failed with error: %s", err)
}
}
func TestParamsChangedConfig(t *testing.T) {
config := `
coco:
replicas: 2
hubxmlrpc:
replicas: 0
saline:
port: 8226
replicas: 1
`
dir := t.TempDir()
configPath := path.Join(dir, "config.yaml")
if err := os.WriteFile(configPath, []byte(config), 0600); err != nil {
t.Fatalf("Failed to write config file: %s", err)
}
tester := func(_ *types.GlobalFlags, flags *podmanInstallFlags,
_ *cobra.Command, _ []string,
) error {
testutils.AssertEquals(t, "Coco replicas badly parsed", 2, flags.Coco.Replicas)
testutils.AssertTrue(t, "Coco replicas not marked as changed", flags.Coco.IsChanged)
testutils.AssertEquals(t, "Hub XML-RPC API replicas badly parsed", 0, flags.HubXmlrpc.Replicas)
testutils.AssertTrue(t, "Hub XML-RPC API replicas not marked as changed", flags.HubXmlrpc.IsChanged)
testutils.AssertEquals(t, "Saline replicas badly parsed", 1, flags.Saline.Replicas)
testutils.AssertEquals(t, "Saline port badly parsed", 8226, flags.Saline.Port)
testutils.AssertTrue(t, "Saline flags not marked as changed", flags.Saline.IsChanged)
return nil
}
globalFlags := types.GlobalFlags{ConfigPath: configPath}
cmd := newCmd(&globalFlags, tester)
cmd.SetArgs([]string{"srv.fq.dn"})
if err := cmd.Execute(); err != nil {
t.Errorf("command failed with error: %s", err)
}
}
func TestParamsNoConfig(t *testing.T) {
tester := func(_ *types.GlobalFlags, flags *podmanInstallFlags,
_ *cobra.Command, _ []string,
) error {
testutils.AssertEquals(t, "Coco replicas badly parsed", 0, flags.Coco.Replicas)
testutils.AssertTrue(t, "Coco replicas marked as changed", !flags.Coco.IsChanged)
testutils.AssertEquals(t, "Hub XML-RPC API replicas badly parsed", 0, flags.HubXmlrpc.Replicas)
testutils.AssertTrue(t, "Hub XML-RPC API replicas marked as changed", !flags.HubXmlrpc.IsChanged)
testutils.AssertEquals(t, "Saline replicas badly parsed", 0, flags.Saline.Replicas)
testutils.AssertEquals(t, "Saline port badly parsed", 8216, flags.Saline.Port)
testutils.AssertTrue(t, "Saline flags marked as changed", !flags.Saline.IsChanged)
return nil
}
globalFlags := types.GlobalFlags{}
cmd := newCmd(&globalFlags, tester)
cmd.SetArgs([]string{"srv.fq.dn"})
if err := cmd.Execute(); err != nil {
t.Errorf("command failed with error: %s", err)
}
}
func TestSSLCAParams(t *testing.T) {
tester := func(_ *types.GlobalFlags, flags *podmanInstallFlags,
_ *cobra.Command, _ []string,
) error {
DBSSL := flags.ServerFlags.Installation.SSL.DB
testutils.AssertTrue(t, "SSL DB CA flags not reused", DBSSL.CA.IsThirdParty())
testutils.AssertEquals(t, "Wrong SSL DB CA root", "path/to/ca.crt", DBSSL.CA.Root)
testutils.AssertEquals(t, "Wrong SSL DB intermediate cert", "path/to/intermediate0.crt", DBSSL.CA.Intermediate[0])
testutils.AssertEquals(t, "Wrong SSL DB server cert", "path/to/srv.crt", DBSSL.Cert)
testutils.AssertEquals(t, "Wrong SSL DB server key", "path/to/srv.key", DBSSL.Key)
return nil
}
globalFlags := types.GlobalFlags{}
cmd := newCmd(&globalFlags, tester)
cmd.SetArgs([]string{"srv.fq.dn",
"--ssl-ca-root", "path/to/ca.crt",
"--ssl-ca-intermediate", "path/to/intermediate0.crt",
"--ssl-server-cert", "path/to/srv.crt",
"--ssl-server-key", "path/to/srv.key",
})
if err := cmd.Execute(); err != nil {
t.Errorf("command failed with error: %s", err)
}
}
0707010000002d000081a400000000000000000000000168ed21dd00001bf8000000000000000000000000000000000000002300000000mgradm/cmd/install/podman/utils.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package podman
import (
"errors"
"fmt"
"os/exec"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/coco"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/hub"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/pgsql"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/podman"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/saline"
adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
"github.com/uyuni-project/uyuni-tools/shared"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
shared_podman "github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/ssl"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
var systemd shared_podman.Systemd = shared_podman.NewSystemd()
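// installForPodman performs a fresh installation on podman: it checks the host and parameters,
// prepares the server and database images, sets up the network and the SSL certificates, creates
// the credentials secrets, optionally starts the local database container, runs the setup
// container and finally sets up the optional coco, hub XML-RPC and saline services.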
func installForPodman(
_ *types.GlobalFlags,
flags *podmanInstallFlags,
cmd *cobra.Command,
args []string,
) error {
hostData, err := shared_podman.InspectHost()
if err != nil {
return err
}
authFile, cleaner, err := shared_podman.PodmanLogin(hostData, flags.Installation.SCC)
if err != nil {
return utils.Error(err, L("failed to login to registry.suse.com"))
}
defer cleaner()
if hostData.HasUyuniServer {
return errors.New(
L("Server is already initialized! Uninstall before attempting new installation or use upgrade command"),
)
}
flags.Installation.CheckParameters(cmd, "podman")
if _, err := exec.LookPath("podman"); err != nil {
return errors.New(L("install podman before running this command"))
}
fqdn, err := utils.GetFqdn(args)
if err != nil {
return err
}
log.Info().Msgf(L("Setting up the server with the FQDN '%s'"), fqdn)
preparedImage, preparedPgsqlImage, err := shared_podman.PrepareImages(authFile, flags.Image, flags.Pgsql)
if err != nil {
return utils.Errorf(err, L("cannot prepare images"))
}
if err := shared_podman.SetupNetwork(false); err != nil {
return utils.Error(err, L("cannot setup network"))
}
if err := podman.PrepareSSLCertificates(
preparedImage, &flags.Installation.SSL, flags.Installation.TZ, fqdn); err != nil {
return err
}
// Create all the database credentials secrets
if err := shared_podman.CreateCredentialsSecrets(
shared_podman.DBUserSecret, flags.Installation.DB.User,
shared_podman.DBPassSecret, flags.Installation.DB.Password,
); err != nil {
return err
}
if err := shared_podman.CreateCredentialsSecrets(
shared_podman.ReportDBUserSecret, flags.Installation.ReportDB.User,
shared_podman.ReportDBPassSecret, flags.Installation.ReportDB.Password,
); err != nil {
return err
}
if flags.ServerFlags.Installation.DB.IsLocal() {
// The admin password is not needed for external databases
if err := shared_podman.CreateCredentialsSecrets(
shared_podman.DBAdminUserSecret, flags.Installation.DB.Admin.User,
shared_podman.DBAdminPassSecret, flags.Installation.DB.Admin.Password,
); err != nil {
return err
}
// Run the DB container setup if the user doesn't set a custom host name for it.
if err := pgsql.SetupPgsql(systemd, preparedPgsqlImage); err != nil {
return err
}
} else {
log.Info().Msgf(
L("Skipped database container setup to use external database %s"),
flags.ServerFlags.Installation.DB.Host,
)
}
log.Info().Msg(L("Run setup command in the container"))
if err := runSetup(preparedImage, &flags.ServerFlags, fqdn); err != nil {
return err
}
cnx := shared.NewConnection("podman", shared_podman.ServerContainerName, "")
if err := podman.WaitForSystemStart(systemd, cnx, preparedImage, flags.Installation.TZ,
flags.Installation.Debug.Java, flags.Mirror, flags.Podman.Args); err != nil {
return utils.Error(err, L("cannot wait for system start"))
}
if err := cnx.CopyCaCertificate(fqdn); err != nil {
return utils.Error(err, L("failed to add SSL CA certificate to host trusted certificates"))
}
if path, err := exec.LookPath("uyuni-payg-extract-data"); err == nil {
// the binary is installed
err = utils.RunCmdStdMapping(zerolog.DebugLevel, path)
if err != nil {
return utils.Error(err, L("failed to extract payg data"))
}
}
if err := shared_podman.EnablePodmanSocket(); err != nil {
return utils.Error(err, L("cannot enable podman socket"))
}
if err := coco.SetupCocoContainer(
systemd, authFile, flags.Image.Registry, flags.Coco, flags.Image,
flags.Installation.DB,
); err != nil {
return err
}
if err := hub.SetupHubXmlrpc(
systemd, authFile, flags.Image.Registry, flags.Image.PullPolicy, flags.Image.Tag, flags.HubXmlrpc,
); err != nil {
return err
}
if err := saline.SetupSalineContainer(
systemd, authFile, flags.Image.Registry, flags.Saline, flags.Image, flags.Installation.TZ,
); err != nil {
return err
}
return nil
}
// runSetup executes the setup script in a one-shot container.
func runSetup(image string, flags *adm_utils.ServerFlags, fqdn string) error {
env := adm_utils.GetSetupEnv(flags.Mirror, &flags.Installation, fqdn, false)
envNames := []string{}
envValues := []string{}
for key, value := range env {
envNames = append(envNames, "-e", key)
envValues = append(envValues, fmt.Sprintf("%s=%s", key, value))
}
command := []string{
"run",
"--rm",
"--shm-size=0",
"--shm-size-systemd=0",
"--name", "uyuni-setup",
"--network", shared_podman.UyuniNetwork,
"-e", "TZ=" + flags.Installation.TZ,
"--secret", shared_podman.DBUserSecret + ",type=env,target=MANAGER_USER",
"--secret", shared_podman.DBPassSecret + ",type=env,target=MANAGER_PASS",
"--secret", shared_podman.ReportDBUserSecret + ",type=env,target=REPORT_DB_USER",
"--secret", shared_podman.ReportDBPassSecret + ",type=env,target=REPORT_DB_PASS",
"-e REPORT_DB_CA_CERT=" + ssl.DBCAContainerPath,
"--secret", shared_podman.DBCASecret + ",type=mount,target=" + ssl.DBCAContainerPath,
"--secret", shared_podman.CASecret + ",type=mount,target=" + ssl.CAContainerPath,
"--secret", shared_podman.CASecret + ",type=mount,target=/usr/share/susemanager/salt/certs/RHN-ORG-TRUSTED-SSL-CERT",
"--secret", shared_podman.CASecret + ",type=mount,target=/srv/www/htdocs/pub/RHN-ORG-TRUSTED-SSL-CERT",
"--secret", shared_podman.SSLCertSecret + ",type=mount,target=" + ssl.ServerCertPath,
"--secret", shared_podman.SSLKeySecret + ",type=mount,target=" + ssl.ServerCertKeyPath,
}
for _, volume := range utils.ServerVolumeMounts {
command = append(command, "-v", fmt.Sprintf("%s:%s", volume.Name, volume.MountPath))
}
command = append(command, envNames...)
command = append(command, image)
script, err := adm_utils.GenerateSetupScript(&flags.Installation, false)
if err != nil {
return err
}
command = append(command, "/usr/bin/sh", "-e", "-c", script)
if _, err := newRunner("podman", command...).Env(envValues).StdMapping().Exec(); err != nil {
return utils.Error(err, L("server setup failed"))
}
log.Info().Msgf(L("Server set up, login on https://%[1]s with %[2]s user"), fqdn, flags.Installation.Admin.Login)
return nil
}
var newRunner = utils.NewRunner
0707010000002e000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001a00000000mgradm/cmd/install/podman0707010000002f000081a400000000000000000000000168ed21dd00000751000000000000000000000000000000000000002300000000mgradm/cmd/install/shared/flags.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package shared
import (
"github.com/spf13/cobra"
cmd_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
// AddInstallFlags add flags to install command.
func AddInstallFlags(cmd *cobra.Command) {
cmd.Flags().String("tz", "", L("Time zone to set on the server. Defaults to the host timezone"))
cmd.Flags().String("email", "admin@example.com", L("Administrator e-mail"))
cmd.Flags().String("emailfrom", "notifications@example.com", L("E-Mail sending the notifications"))
cmd.Flags().String("issParent", "", L("InterServerSync v1 parent FQDN"))
cmd.Flags().Bool("tftp", true, L("Enable TFTP"))
cmd_utils.AddServerFlags(cmd)
cmd.Flags().Bool("debug-java", false, L("Enable tomcat and taskomatic remote debugging"))
cmd_utils.AddCocoFlag(cmd)
cmd_utils.AddHubXmlrpcFlags(cmd)
cmd_utils.AddSalineFlag(cmd)
cmd.Flags().String("admin-login", "admin", L("Administrator user name"))
cmd.Flags().String("admin-password", "", L("Administrator password"))
cmd.Flags().String("admin-firstName", "Administrator", L("First name of the administrator"))
cmd.Flags().String("admin-lastName", "McAdmin", L("Last name of the administrator"))
cmd.Flags().String("organization", "Organization", L("First organization name"))
_ = utils.AddFlagHelpGroup(cmd, &utils.Group{ID: "first-user", Title: L("First User Flags")})
_ = utils.AddFlagToHelpGroupID(cmd, "admin-login", "first-user")
_ = utils.AddFlagToHelpGroupID(cmd, "admin-password", "first-user")
_ = utils.AddFlagToHelpGroupID(cmd, "admin-firstName", "first-user")
_ = utils.AddFlagToHelpGroupID(cmd, "admin-lastName", "first-user")
_ = utils.AddFlagToHelpGroupID(cmd, "organization", "first-user")
}
07070100000030000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001a00000000mgradm/cmd/install/shared07070100000031000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001300000000mgradm/cmd/install07070100000032000081a400000000000000000000000168ed21dd0000084a000000000000000000000000000000000000002f00000000mgradm/cmd/migrate/kubernetes/dataExtractor.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"errors"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
"gopkg.in/yaml.v2"
)
// MigrationData represents the files and data extracted from the migration sync phase.
type MigrationData struct {
CaKey string
CaCert string
Data *utils.InspectResult
ServerCert string
ServerKey string
}
func extractMigrationData(
namespace string,
image string,
pullPolicy string,
pullSecret string,
volume types.VolumeMount,
) (*MigrationData, error) {
// Run a pod reading the extracted data files from the volume.
// The data are written as a YAML dictionary where the key is the file name and the value its content.
out, err := kubernetes.RunPodLogs(namespace, "uyuni-data-extractor", image,
pullPolicy, pullSecret, []types.VolumeMount{volume},
"sh", "-c",
"for f in /var/lib/uyuni-tools/*; do echo \"`basename $f`: |2\"; cat $f | sed 's/^/ /'; done",
)
if err != nil {
return nil, err
}
// Parse the content
files := make(map[string]string)
if err := yaml.Unmarshal(out, &files); err != nil {
return nil, utils.Errorf(err, L("failed to parse data extractor pod output"))
}
var result MigrationData
for file, content := range files {
if file == "RHN-ORG-PRIVATE-SSL-KEY" {
result.CaKey = content
} else if file == "RHN-ORG-TRUSTED-SSL-CERT" {
result.CaCert = content
} else if file == "spacewalk.crt" {
result.ServerCert = content
} else if file == "spacewalk.key" {
result.ServerKey = content
} else if file == "data" {
parsedData, err := utils.ReadInspectData[utils.InspectResult]([]byte(content))
if err != nil {
return nil, utils.Errorf(err, L("failed to parse migration data file"))
}
result.Data = parsedData
}
}
if result.Data == nil {
return nil, errors.New(L("found no data file after migration"))
}
return &result, nil
}
07070100000033000081a400000000000000000000000168ed21dd00000d7c000000000000000000000000000000000000002c00000000mgradm/cmd/migrate/kubernetes/kubernetes.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/migrate/shared"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes"
cmd_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[kubernetes.KubernetesServerFlags]) *cobra.Command {
cmd := &cobra.Command{
Use: "kubernetes [source server FQDN]",
Short: L("Migrate a remote server to containers running on a kubernetes cluster"),
Long: L(`Migrate a remote server to containers running on a kubernetes cluster
This migration command assumes a few things:
* the SSH configuration for the source server is complete, including user and
all needed options to connect to the machine,
* kubectl and helm are installed locally,
* a working kubectl configuration is set up to connect to the cluster to deploy to
The SSH parameters may be left empty if the target Kubernetes namespace contains:
* an uyuni-migration-config ConfigMap with config and known_hosts items,
* an uyuni-migration-key secret with key and key.pub items with a passwordless key.
When migrating a server with an automatically generated SSL Root CA certificate, the private key
password will be required to convert it to RSA in a kubernetes secret.
This is not needed if the source server does not have a generated SSL CA certificate.
`),
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
var flags kubernetes.KubernetesServerFlags
flagsUpdater := func(v *viper.Viper) {
flags.ServerFlags.Coco.IsChanged = v.IsSet("coco.replicas")
flags.ServerFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas")
flags.ServerFlags.Saline.IsChanged = v.IsSet("saline.replicas") || v.IsSet("saline.port")
flags.ServerFlags.Pgsql.IsChanged = v.IsSet("pgsql.replicas")
}
return utils.CommandHelper(globalFlags, cmd, args, &flags, flagsUpdater, run)
},
}
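// A sketch of the objects the help text above refers to when the SSH flags are left empty.
// The ConfigMap and secret names and their item keys are taken from the text; the file paths
// and the namespace are placeholders:
//
//   kubectl create configmap uyuni-migration-config \
//     --from-file=config=./ssh_config --from-file=known_hosts=./known_hosts -n <namespace>
//   kubectl create secret generic uyuni-migration-key \
//     --from-file=key=./id_ed25519 --from-file=key.pub=./id_ed25519.pub -n <namespace>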
cmd_utils.AddMirrorFlag(cmd)
shared.AddMigrateFlags(cmd)
cmd_utils.AddHelmInstallFlag(cmd)
cmd_utils.AddVolumesFlags(cmd)
cmd.Flags().String("ssh-key-public", "", L("Path to the SSH public key to use to connect to the source server"))
cmd.Flags().String("ssh-key-private", "",
L("Path to the passwordless SSH private key to use to connect to the source server"),
)
cmd.Flags().String("ssh-knownhosts", "", L("Path to the SSH known_hosts file to use to connect to the source server"))
cmd.Flags().String("ssh-config", "", L("Path to the SSH configuration file to use to connect to the source server"))
const sshGroupID = "ssh"
_ = utils.AddFlagHelpGroup(cmd, &utils.Group{ID: sshGroupID, Title: L("SSH Configuration Flags")})
_ = utils.AddFlagToHelpGroupID(cmd, "ssh-key-public", sshGroupID)
_ = utils.AddFlagToHelpGroupID(cmd, "ssh-key-private", sshGroupID)
_ = utils.AddFlagToHelpGroupID(cmd, "ssh-knownhosts", sshGroupID)
_ = utils.AddFlagToHelpGroupID(cmd, "ssh-config", sshGroupID)
return cmd
}
// NewCommand for kubernetes migration.
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
return newCmd(globalFlags, migrateToKubernetes)
}
07070100000034000081a400000000000000000000000168ed21dd00000e3d000000000000000000000000000000000000003100000000mgradm/cmd/migrate/kubernetes/kubernetes_test.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"testing"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes"
"github.com/uyuni-project/uyuni-tools/shared/testutils"
"github.com/uyuni-project/uyuni-tools/shared/testutils/flagstests"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
func TestParamsParsing(t *testing.T) {
args := []string{
"--prepare",
"--user", "sudoer",
"--ssl-password", "sslsecret",
"--ssh-key-public", "path/ssh.pub",
"--ssh-key-private", "path/ssh",
"--ssh-knownhosts", "path/known_hosts",
"--ssh-config", "path/config",
"source.fq.dn",
}
args = append(args, flagstests.MirrorFlagTestArgs...)
args = append(args, flagstests.SCCFlagTestArgs...)
args = append(args, flagstests.ImageFlagsTestArgs...)
args = append(args, flagstests.DBUpdateImageFlagTestArgs...)
args = append(args, flagstests.CocoFlagsTestArgs...)
args = append(args, flagstests.HubXmlrpcFlagsTestArgs...)
args = append(args, flagstests.SalineFlagsTestArgs...)
args = append(args, flagstests.ServerKubernetesFlagsTestArgs...)
args = append(args, flagstests.VolumesFlagsTestExpected...)
args = append(args, flagstests.PgsqlFlagsTestArgs...)
args = append(args, flagstests.DBFlagsTestArgs...)
args = append(args, flagstests.ReportDBFlagsTestArgs...)
args = append(args, flagstests.InstallDBSSLFlagsTestArgs...)
args = append(args, flagstests.InstallSSLFlagsTestArgs...)
args = append(args, flagstests.SSLGenerationFlagsTestArgs...)
// Test function asserting that the args are properly parsed
tester := func(_ *types.GlobalFlags, flags *kubernetes.KubernetesServerFlags,
_ *cobra.Command, args []string,
) error {
testutils.AssertTrue(t, "Prepare not set", flags.Migration.Prepare)
flagstests.AssertMirrorFlag(t, flags.Mirror)
testutils.AssertEquals(t, "Error parsing --user", "sudoer", flags.Migration.User)
testutils.AssertEquals(t, "Wrong FQDN", "source.fq.dn", args[0])
flagstests.AssertImageFlag(t, &flags.Image)
flagstests.AssertDBUpgradeImageFlag(t, &flags.DBUpgradeImage)
flagstests.AssertCocoFlag(t, &flags.Coco)
flagstests.AssertHubXmlrpcFlag(t, &flags.HubXmlrpc)
flagstests.AssertSalineFlag(t, &flags.Saline)
flagstests.AssertSCCFlag(t, &flags.Installation.SCC)
flagstests.AssertPgsqlFlag(t, &flags.Pgsql)
flagstests.AssertDBFlag(t, &flags.Installation.DB)
flagstests.AssertReportDBFlag(t, &flags.Installation.ReportDB)
flagstests.AssertInstallDBSSLFlag(t, &flags.Installation.SSL.DB)
flagstests.AssertInstallSSLFlag(t, &flags.Installation.SSL)
flagstests.AssertSSLGenerationFlag(t, &flags.Installation.SSL.SSLCertGenerationFlags)
testutils.AssertEquals(t, "Error parsing --ssl-password", "sslsecret", flags.Installation.SSL.Password)
flagstests.AssertServerKubernetesFlags(t, &flags.Kubernetes)
flagstests.AssertVolumesFlags(t, &flags.Volumes)
testutils.AssertEquals(t, "Error parsing --ssh-key-public", "path/ssh.pub", flags.SSH.Key.Public)
testutils.AssertEquals(t, "Error parsing --ssh-key-private", "path/ssh", flags.SSH.Key.Private)
testutils.AssertEquals(t, "Error parsing --ssh-knownhosts", "path/known_hosts", flags.SSH.Knownhosts)
testutils.AssertEquals(t, "Error parsing --ssh-config", "path/config", flags.SSH.Config)
return nil
}
globalFlags := types.GlobalFlags{}
cmd := newCmd(&globalFlags, tester)
testutils.AssertHasAllFlags(t, cmd, args)
cmd.SetArgs(args)
if err := cmd.Execute(); err != nil {
t.Errorf("command failed with error: %s", err)
}
}
07070100000035000081a400000000000000000000000168ed21dd00000e69000000000000000000000000000000000000002e00000000mgradm/cmd/migrate/kubernetes/migrationJob.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"github.com/uyuni-project/uyuni-tools/mgradm/shared/templates"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
batch "k8s.io/api/batch/v1"
core "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
)
const migrationJobName = "uyuni-data-sync"
// startMigrationJob prepares and starts the data synchronization job.
//
// This assumes the SSH key is stored in an uyuni-migration-key secret
// and the SSH config in an uyuni-migration-ssh ConfigMap with config and known_hosts keys.
func startMigrationJob(
namespace string,
serverImage string,
pullPolicy string,
pullSecret string,
fqdn string,
user string,
prepare bool,
mounts []types.VolumeMount,
) (string, error) {
job, err := getMigrationJob(
namespace,
serverImage,
pullPolicy,
pullSecret,
mounts,
fqdn,
user,
prepare,
)
if err != nil {
return "", err
}
// Run the job
return job.ObjectMeta.Name, kubernetes.Apply([]runtime.Object{job}, L("failed to run the migration job"))
}
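// getMigrationJob builds the Kubernetes job object running the migration script,
// mounting the SSH key secret and the SSH configuration ConfigMap into the container.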
func getMigrationJob(
namespace string,
image string,
pullPolicy string,
pullSecret string,
mounts []types.VolumeMount,
sourceFqdn string,
user string,
prepare bool,
) (*batch.Job, error) {
// Add mount and volume for the uyuni-migration-key secret with key and key.pub items
keyMount := core.VolumeMount{Name: "ssh-key", MountPath: "/root/.ssh/id_rsa", SubPath: "id_rsa"}
pubKeyMount := core.VolumeMount{Name: "ssh-key", MountPath: "/root/.ssh/id_rsa.pub", SubPath: "id_rsa.pub"}
keyVolume := kubernetes.CreateSecretVolume("ssh-key", "uyuni-migration-key")
var keyMode int32 = 0600
keyVolume.VolumeSource.Secret.Items = []core.KeyToPath{
{Key: "key", Path: "id_rsa", Mode: &keyMode},
{Key: "key.pub", Path: "id_rsa.pub"},
}
// Add mounts and volume for the uyuni-migration-ssh config map
// We need one mount for each file using subPath to not have 2 mounts on the same folder
knownHostsMount := core.VolumeMount{Name: "ssh-conf", MountPath: "/root/.ssh/known_hosts", SubPath: "known_hosts"}
sshConfMount := core.VolumeMount{Name: "ssh-conf", MountPath: "/root/.ssh/config", SubPath: "config"}
sshVolume := kubernetes.CreateConfigVolume("ssh-conf", "uyuni-migration-ssh")
// Prepare the script
scriptData := templates.MigrateScriptTemplateData{
Volumes: utils.ServerVolumeMounts,
SourceFqdn: sourceFqdn,
User: user,
Kubernetes: true,
Prepare: prepare,
}
job, err := kubernetes.GetScriptJob(namespace, migrationJobName, image, pullPolicy, pullSecret, mounts, scriptData)
if err != nil {
return nil, err
}
// Append the extra volumes and mounts
volumeMounts := job.Spec.Template.Spec.Containers[0].VolumeMounts
volumes := job.Spec.Template.Spec.Volumes
volumeMounts = append(volumeMounts, keyMount, pubKeyMount, knownHostsMount, sshConfMount)
volumes = append(volumes, keyVolume, sshVolume)
job.Spec.Template.Spec.Containers[0].VolumeMounts = volumeMounts
job.Spec.Template.Spec.Volumes = volumes
initScript := `cp -a /etc/systemd/system/multi-user.target.wants/. /mnt/etc-systemd-multi`
job.Spec.Template.Spec.InitContainers = []core.Container{
{
Name: "init-volumes",
Image: image,
ImagePullPolicy: kubernetes.GetPullPolicy(pullPolicy),
Command: []string{"sh", "-c", initScript},
VolumeMounts: []core.VolumeMount{
{Name: "etc-systemd-multi", MountPath: "/mnt/etc-systemd-multi"},
},
},
}
return job, nil
}
07070100000036000081a400000000000000000000000168ed21dd0000011a000000000000000000000000000000000000002900000000mgradm/cmd/migrate/kubernetes/nobuild.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build nok8s
package kubernetes
import (
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
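// NewCommand returns nil since the command is not available when built without kubernetes support.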
func NewCommand(_ *types.GlobalFlags) *cobra.Command {
return nil
}
07070100000037000081a400000000000000000000000168ed21dd000013bd000000000000000000000000000000000000002500000000mgradm/cmd/migrate/kubernetes/ssh.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"encoding/json"
"errors"
"fmt"
"os"
"strings"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/utils"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
const (
sshSecretName = "uyuni-migration-key"
sshConfigName = "uyuni-migration-ssh"
)
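// checkSSH ensures the SSH key secret and SSH configuration ConfigMap exist in the namespace,
// creating them from the flags when the corresponding paths are provided.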
func checkSSH(namespace string, flags *adm_utils.SSHFlags) error {
if exists, err := checkSSHKey(namespace); err != nil {
return err
} else if !exists && flags.Key.Public != "" && flags.Key.Private != "" {
if err := createSSHSecret(namespace, flags.Key.Private, flags.Key.Public); err != nil {
return err
}
} else if !exists {
return errors.New(L("no SSH key found to use for migration"))
}
if exists, err := checkSSHConfig(namespace); err != nil {
return err
} else if !exists && flags.Knownhosts != "" {
// The config may be empty, but not the known_hosts
if err := createSSHConfig(namespace, flags.Config, flags.Knownhosts); err != nil {
return err
}
} else if !exists {
return errors.New(L("no SSH known_hosts and configuration found to use for migration"))
}
return nil
}
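// checkSSHKey reports whether the SSH key secret exists and verifies it contains the key and key.pub items.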
func checkSSHKey(namespace string) (bool, error) {
exists := false
out, err := utils.RunCmdOutput(
zerolog.DebugLevel, "kubectl", "get", "secret", "-n", namespace, sshSecretName, "-o", "jsonpath={.data}",
)
if err != nil {
if strings.Contains(err.Error(), "NotFound") {
log.Debug().Msg("Not found!")
// The secret was not found, it's not really an error
return exists, nil
}
return exists, utils.Errorf(err, L("failed to get %s SSH key secret"), sshSecretName)
}
exists = true
var data map[string]string
if err := json.Unmarshal(out, &data); err != nil {
return exists, err
}
for _, key := range []string{"key", "key.pub"} {
if value, ok := data[key]; !ok || value == "" {
return exists, fmt.Errorf(L("%[1]s secret misses the %[2]s value"), sshSecretName, key)
}
}
return exists, nil
}
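// createSSHSecret creates the SSH migration secret from the given private and public key files.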
func createSSHSecret(namespace string, keyPath string, pubKeyPath string) error {
keyContent, err := os.ReadFile(keyPath)
if err != nil {
return utils.Errorf(err, L("failed to read key file %s"), keyPath)
}
pubContent, err := os.ReadFile(pubKeyPath)
if err != nil {
return utils.Errorf(err, L("failed to read public key file %s"), pubKeyPath)
}
secret := core.Secret{
TypeMeta: meta.TypeMeta{APIVersion: "v1", Kind: "Secret"},
ObjectMeta: meta.ObjectMeta{
Namespace: namespace,
Name: sshSecretName,
Labels: kubernetes.GetLabels(kubernetes.ServerApp, ""),
},
// It seems serializing this object automatically transforms the secrets to base64.
Data: map[string][]byte{
"key": keyContent,
"key.pub": pubContent,
},
}
return kubernetes.Apply([]runtime.Object{&secret}, L("failed to create the SSH migration secret"))
}
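// checkSSHConfig reports whether the SSH ConfigMap exists and verifies its known_hosts and config items.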
func checkSSHConfig(namespace string) (bool, error) {
exists := false
out, err := utils.RunCmdOutput(
zerolog.DebugLevel, "kubectl", "get", "cm", "-n", namespace, sshConfigName, "-o", "jsonpath={.data}",
)
if err != nil {
if strings.Contains(err.Error(), "NotFound") {
// The config map was not found, it's not really an error
return exists, nil
}
return exists, utils.Errorf(err, L("failed to get %s SSH ConfigMap"), sshConfigName)
}
exists = true
var data map[string]string
if err := json.Unmarshal(out, &data); err != nil {
return exists, utils.Errorf(err, L("failed to parse SSH ConfigMap data"))
}
// The known_hosts has to contain at least the entry for the source server.
if value, ok := data["known_hosts"]; !ok || value == "" {
return exists, fmt.Errorf(L("%[1]s ConfigMap misses the %[2]s value"), sshConfigName, "known_hosts")
}
// An empty config is not an error.
if _, ok := data["config"]; !ok {
return exists, fmt.Errorf(L("%[1]s ConfigMap misses the %[2]s value"), sshConfigName, "config")
}
return exists, nil
}
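// createSSHConfig creates the SSH migration ConfigMap from the given config and known_hosts files.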
func createSSHConfig(namespace string, configPath string, knownhostsPath string) error {
configContent, err := os.ReadFile(configPath)
if err != nil {
return utils.Errorf(err, L("failed to read SSH config file %s"), configPath)
}
knownhostsContent, err := os.ReadFile(knownhostsPath)
if err != nil {
return utils.Errorf(err, L("failed to read SSH known_hosts file %s"), knownhostsPath)
}
configMap := core.ConfigMap{
TypeMeta: meta.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
ObjectMeta: meta.ObjectMeta{Namespace: namespace, Name: sshConfigName},
Data: map[string]string{
"config": string(configContent),
"known_hosts": string(knownhostsContent),
},
}
return kubernetes.Apply([]runtime.Object{&configMap}, L("failed to create the SSH migration ConfigMap"))
}
07070100000038000081a400000000000000000000000168ed21dd0000101a000000000000000000000000000000000000002700000000mgradm/cmd/migrate/kubernetes/utils.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"os"
"path"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes"
shared_kubernetes "github.com/uyuni-project/uyuni-tools/shared/kubernetes"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
const migrationDataPvcName = "migration-data"
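// migrateToKubernetes runs the data synchronization job on the cluster,
// then reconciles the server deployment using the extracted configuration and SSL data.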
func migrateToKubernetes(
_ *types.GlobalFlags,
flags *kubernetes.KubernetesServerFlags,
_ *cobra.Command,
args []string,
) error {
namespace := flags.Kubernetes.Uyuni.Namespace
// Create the namespace if not present
if err := kubernetes.CreateNamespace(namespace); err != nil {
return err
}
// Check for the required SSH key and configuration
if err := checkSSH(namespace, &flags.SSH); err != nil {
return err
}
serverImage, err := utils.ComputeImage(flags.Image.Registry, utils.DefaultTag, flags.Image)
if err != nil {
return utils.Errorf(err, L("failed to compute image URL"))
}
fqdn := args[0]
if err := utils.IsValidFQDN(fqdn); err != nil {
return err
}
mounts := kubernetes.GetServerMounts()
mounts = kubernetes.TuneMounts(mounts, &flags.Volumes)
// Add a mount and volume for the extracted data
migrationDataVolume := types.VolumeMount{Name: migrationDataPvcName, MountPath: "/var/lib/uyuni-tools"}
migrationMounts := append(mounts, migrationDataVolume)
if err := shared_kubernetes.CreatePersistentVolumeClaims(namespace, migrationMounts); err != nil {
return err
}
// Create a secret using SCC credentials if any are provided
pullSecret, err := shared_kubernetes.GetRegistrySecret(
flags.Kubernetes.Uyuni.Namespace, &flags.Installation.SCC, shared_kubernetes.ServerApp,
)
if err != nil {
return err
}
jobName, err := startMigrationJob(
namespace,
serverImage,
flags.Image.PullPolicy,
pullSecret,
fqdn,
flags.Migration.User,
flags.Migration.Prepare,
migrationMounts,
)
if err != nil {
return err
}
// Wait forever for the job to finish: its duration depends on the amount of data to copy
if err := shared_kubernetes.WaitForJob(namespace, jobName, -1); err != nil {
return err
}
// Read the extracted data from the migration volume
extractedData, err := extractMigrationData(
namespace, serverImage, flags.Image.PullPolicy, pullSecret, migrationDataVolume,
)
if err != nil {
return err
}
flags.Installation.TZ = extractedData.Data.Timezone
flags.Installation.Debug.Java = extractedData.Data.Debug
if extractedData.Data.HasHubXmlrpcAPI {
flags.HubXmlrpc.Replicas = 1
flags.HubXmlrpc.IsChanged = true
}
flags.Installation.DB.User = extractedData.Data.DBUser
flags.Installation.DB.Password = extractedData.Data.DBPassword
// TODO Are those two really needed in migration?
flags.Installation.DB.Name = extractedData.Data.DBName
flags.Installation.DB.Port = extractedData.Data.DBPort
sslDir, cleaner, err := utils.TempDir()
if err != nil {
return err
}
defer cleaner()
// Extract the SSL data as files and pass them as arguments to share code with installation.
if err := writeToFile(
extractedData.CaCert, path.Join(sslDir, "ca.crt"), &flags.Installation.SSL.Ca.Root,
); err != nil {
return err
}
// The CA key shouldn't be stored as a temporary file.
if extractedData.CaKey != "" {
flags.Installation.SSL.Ca.Key = extractedData.CaKey
}
if err := writeToFile(
extractedData.ServerCert, path.Join(sslDir, "srv.crt"), &flags.Installation.SSL.Server.Cert,
); err != nil {
return err
}
if err := writeToFile(
extractedData.ServerKey, path.Join(sslDir, "srv.key"), &flags.Installation.SSL.Server.Key,
); err != nil {
return err
}
return kubernetes.Reconcile(flags, fqdn)
}
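// writeToFile writes the content to the file with restrictive permissions and sets the flag to its path.
// Nothing is written if the content is empty.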
func writeToFile(content string, file string, flag *string) error {
if content != "" {
if err := os.WriteFile(file, []byte(content), 0600); err != nil {
return utils.Errorf(err, L("failed to write certificate to %s"), file)
}
*flag = file
}
return nil
}
07070100000039000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001e00000000mgradm/cmd/migrate/kubernetes0707010000003a000081a400000000000000000000000168ed21dd00000374000000000000000000000000000000000000001e00000000mgradm/cmd/migrate/migrate.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package migrate
import (
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/migrate/kubernetes"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/migrate/podman"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
// NewCommand for migration.
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
migrateCmd := &cobra.Command{
Use: "migrate [source server FQDN]",
GroupID: "deploy",
Short: L("Migrate a remote server to containers"),
Long: L("Migrate a remote server to containers"),
}
migrateCmd.AddCommand(podman.NewCommand(globalFlags))
if kubernetesCmd := kubernetes.NewCommand(globalFlags); kubernetesCmd != nil {
migrateCmd.AddCommand(kubernetesCmd)
}
return migrateCmd
}
0707010000003b000081a400000000000000000000000168ed21dd00000878000000000000000000000000000000000000002400000000mgradm/cmd/migrate/podman/podman.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package podman
import (
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/migrate/shared"
adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
podman_utils "github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
type podmanMigrateFlags struct {
adm_utils.ServerFlags `mapstructure:",squash"`
Podman podman_utils.PodmanFlags
}
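// newCmd builds the podman migration command, wiring the given run function so tests can inject their own.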
func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[podmanMigrateFlags]) *cobra.Command {
migrateCmd := &cobra.Command{
Use: "podman [source server FQDN]",
Short: L("Migrate a remote server to containers running on podman"),
Long: L(`Migrate a remote server to containers running on podman
This migration command assumes a few things:
* the SSH configuration for the source server is complete, including user and
all needed options to connect to the machine,
* an SSH agent is started and the key to use to connect to the server is added to it,
* podman is installed locally
NOTE: migrating to a remote podman is not supported yet!
`),
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
var flags podmanMigrateFlags
flagsUpdater := func(v *viper.Viper) {
flags.ServerFlags.Coco.IsChanged = v.IsSet("coco.replicas")
flags.ServerFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas")
flags.ServerFlags.Saline.IsChanged = v.IsSet("saline.replicas") || v.IsSet("saline.port")
flags.ServerFlags.Pgsql.IsChanged = v.IsSet("pgsql.replicas")
}
return utils.CommandHelper(globalFlags, cmd, args, &flags, flagsUpdater, run)
},
}
adm_utils.AddMirrorFlag(migrateCmd)
shared.AddMigrateFlags(migrateCmd)
podman_utils.AddPodmanArgFlag(migrateCmd)
return migrateCmd
}
// NewCommand for podman migration.
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
return newCmd(globalFlags, migrateToPodman)
}
0707010000003c000081a400000000000000000000000168ed21dd000005b9000000000000000000000000000000000000002900000000mgradm/cmd/migrate/podman/podman_test.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package podman
import (
"testing"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared/testutils"
"github.com/uyuni-project/uyuni-tools/shared/testutils/flagstests"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
func TestParamsParsing(t *testing.T) {
args := []string{
"--prepare",
"--user", "sudoer",
"source.fq.dn",
}
args = append(args, flagstests.MirrorFlagTestArgs...)
args = append(args, flagstests.SCCFlagTestArgs...)
args = append(args, flagstests.PodmanFlagsTestArgs...)
args = append(args, flagstests.ServerFlagsTestArgs()...)
// Test function asserting that the args are properly parsed
tester := func(_ *types.GlobalFlags, flags *podmanMigrateFlags,
_ *cobra.Command, args []string,
) error {
testutils.AssertTrue(t, "Prepare not set", flags.Migration.Prepare)
flagstests.AssertMirrorFlag(t, flags.Mirror)
testutils.AssertEquals(t, "Error parsing --user", "sudoer", flags.Migration.User)
testutils.AssertEquals(t, "Wrong FQDN", "source.fq.dn", args[0])
flagstests.AssertPodmanInstallFlags(t, &flags.Podman)
flagstests.AssertServerFlags(t, &flags.ServerFlags)
return nil
}
globalFlags := types.GlobalFlags{}
cmd := newCmd(&globalFlags, tester)
testutils.AssertHasAllFlags(t, cmd, args)
cmd.SetArgs(args)
if err := cmd.Execute(); err != nil {
t.Errorf("command failed with error: %s", err)
}
}
0707010000003d000081a400000000000000000000000168ed21dd0000059d000000000000000000000000000000000000002300000000mgradm/cmd/migrate/podman/utils.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package podman
import (
"errors"
"os/exec"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/podman"
podman_utils "github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/types"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
var systemd podman_utils.Systemd = podman_utils.NewSystemd()
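// migrateToPodman logs in to the registry, checks the upgrade parameters and that podman is installed,
// then delegates the migration to the podman backend.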
func migrateToPodman(
_ *types.GlobalFlags,
flags *podmanMigrateFlags,
cmd *cobra.Command,
args []string,
) error {
hostData, err := podman_utils.InspectHost()
if err != nil {
return err
}
authFile, cleaner, err := podman_utils.PodmanLogin(hostData, flags.Installation.SCC)
if err != nil {
return utils.Errorf(err, L("failed to login to registry.suse.com"))
}
defer cleaner()
flags.Installation.CheckUpgradeParameters(cmd, "podman")
if _, err := exec.LookPath("podman"); err != nil {
return errors.New(L("install podman before running this command"))
}
return podman.Migrate(
systemd, authFile,
flags.Image.Registry,
flags.Installation.DB,
flags.Installation.ReportDB,
flags.Installation.SSL,
flags.Image,
flags.DBUpgradeImage,
flags.Coco,
flags.HubXmlrpc,
flags.Saline,
flags.Pgsql,
flags.Migration.Prepare,
flags.Migration.User,
flags.Mirror,
flags.Podman,
args,
)
}
0707010000003e000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001a00000000mgradm/cmd/migrate/podman0707010000003f000081a400000000000000000000000168ed21dd00000333000000000000000000000000000000000000002300000000mgradm/cmd/migrate/shared/flags.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package shared
import (
"github.com/spf13/cobra"
adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
)
// AddMigrateFlags adds migration flags to a command.
func AddMigrateFlags(cmd *cobra.Command) {
cmd.Flags().Bool("prepare", false, L("Prepare the migration - copy the data without stopping the source server."))
cmd.Flags().String("user", "root",
L("User on the source server. Non-root user must have passwordless sudo privileges (NOPASSWD tag in /etc/sudoers)."),
)
adm_utils.AddServerFlags(cmd)
adm_utils.AddDBUpgradeImageFlag(cmd)
adm_utils.AddUpgradeCocoFlag(cmd)
adm_utils.AddUpgradeHubXmlrpcFlags(cmd)
adm_utils.AddUpgradeSalineFlag(cmd)
}
07070100000040000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001a00000000mgradm/cmd/migrate/shared07070100000041000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001300000000mgradm/cmd/migrate07070100000042000081a400000000000000000000000168ed21dd0000030e000000000000000000000000000000000000002100000000mgradm/cmd/restart/kubernetes.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package restart
import (
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
func kubernetesRestart(
_ *types.GlobalFlags,
_ *restartFlags,
_ *cobra.Command,
_ []string,
) error {
cnx := shared.NewConnection("kubectl", "", kubernetes.ServerFilter)
namespace, err := cnx.GetNamespace("")
if err != nil {
return utils.Errorf(err, L("failed retrieving namespace"))
}
return kubernetes.Restart(namespace, kubernetes.ServerApp)
}
07070100000043000081a400000000000000000000000168ed21dd000001ba000000000000000000000000000000000000002300000000mgradm/cmd/restart/nokubernetes.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build nok8s
package restart
import (
"errors"
"github.com/spf13/cobra"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
func kubernetesRestart(
_ *types.GlobalFlags,
_ *restartFlags,
_ *cobra.Command,
_ []string,
) error {
return errors.New(L("built without kubernetes support"))
}
07070100000044000081a400000000000000000000000168ed21dd000002b7000000000000000000000000000000000000001d00000000mgradm/cmd/restart/podman.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package restart
import (
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
var systemd podman.Systemd = podman.NewSystemd()
func podmanRestart(
_ *types.GlobalFlags,
_ *restartFlags,
_ *cobra.Command,
_ []string,
) error {
return utils.JoinErrors(
systemd.RestartService(podman.DBService),
systemd.RestartService(podman.ServerService),
systemd.RestartInstantiated(podman.ServerAttestationService),
systemd.RestartInstantiated(podman.HubXmlrpcService),
)
}
07070100000045000081a400000000000000000000000168ed21dd00000574000000000000000000000000000000000000001e00000000mgradm/cmd/restart/restart.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package restart
import (
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
type restartFlags struct {
Backend string
}
func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[restartFlags]) *cobra.Command {
restartCmd := &cobra.Command{
Use: "restart",
GroupID: "management",
Short: L("Restart the server"),
Long: L("Restart the server"),
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
var flags restartFlags
return utils.CommandHelper(globalFlags, cmd, args, &flags, nil, run)
},
}
restartCmd.SetUsageTemplate(restartCmd.UsageTemplate())
if utils.KubernetesBuilt {
utils.AddBackendFlag(restartCmd)
}
return restartCmd
}
// NewCommand to restart server.
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
return newCmd(globalFlags, restart)
}
func restart(globalFlags *types.GlobalFlags, flags *restartFlags, cmd *cobra.Command, args []string) error {
fn, err := shared.ChoosePodmanOrKubernetes(cmd.Flags(), podmanRestart, kubernetesRestart)
if err != nil {
return err
}
return fn(globalFlags, flags, cmd, args)
}
07070100000046000081a400000000000000000000000168ed21dd000003d1000000000000000000000000000000000000002300000000mgradm/cmd/restart/restart_test.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package restart
import (
"testing"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared/testutils"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
func TestParamsParsing(t *testing.T) {
args := []string{}
if utils.KubernetesBuilt {
args = append(args, "--backend", "kubectl")
}
// Test function asserting that the args are properly parsed
tester := func(_ *types.GlobalFlags, flags *restartFlags,
_ *cobra.Command, _ []string,
) error {
if utils.KubernetesBuilt {
testutils.AssertEquals(t, "Error parsing --backend", "kubectl", flags.Backend)
}
return nil
}
globalFlags := types.GlobalFlags{}
cmd := newCmd(&globalFlags, tester)
testutils.AssertHasAllFlags(t, cmd, args)
cmd.SetArgs(args)
if err := cmd.Execute(); err != nil {
t.Errorf("command failed with error: %s", err)
}
}
07070100000047000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001300000000mgradm/cmd/restart07070100000048000081a400000000000000000000000168ed21dd000001b1000000000000000000000000000000000000001f00000000mgradm/cmd/scale/kubernetes.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package scale
import (
"errors"
"github.com/spf13/cobra"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
func kubernetesScale(
_ *types.GlobalFlags,
_ *scaleFlags,
_ *cobra.Command,
_ []string,
) error {
return errors.New(L("kubernetes not supported yet"))
}
07070100000049000081a400000000000000000000000168ed21dd000001b4000000000000000000000000000000000000002100000000mgradm/cmd/scale/nokubernetes.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build nok8s
package scale
import (
"errors"
"github.com/spf13/cobra"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
func kubernetesScale(
_ *types.GlobalFlags,
_ *scaleFlags,
_ *cobra.Command,
_ []string,
) error {
return errors.New(L("built without kubernetes support"))
}
0707010000004a000081a400000000000000000000000168ed21dd000003b1000000000000000000000000000000000000001b00000000mgradm/cmd/scale/podman.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package scale
import (
"errors"
"fmt"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/types"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
)
var systemd podman.Systemd = podman.NewSystemd()
func podmanScale(
_ *types.GlobalFlags,
flags *scaleFlags,
_ *cobra.Command,
args []string,
) error {
newReplicas := flags.Replicas
service := args[0]
if service == podman.ServerAttestationService {
return systemd.ScaleService(newReplicas, service)
}
if service == podman.HubXmlrpcService || service == podman.SalineService {
if newReplicas > 1 {
return errors.New(L("Multiple container replicas are not currently supported."))
}
return systemd.ScaleService(newReplicas, service)
}
return fmt.Errorf(L("service not allowing to be scaled: %s"), service)
}
0707010000004b000081a400000000000000000000000168ed21dd0000069a000000000000000000000000000000000000001a00000000mgradm/cmd/scale/scale.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package scale
import (
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
type scaleFlags struct {
Backend string
Replicas int
}
func addScaleFlags(cmd *cobra.Command) {
cmd.Flags().Int("replicas", 0, L("How many replicas of a service should be started."))
}
func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[scaleFlags]) *cobra.Command {
scaleCmd := &cobra.Command{
Use: "scale",
GroupID: "management",
Short: L("Adjust the replicas for services supporting it."),
Long: L(`Adjust the replicas for services supporting it.
Supported services:
- uyuni-hub-xmlrpc
- uyuni-saline
- uyuni-server-attestation
`),
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
var flags scaleFlags
return utils.CommandHelper(globalFlags, cmd, args, &flags, nil, run)
},
}
scaleCmd.SetUsageTemplate(scaleCmd.UsageTemplate())
addScaleFlags(scaleCmd)
if utils.KubernetesBuilt {
utils.AddBackendFlag(scaleCmd)
}
return scaleCmd
}
// NewCommand adjusts the replicas of a container service.
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
return newCmd(globalFlags, scale)
}
func scale(globalFlags *types.GlobalFlags, flags *scaleFlags, cmd *cobra.Command, args []string) error {
fn, err := shared.ChoosePodmanOrKubernetes(cmd.Flags(), podmanScale, kubernetesScale)
if err != nil {
return err
}
return fn(globalFlags, flags, cmd, args)
}
0707010000004c000081a400000000000000000000000168ed21dd00000496000000000000000000000000000000000000001f00000000mgradm/cmd/scale/scale_test.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package scale
import (
"testing"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared/testutils"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
func TestParamsParsing(t *testing.T) {
args := []string{
"--replicas", "2",
"some-service",
}
if utils.KubernetesBuilt {
args = append(args, "--backend", "kubectl")
}
// Test function asserting that the args are properly parsed
tester := func(_ *types.GlobalFlags, flags *scaleFlags, _ *cobra.Command, args []string) error {
testutils.AssertEquals(t, "Error parsing --replicas", 2, flags.Replicas)
testutils.AssertEquals(t, "Error parsing the service name", "some-service", args[0])
if utils.KubernetesBuilt {
testutils.AssertEquals(t, "Error parsing --backend", "kubectl", flags.Backend)
}
return nil
}
globalFlags := types.GlobalFlags{}
cmd := newCmd(&globalFlags, tester)
testutils.AssertHasAllFlags(t, cmd, args)
cmd.SetArgs(args)
if err := cmd.Execute(); err != nil {
t.Errorf("command failed with error: %s", err)
}
}
0707010000004d000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001100000000mgradm/cmd/scale0707010000004e000081a400000000000000000000000168ed21dd000001a6000000000000000000000000000000000000002700000000mgradm/cmd/server/rename/kubernetes.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package rename
import (
"errors"
"github.com/spf13/cobra"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
func renameForKubernetes(_ *types.GlobalFlags, _ *renameFlags, _ *cobra.Command, _ []string) error {
return errors.New(L("not implemented yet"))
}
0707010000004f000081a400000000000000000000000168ed21dd00000144000000000000000000000000000000000000002400000000mgradm/cmd/server/rename/nobuild.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build nok8s
package rename
import (
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
func renameForKubernetes(_ *types.GlobalFlags, _ *renameFlags, _ *cobra.Command, _ []string) error {
return nil
}
07070100000050000081a400000000000000000000000168ed21dd00001021000000000000000000000000000000000000002300000000mgradm/cmd/server/rename/podman.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package rename
import (
"fmt"
"os"
"path"
"regexp"
"strings"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
adm_podman "github.com/uyuni-project/uyuni-tools/mgradm/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
var systemd podman.Systemd = podman.SystemdImpl{}
func renameForPodman(_ *types.GlobalFlags, flags *renameFlags, _ *cobra.Command, args []string) error {
fqdn, err := utils.GetFqdn(args)
if err != nil {
return err
}
// Regenerate Server SSL certificate if requested
image := podman.GetServiceImage(podman.ServerService)
tz := findTimezone("podman")
log.Info().Msg(L("Preparing SSL certificates to match the new hostname"))
if err := adm_podman.PrepareSSLCertificates(image, &flags.SSL, tz, fqdn); err != nil {
return err
}
log.Info().Msg(L("Stopping the server container"))
if err := systemd.StopService(podman.ServerService); err != nil {
return err
}
log.Info().Msgf(L("Changing the UYUNI_HOSTNAME to %s"), fqdn)
if err := alterHostnameConfig(fqdn); err != nil {
return err
}
// Update the service to ensure it has -e UYUNI_HOSTNAME
if err := adm_podman.UpdateServerSystemdService(); err != nil {
return err
}
// Restart the server container: the UYUNI_HOSTNAME change will be picked up by the uyuni-update-config service
log.Info().Msg(L("Starting the server container"))
err = systemd.StartService(podman.ServerService)
if err != nil {
return err
}
log.Info().Msg(L(`The renaming continues inside the server container.
The logs can be found in journalctl -u uyuni-update-config.service output.`))
return nil
}
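// findTimezone returns the server timezone, trying the running container first,
// then the systemd service environment, and finally falling back to the host timezone.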
func findTimezone(backend string) string {
// If the container is running, call podman exec uyuni-server sh -c "echo $TZ".
cnx := shared.NewConnection(backend, podman.ServerContainerName, "")
out, err := cnx.Exec("echo", "$TZ")
if err == nil {
return strings.TrimSpace(string(out))
}
if backend == "podman" {
// Otherwise get the value from the uyuni-server.service.d/custom.conf file, in the 'Environment=TZ=' line.
// In theory users shouldn't remove this, but who knows what could happen?
log.Debug().Msg("Failed to get the timezone from the container, looking for it in systemd configuration file")
if env, err := systemd.Show(podman.ServerService, "Environment"); err == nil {
pattern := regexp.MustCompile("TZ=([^[:space]]*)")
matches := pattern.FindStringSubmatch(env)
if len(matches) == 1 {
return matches[0]
}
}
}
log.Debug().Msg("Failed to get the timezone from the configuration, getting the host one")
return utils.GetLocalTimezone()
}
// alterHostnameConfig changes the UYUNI_HOSTNAME value in the server systemd service or adds it if needed.
func alterHostnameConfig(fqdn string) error {
config, err := readCustomConf()
if err != nil {
return err
}
// Append Environment=UYUNI_HOSTNAME={{.fqdn}} or replace the value
pattern := regexp.MustCompile(`(?m)^Environment=UYUNI_HOSTNAME=.*$`)
newConfig := pattern.ReplaceAllString(config, "Environment=UYUNI_HOSTNAME="+fqdn)
if config == newConfig {
newConfig = fmt.Sprintf("%s\nEnvironment=UYUNI_HOSTNAME=%s\n", config, fqdn)
}
systemdConfPath := podman.GetServiceConfFolder(podman.ServerService)
customConf := path.Join(systemdConfPath, podman.CustomConf)
if err := os.WriteFile(customConf, []byte(newConfig), 0640); err != nil {
return utils.Error(err, L("failed to write custom.conf with the new hostname"))
}
return systemd.ReloadDaemon(false)
}
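// readCustomConf returns the content of the server service custom.conf file.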
func readCustomConf() (config string, err error) {
systemdConfPath := podman.GetServiceConfFolder(podman.ServerService)
customConf := path.Join(systemdConfPath, podman.CustomConf)
out, err := os.ReadFile(customConf)
if err != nil {
return "", utils.Error(err, L("failed to read the custom.conf file"))
}
config = string(out)
return config, nil
}
07070100000051000081a400000000000000000000000168ed21dd000007c9000000000000000000000000000000000000002300000000mgradm/cmd/server/rename/rename.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package rename
import (
"github.com/spf13/cobra"
adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
"github.com/uyuni-project/uyuni-tools/shared"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/ssl"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
type renameFlags struct {
Backend string
SSL adm_utils.InstallSSLFlags
}
// NewCommand creates a CLI command to rename the server.
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
cmd := &cobra.Command{
Use: "rename [New FQDN]",
Short: L("Change the host name of the server"),
Long: L(`Set the FQDN of the server to a new value.
If no Fully Qualified Domain Name is passed, the one from the running machine will be used.
Changing the name of the server may involve updating SSL certificates to match the new name,
but also altering various configurations inside the containers.
The uyuni-server container will be stopped during the rename and
a refresh of the pillars of each registered system will be triggered.
`),
Args: cobra.MaximumNArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
var flags renameFlags
return utils.CommandHelper(globalFlags, cmd, args, &flags, nil, rename)
},
}
utils.AddBackendFlag(cmd)
ssl.AddSSLGenerationFlags(cmd)
ssl.AddSSLThirdPartyFlags(cmd)
ssl.AddSSLDBThirdPartyFlags(cmd)
cmd.Flags().String("ssl-password", "", L("Password for the CA key to generate"))
_ = utils.AddFlagToHelpGroupID(cmd, "ssl-password", ssl.GeneratedFlagsGroup)
return cmd
}
func rename(globalFlags *types.GlobalFlags, flags *renameFlags, cmd *cobra.Command, args []string) error {
fn, err := shared.ChoosePodmanOrKubernetes(cmd.Flags(), renameForPodman, renameForKubernetes)
if err != nil {
return err
}
return fn(globalFlags, flags, cmd, args)
}
07070100000052000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001900000000mgradm/cmd/server/rename07070100000053000081a400000000000000000000000168ed21dd00000289000000000000000000000000000000000000001c00000000mgradm/cmd/server/server.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package server
import (
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/server/rename"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
// NewCommand creates a sub command for all server-related actions.
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
cmd := &cobra.Command{
Use: "server",
GroupID: "tool",
Short: L("Server management utilities"),
Args: cobra.ExactArgs(1),
}
cmd.AddCommand(rename.NewCommand(globalFlags))
return cmd
}
07070100000054000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001200000000mgradm/cmd/server07070100000055000081a400000000000000000000000168ed21dd00000306000000000000000000000000000000000000001f00000000mgradm/cmd/start/kubernetes.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package start
import (
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
func kubernetesStart(
_ *types.GlobalFlags,
_ *startFlags,
_ *cobra.Command,
_ []string,
) error {
cnx := shared.NewConnection("kubectl", "", kubernetes.ServerFilter)
namespace, err := cnx.GetNamespace("")
if err != nil {
return utils.Errorf(err, L("failed retrieving namespace"))
}
return kubernetes.Start(namespace, kubernetes.ServerApp)
}
07070100000056000081a400000000000000000000000168ed21dd000001b4000000000000000000000000000000000000002100000000mgradm/cmd/start/nokubernetes.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build nok8s
package start
import (
"errors"
"github.com/spf13/cobra"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
func kubernetesStart(
_ *types.GlobalFlags,
_ *startFlags,
_ *cobra.Command,
_ []string,
) error {
return errors.New(L("built without kubernetes support"))
}
07070100000057000081a400000000000000000000000168ed21dd0000017f000000000000000000000000000000000000001b00000000mgradm/cmd/start/podman.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package start
import (
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
func podmanStart(
_ *types.GlobalFlags,
_ *startFlags,
_ *cobra.Command,
_ []string,
) error {
return podman.StartServices()
}
07070100000058000081a400000000000000000000000168ed21dd00000552000000000000000000000000000000000000001a00000000mgradm/cmd/start/start.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package start
import (
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
type startFlags struct {
Backend string
}
func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[startFlags]) *cobra.Command {
startCmd := &cobra.Command{
Use: "start",
GroupID: "management",
Short: L("Start the server"),
Long: L("Start the server"),
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
var flags startFlags
return utils.CommandHelper(globalFlags, cmd, args, &flags, nil, run)
},
}
startCmd.SetUsageTemplate(startCmd.UsageTemplate())
if utils.KubernetesBuilt {
utils.AddBackendFlag(startCmd)
}
return startCmd
}
// NewCommand starts the server.
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
return newCmd(globalFlags, start)
}
func start(globalFlags *types.GlobalFlags, flags *startFlags, cmd *cobra.Command, args []string) error {
fn, err := shared.ChoosePodmanOrKubernetes(cmd.Flags(), podmanStart, kubernetesStart)
if err != nil {
return err
}
return fn(globalFlags, flags, cmd, args)
}
07070100000059000081a400000000000000000000000168ed21dd000003c8000000000000000000000000000000000000001f00000000mgradm/cmd/start/start_test.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package start
import (
"testing"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared/testutils"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
func TestParamsParsing(t *testing.T) {
args := []string{}
if utils.KubernetesBuilt {
args = append(args, "--backend", "kubectl")
}
// Test function asserting that the args are properly parsed
tester := func(_ *types.GlobalFlags, flags *startFlags, _ *cobra.Command, _ []string) error {
if utils.KubernetesBuilt {
testutils.AssertEquals(t, "Error parsing --backend", "kubectl", flags.Backend)
}
return nil
}
globalFlags := types.GlobalFlags{}
cmd := newCmd(&globalFlags, tester)
testutils.AssertHasAllFlags(t, cmd, args)
cmd.SetArgs(args)
if err := cmd.Execute(); err != nil {
t.Errorf("command failed with error: %s", err)
}
}
0707010000005a000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001100000000mgradm/cmd/start0707010000005b000081a400000000000000000000000168ed21dd00000624000000000000000000000000000000000000002000000000mgradm/cmd/status/kubernetes.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package status
import (
"errors"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
"github.com/uyuni-project/uyuni-tools/shared"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
func kubernetesStatus(
_ *types.GlobalFlags,
_ *statusFlags,
_ *cobra.Command,
_ []string,
) error {
cnx := shared.NewConnection("kubectl", "", kubernetes.ServerFilter)
namespace, err := cnx.GetNamespace("")
if err != nil {
return utils.Errorf(err, L("failed to find the uyuni deployment namespace"))
}
// Is the pod running? Do we have all the replicas?
status, err := kubernetes.GetDeploymentStatus(namespace, kubernetes.ServerApp)
if err != nil {
return utils.Errorf(err, L("failed to get deployment status"))
}
if status.Replicas != status.ReadyReplicas {
log.Warn().Msgf(L("Some replicas are not ready: %[1]d / %[2]d"), status.ReadyReplicas, status.Replicas)
}
if status.AvailableReplicas == 0 {
return errors.New(L("the pod is not running"))
}
// Are the services running in the container?
if err := adm_utils.ExecCommand(zerolog.InfoLevel, cnx, "spacewalk-service", "status"); err != nil {
return utils.Errorf(err, L("failed to run spacewalk-service status"))
}
return nil
}
0707010000005c000081a400000000000000000000000168ed21dd000001b7000000000000000000000000000000000000002200000000mgradm/cmd/status/nokubernetes.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build nok8s
package status
import (
"errors"
"github.com/spf13/cobra"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
func kubernetesStatus(
_ *types.GlobalFlags,
_ *statusFlags,
_ *cobra.Command,
_ []string,
) error {
return errors.New(L("built without kubernetes support"))
}
0707010000005d000081a400000000000000000000000168ed21dd0000071c000000000000000000000000000000000000001c00000000mgradm/cmd/status/podman.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package status
import (
"fmt"
"github.com/rs/zerolog"
"github.com/spf13/cobra"
adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
"github.com/uyuni-project/uyuni-tools/shared"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
var systemd podman.Systemd = podman.NewSystemd()
func podmanStatus(
_ *types.GlobalFlags,
_ *statusFlags,
_ *cobra.Command,
_ []string,
) error {
if systemd.HasService(podman.DBService) {
_ = utils.RunCmdStdMapping(zerolog.DebugLevel, "systemctl", "status", "--no-pager", podman.DBService)
}
// If the service is not running, only show its status
if !systemd.IsServiceRunning(podman.ServerService) {
_ = utils.RunCmdStdMapping(zerolog.DebugLevel, "systemctl", "status", "--no-pager", podman.ServerService)
} else {
// Run spacewalk-service status in the container
cnx := shared.NewConnection("podman", podman.ServerContainerName, "")
_ = adm_utils.ExecCommand(zerolog.InfoLevel, cnx, "spacewalk-service", "status")
}
for i := 0; i < systemd.CurrentReplicaCount(podman.ServerAttestationService); i++ {
println() // add an empty line between the previous logs and this one
_ = utils.RunCmdStdMapping(
zerolog.DebugLevel, "systemctl", "status", "--no-pager", fmt.Sprintf("%s@%d", podman.ServerAttestationService, i),
)
}
for i := 0; i < systemd.CurrentReplicaCount(podman.HubXmlrpcService); i++ {
println() // add an empty line between the previous logs and this one
_ = utils.RunCmdStdMapping(
zerolog.DebugLevel, "systemctl", "status", "--no-pager", fmt.Sprintf("%s@%d", podman.HubXmlrpcService, i),
)
}
return nil
}
0707010000005e000081a400000000000000000000000168ed21dd00000597000000000000000000000000000000000000001c00000000mgradm/cmd/status/status.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package status
import (
"errors"
"github.com/spf13/cobra"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
type statusFlags struct {
}
func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[statusFlags]) *cobra.Command {
cmd := &cobra.Command{
Use: "status",
GroupID: "management",
Short: L("Get the server status"),
Long: L("Get the server status"),
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
var flags statusFlags
return utils.CommandHelper(globalFlags, cmd, args, &flags, nil, run)
},
}
cmd.SetUsageTemplate(cmd.UsageTemplate())
return cmd
}
// NewCommand to get the status of the server.
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
return newCmd(globalFlags, status)
}
func status(globalFlags *types.GlobalFlags, flags *statusFlags, cmd *cobra.Command, args []string) error {
if systemd.HasService(podman.ServerService) {
return podmanStatus(globalFlags, flags, cmd, args)
}
if utils.IsInstalled("kubectl") && utils.IsInstalled("helm") {
return kubernetesStatus(globalFlags, flags, cmd, args)
}
return errors.New(L("no installed server detected"))
}
0707010000005f000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001200000000mgradm/cmd/status07070100000060000081a400000000000000000000000168ed21dd00000302000000000000000000000000000000000000001e00000000mgradm/cmd/stop/kubernetes.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package stop
import (
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
func kubernetesStop(
_ *types.GlobalFlags,
_ *stopFlags,
_ *cobra.Command,
_ []string,
) error {
cnx := shared.NewConnection("kubectl", "", kubernetes.ServerFilter)
namespace, err := cnx.GetNamespace("")
if err != nil {
return utils.Errorf(err, L("failed retrieving namespace"))
}
return kubernetes.Stop(namespace, kubernetes.ServerApp)
}
07070100000061000081a400000000000000000000000168ed21dd000001b1000000000000000000000000000000000000002000000000mgradm/cmd/stop/nokubernetes.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build nok8s
package stop
import (
"errors"
"github.com/spf13/cobra"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
func kubernetesStop(
_ *types.GlobalFlags,
_ *stopFlags,
_ *cobra.Command,
_ []string,
) error {
return errors.New(L("built without kubernetes support"))
}
07070100000062000081a400000000000000000000000168ed21dd0000017b000000000000000000000000000000000000001a00000000mgradm/cmd/stop/podman.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package stop
import (
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
func podmanStop(
_ *types.GlobalFlags,
_ *stopFlags,
_ *cobra.Command,
_ []string,
) error {
return podman.StopServices()
}
07070100000063000081a400000000000000000000000168ed21dd0000053f000000000000000000000000000000000000001800000000mgradm/cmd/stop/stop.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package stop
import (
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
type stopFlags struct {
Backend string
}
func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[stopFlags]) *cobra.Command {
stopCmd := &cobra.Command{
Use: "stop",
GroupID: "management",
Short: L("Stop the server"),
Long: L("Stop the server"),
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
var flags stopFlags
return utils.CommandHelper(globalFlags, cmd, args, &flags, nil, run)
},
}
stopCmd.SetUsageTemplate(stopCmd.UsageTemplate())
if utils.KubernetesBuilt {
utils.AddBackendFlag(stopCmd)
}
return stopCmd
}
// NewCommand to stop the server.
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
return newCmd(globalFlags, stop)
}
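// stop dispatches to the podman or kubernetes implementation depending on the selected backend.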
func stop(globalFlags *types.GlobalFlags, flags *stopFlags, cmd *cobra.Command, args []string) error {
fn, err := shared.ChoosePodmanOrKubernetes(cmd.Flags(), podmanStop, kubernetesStop)
if err != nil {
return err
}
return fn(globalFlags, flags, cmd, args)
}
07070100000064000081a400000000000000000000000168ed21dd000003cb000000000000000000000000000000000000001d00000000mgradm/cmd/stop/stop_test.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package stop
import (
"testing"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared/testutils"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
func TestParamsParsing(t *testing.T) {
args := []string{}
if utils.KubernetesBuilt {
args = append(args, "--backend", "kubectl")
}
// Test function asserting that the args are properly parsed
tester := func(_ *types.GlobalFlags, flags *stopFlags,
_ *cobra.Command, _ []string,
) error {
if utils.KubernetesBuilt {
testutils.AssertEquals(t, "Error parsing --backend", "kubectl", flags.Backend)
}
return nil
}
globalFlags := types.GlobalFlags{}
cmd := newCmd(&globalFlags, tester)
testutils.AssertHasAllFlags(t, cmd, args)
cmd.SetArgs(args)
if err := cmd.Execute(); err != nil {
t.Errorf("command failed with error: %s", err)
}
}
07070100000065000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001000000000mgradm/cmd/stop07070100000066000081a400000000000000000000000168ed21dd00000479000000000000000000000000000000000000002400000000mgradm/cmd/support/config/config.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package config
import (
"github.com/spf13/cobra"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
type configFlags struct {
Output string
Backend string
}
func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[configFlags]) *cobra.Command {
configCmd := &cobra.Command{
Use: "config",
Short: L("Extract configuration and logs"),
Long: L(`Extract the host or cluster configuration and logs as well as those from
the containers for support to help debugging.`),
RunE: func(cmd *cobra.Command, args []string) error {
var flags configFlags
return utils.CommandHelper(globalFlags, cmd, args, &flags, nil, run)
},
}
configCmd.Flags().StringP("output", "o", ".", L("Path where to extract the data"))
utils.AddBackendFlag(configCmd)
return configCmd
}
// NewCommand is the command for creating a supportconfig.
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
return newCmd(globalFlags, extract)
}
07070100000067000081a400000000000000000000000168ed21dd000003cb000000000000000000000000000000000000002900000000mgradm/cmd/support/config/config_test.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package config
import (
"testing"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared/testutils"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
func TestParamsParsing(t *testing.T) {
args := []string{
"--output", "path/to/output.tar.gz",
"--backend", "kubectl",
}
// Test function asserting that the args are properly parsed
tester := func(_ *types.GlobalFlags, flags *configFlags,
_ *cobra.Command, _ []string,
) error {
testutils.AssertEquals(t, "Error parsing --output", "path/to/output.tar.gz", flags.Output)
testutils.AssertEquals(t, "Error parsing --backend", "kubectl", flags.Backend)
return nil
}
globalFlags := types.GlobalFlags{}
cmd := newCmd(&globalFlags, tester)
testutils.AssertHasAllFlags(t, cmd, args)
cmd.SetArgs(args)
if err := cmd.Execute(); err != nil {
t.Errorf("command failed with error: %s", err)
}
}
07070100000068000081a400000000000000000000000168ed21dd00000817000000000000000000000000000000000000002700000000mgradm/cmd/support/config/extractor.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package config
import (
"os"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
var systemd podman.Systemd = podman.NewSystemd()
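// filesRemover removes the given temporary files, skipping any that no longer exist.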
func filesRemover(files []string) {
for _, file := range files {
if !utils.FileExists(file) {
log.Trace().Msgf("%s will not be removed since it doesn't exist", file)
continue
}
if err := os.Remove(file); err != nil {
log.Error().Err(err).Msgf(L("failed to remove %s temporary file"), file)
}
}
}
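// extract runs supportconfig in the server container and on the host, then bundles the collected files into a tarball.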
func extract(_ *types.GlobalFlags, flags *configFlags, _ *cobra.Command, _ []string) error {
containerName, err := shared.ChooseObjPodmanOrKubernetes(systemd, podman.ServerContainerName, kubernetes.ServerApp)
if err != nil {
return err
}
cnx := shared.NewConnection(flags.Backend, containerName, kubernetes.ServerFilter)
// Copy the generated file locally
tmpDir, cleaner, err := utils.TempDir()
if err != nil {
return err
}
defer cleaner()
fileList, err := cnx.RunSupportConfig(tmpDir)
if err != nil {
return err
}
var fileListHost []string
if systemd.HasService(podman.ServerService) {
fileListHost, err = podman.RunSupportConfigOnPodmanHost(systemd, tmpDir)
}
defer filesRemover(fileListHost)
if err != nil {
return err
}
if utils.IsInstalled("kubectl") && utils.IsInstalled("helm") {
var namespace string
namespace, err = cnx.GetNamespace("")
if err != nil {
return err
}
fileListHost, err = kubernetes.RunSupportConfigOnKubernetesHost(tmpDir, namespace, kubernetes.ServerFilter)
}
if err != nil {
return err
}
if len(fileListHost) > 0 {
fileList = append(fileList, fileListHost...)
}
return utils.CreateSupportConfigTarball(flags.Output, fileList)
}
07070100000069000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001a00000000mgradm/cmd/support/config0707010000006a000081a400000000000000000000000168ed21dd00000146000000000000000000000000000000000000002000000000mgradm/cmd/support/ptf/noptf.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !ptf
package ptf
import (
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
// NewCommand is the command for creating a supportptf.
func NewCommand(_ *types.GlobalFlags) *cobra.Command {
return nil
}
0707010000006b000081a400000000000000000000000168ed21dd00000645000000000000000000000000000000000000002800000000mgradm/cmd/support/ptf/podman/podman.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build ptf
package podman
import (
"github.com/spf13/cobra"
adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
type podmanPTFFlags struct {
adm_utils.ServerFlags `mapstructure:",squash"`
Podman podman.PodmanFlags
PTFId string `mapstructure:"ptf"`
TestID string `mapstructure:"test"`
CustomerID string `mapstructure:"user"`
}
func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[podmanPTFFlags]) *cobra.Command {
podmanCmd := &cobra.Command{
Use: "podman",
Short: L("Install a PTF or Test package on podman"),
Long: L(`Install a PTF or Test package on podman
The support ptf podman command assumes podman is installed locally and
the host machine is registered to SCC.
NOTE: for now installing on a remote podman is not supported!
`),
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
var flags podmanPTFFlags
return utils.CommandHelper(globalFlags, cmd, args, &flags, nil, run)
},
}
adm_utils.AddSCCFlag(podmanCmd)
utils.AddPTFFlag(podmanCmd)
utils.AddPullPolicyFlag(podmanCmd)
return podmanCmd
}
// NewCommand for podman installation.
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
return newCmd(globalFlags, ptfForPodman)
}
0707010000006c000081a400000000000000000000000168ed21dd00000545000000000000000000000000000000000000002d00000000mgradm/cmd/support/ptf/podman/podman_test.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build ptf
package podman
import (
"testing"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared/testutils"
"github.com/uyuni-project/uyuni-tools/shared/testutils/flagstests"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
func TestParamsParsing(t *testing.T) {
args := []string{
"--ptf", "ptf123",
"--test", "test123",
"--user", "sccuser",
"--pullPolicy", "never",
}
args = append(args, flagstests.SCCFlagTestArgs...)
// Test function asserting that the args are properly parsed
tester := func(_ *types.GlobalFlags, flags *podmanPTFFlags, _ *cobra.Command, _ []string) error {
testutils.AssertEquals(t, "Error parsing --ptf", "ptf123", flags.PTFId)
testutils.AssertEquals(t, "Error parsing --test", "test123", flags.TestID)
testutils.AssertEquals(t, "Error parsing --user", "sccuser", flags.CustomerID)
testutils.AssertEquals(t, "Error parsing --pullPolicy", "never", flags.Image.PullPolicy)
flagstests.AssertSCCFlag(t, &flags.ServerFlags.Installation.SCC)
return nil
}
globalFlags := types.GlobalFlags{}
cmd := newCmd(&globalFlags, tester)
testutils.AssertHasAllFlags(t, cmd, args)
cmd.SetArgs(args)
if err := cmd.Execute(); err != nil {
t.Errorf("command failed with error: %s", err)
}
}
0707010000006d000081a400000000000000000000000168ed21dd00000d99000000000000000000000000000000000000002700000000mgradm/cmd/support/ptf/podman/utils.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build ptf
package podman
import (
"errors"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/podman"
adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
podman_shared "github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
var systemd podman_shared.Systemd = podman_shared.NewSystemd()
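// ptfForPodman logs in to the registry, computes the PTF or Test images and upgrades the containers to them.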
func ptfForPodman(
_ *types.GlobalFlags,
flags *podmanPTFFlags,
_ *cobra.Command,
_ []string,
) error {
// Login first to be able to search the registry for PTF images
hostData, err := podman_shared.InspectHost()
if err != nil {
return err
}
authFile, cleaner, err := podman_shared.PodmanLogin(hostData, flags.Installation.SCC)
if err != nil {
return utils.Errorf(err, L("failed to login to registry.suse.com"))
}
defer cleaner()
// We don't want to perform a PostgreSQL version upgrade when installing a PTF.
// For that, the upgrade command should be used instead.
dummyImage := types.ImageFlags{}
dummyDB := adm_utils.DBFlags{}
dummyReportDB := adm_utils.DBFlags{}
dummySSL := adm_utils.InstallSSLFlags{}
if err := flags.checkParameters(); err != nil {
return err
}
return podman.Upgrade(systemd, authFile,
"",
dummyDB,
dummyReportDB,
dummySSL,
flags.Image,
dummyImage,
flags.Coco,
flags.HubXmlrpc,
flags.Saline,
flags.Pgsql,
flags.Installation.TZ,
)
}
// variables for unit testing.
var getServiceImage = podman_shared.GetServiceImage
var hasRemoteImage = podman_shared.HasRemoteImage
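// checkParameters validates the ptf, test and user flags and computes the PTF image names for the server and its optional services.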
func (flags *podmanPTFFlags) checkParameters() error {
sccRegistry := "registry.suse.com"
if flags.TestID != "" && flags.PTFId != "" {
return errors.New(L("ptf and test flags cannot be set simultaneously "))
}
if flags.TestID == "" && flags.PTFId == "" {
return errors.New(L("ptf and test flags cannot be empty simultaneously "))
}
if flags.CustomerID == "" {
return errors.New(L("user flag cannot be empty"))
}
suffix := "ptf"
projectID := flags.PTFId
if flags.TestID != "" {
suffix = "test"
projectID = flags.TestID
}
serverImage := getServiceImage(podman_shared.ServerService)
if serverImage == "" {
return errors.New(L("failed to find server image"))
}
var err error
flags.Image.Name, err = utils.ComputePTF(sccRegistry, flags.CustomerID, projectID, serverImage, suffix)
if err != nil {
return err
}
log.Info().Msgf(L("The computed image is %s"), flags.Image.Name)
images := map[string]*string{
podman_shared.ServerAttestationService + "@": &flags.Coco.Image.Name,
podman_shared.HubXmlrpcService: &flags.HubXmlrpc.Image.Name,
podman_shared.SalineService + "@": &flags.Saline.Image.Name,
podman_shared.DBService: &flags.Pgsql.Image.Name,
}
for service, pointer := range images {
if containerImage := getServiceImage(service); containerImage != "" {
// If no image was found then skip it during the upgrade.
containerImage, err = utils.ComputePTF(sccRegistry, flags.CustomerID, projectID, containerImage, suffix)
if err != nil {
return err
}
if hasRemoteImage(containerImage) {
*pointer = containerImage
log.Info().Msgf(L("The %[1]s service image is %[2]s"), service, *pointer)
}
}
}
return nil
}
0707010000006e000081a400000000000000000000000168ed21dd00001200000000000000000000000000000000000000002c00000000mgradm/cmd/support/ptf/podman/utils_test.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build ptf
package podman
import (
"fmt"
"testing"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/testutils"
)
func TestCheckParameters(t *testing.T) {
createServiceImages := func(
image string, cocoImage string, hubImage string, salineImage string, dbImage string,
) map[string]string {
return map[string]string{
podman.ServerService: image,
podman.ServerAttestationService + "@": cocoImage,
podman.HubXmlrpcService: hubImage,
podman.SalineService + "@": salineImage,
podman.DBService: dbImage,
}
}
type testData struct {
serviceImages map[string]string
hasRemoteImages map[string]bool
expectedImage string
expectedCocoImage string
expectedHubImage string
expectedSalineImage string
expectedDBImage string
expectedError string
}
data := []testData{
{
createServiceImages("registry.suse.com/suse/manager/5.0/x86_64/server:5.0.0", "", "", "", ""),
map[string]bool{},
"registry.suse.com/a/1234/5678/suse/manager/5.0/x86_64/server:latest-ptf-5678",
"",
"",
"",
"",
"",
},
{
createServiceImages(
"registry.suse.com/suse/manager/5.0/x86_64/server:5.0.0",
"registry.suse.com/suse/manager/5.0/x86_64/server-attestation:5.0.0",
"registry.suse.com/suse/manager/5.0/x86_64/server-hub-xmlrpc-api:5.0.0",
"registry.suse.com/suse/manager/5.0/x86_64/server-saline:5.0.0",
"registry.suse.com/suse/manager/5.0/x86_64/server-postgresql:5.0.0",
),
map[string]bool{
"registry.suse.com/a/1234/5678/suse/manager/5.0/x86_64/server-attestation:latest-ptf-5678": true,
"registry.suse.com/a/1234/5678/suse/manager/5.0/x86_64/server-hub-xmlrpc-api:latest-ptf-5678": true,
"registry.suse.com/a/1234/5678/suse/manager/5.0/x86_64/server-saline:latest-ptf-5678": true,
"registry.suse.com/a/1234/5678/suse/manager/5.0/x86_64/server-postgresql:latest-ptf-5678": true,
},
"registry.suse.com/a/1234/5678/suse/manager/5.0/x86_64/server:latest-ptf-5678",
"registry.suse.com/a/1234/5678/suse/manager/5.0/x86_64/server-attestation:latest-ptf-5678",
"registry.suse.com/a/1234/5678/suse/manager/5.0/x86_64/server-hub-xmlrpc-api:latest-ptf-5678",
"registry.suse.com/a/1234/5678/suse/manager/5.0/x86_64/server-saline:latest-ptf-5678",
"registry.suse.com/a/1234/5678/suse/manager/5.0/x86_64/server-postgresql:latest-ptf-5678",
"",
},
{
createServiceImages(
"registry.suse.com/suse/manager/5.0/x86_64/server:5.0.0",
"registry.suse.com/suse/manager/5.0/x86_64/server-attestation:5.0.0",
"",
"",
"",
),
map[string]bool{
"registry.suse.com/a/1234/5678/suse/manager/5.0/x86_64/server:latest-ptf-5678": true,
"registry.suse.com/a/1234/5678/suse/manager/5.0/x86_64/server-attestation:latest-ptf-5678": false,
},
"registry.suse.com/a/1234/5678/suse/manager/5.0/x86_64/server:latest-ptf-5678",
"",
"",
"",
"",
"",
},
{
createServiceImages(
"",
"",
"registry.suse.com/suse/manager/5.0/x86_64/server-hub-xmlrpc-api:5.0.0",
"",
"",
),
map[string]bool{
"registry.suse.com/a/1234/5678/suse/manager/5.0/x86_64/server-hub-xmlrpc-api:latest-ptf-5678": true,
},
"",
"",
"",
"",
"",
"failed to find server image",
},
}
for i, test := range data {
getServiceImage = func(service string) string {
return test.serviceImages[service]
}
hasRemoteImage = func(image string) bool {
return test.hasRemoteImages[image]
}
flags := podmanPTFFlags{
PTFId: "5678",
CustomerID: "1234",
}
testCase := fmt.Sprintf("case #%d - ", i+1)
actualError := flags.checkParameters()
errMessage := ""
if actualError != nil {
errMessage = actualError.Error()
}
testutils.AssertEquals(t, testCase+"error didn't match the expected behavior",
test.expectedError, errMessage,
)
testutils.AssertEquals(t, testCase+"unexpected image", test.expectedImage, flags.Image.Name)
testutils.AssertEquals(t, testCase+"unexpected coco image", test.expectedCocoImage, flags.Coco.Image.Name)
testutils.AssertEquals(t, testCase+"unexpected hub image", test.expectedHubImage, flags.HubXmlrpc.Image.Name)
testutils.AssertEquals(t, testCase+"unexpected saline image", test.expectedSalineImage, flags.Saline.Image.Name)
testutils.AssertEquals(t, testCase+"unexpected db image", test.expectedDBImage, flags.Pgsql.Image.Name)
}
}
0707010000006f000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001e00000000mgradm/cmd/support/ptf/podman07070100000070000081a400000000000000000000000168ed21dd000002a2000000000000000000000000000000000000001e00000000mgradm/cmd/support/ptf/ptf.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build ptf
package ptf
import (
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/support/ptf/podman"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
// NewCommand is the command for creating a supportptf.
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
ptfCmd := &cobra.Command{
Use: "ptf",
Short: L("Install a PTF"),
}
utils.AddBackendFlag(ptfCmd)
ptfCmd.AddCommand(podman.NewCommand(globalFlags))
return ptfCmd
}
07070100000071000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001700000000mgradm/cmd/support/ptf07070100000072000081a400000000000000000000000168ed21dd00001636000000000000000000000000000000000000001e00000000mgradm/cmd/support/sql/sql.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package sql
import (
"crypto/rand"
"encoding/hex"
"errors"
"fmt"
"io"
"os"
"os/exec"
"path"
"strings"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
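// prepareSource copies the optional SQL file into the server container under a randomized name and returns that name, or "-" when reading from standard input.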
func prepareSource(args []string, cnx *shared.Connection) (string, error) {
target := "-"
if len(args) > 0 {
source := args[0]
target = path.Base(source)
if !utils.FileExists(source) {
return "", fmt.Errorf(L("source %s does not exists"), source)
}
randBytes := make([]byte, 16)
if _, err := rand.Read(randBytes); err != nil {
return "", utils.Errorf(err, L("unable to get random file prefix"))
}
target = hex.EncodeToString(randBytes) + target
if err := cnx.Copy(args[0], "server:"+target, "", ""); err != nil {
return "", err
}
}
return target, nil
}
func cleanupSource(file string, cnx *shared.Connection) {
if _, err := cnx.Exec("rm", file); err != nil {
log.Error().Err(err).Msg(L("unable to cleanup source file"))
}
}
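// prepareOutput returns the output file path, or "-" for standard output, refusing to overwrite an existing file unless --force is set.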
func prepareOutput(flags *sqlFlags) (string, error) {
output := "-"
if flags.OutputFile != "" {
output = flags.OutputFile
if utils.FileExists(output) && !flags.ForceOverwrite {
return "", fmt.Errorf(L("output file %s exists, use -f to force overwrite"), output)
}
}
return output, nil
}
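// getBaseCommand builds the podman or kubectl exec command and arguments used to reach the server container, including the environment variables to forward.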
func getBaseCommand(keepStdin bool, flags *sqlFlags, cnx *shared.Connection) (string, []string, error) {
podName, err := cnx.GetPodName()
if err != nil {
return "", nil, err
}
command, err := cnx.GetCommand()
if err != nil {
return "", nil, err
}
commandArgs := []string{"exec"}
envs := []string{}
if flags.Interactive {
commandArgs = append(commandArgs, "-i")
envs = append(envs, "ENV=/etc/sh.shrc.local")
commandArgs = append(commandArgs, "-t")
envs = append(envs, utils.GetEnvironmentVarsList()...)
} else if keepStdin {
// To use STDIN source, we need to pass -i
commandArgs = append(commandArgs, "-i")
}
commandArgs = append(commandArgs, podName)
if command == "kubectl" {
namespace, err := cnx.GetNamespace("")
if namespace == "" {
return "", nil, err
}
commandArgs = append(commandArgs, "-n", namespace, "-c", "uyuni", "--")
}
newEnv := []string{}
for _, envValue := range envs {
if !strings.Contains(envValue, "=") {
if value, set := os.LookupEnv(envValue); set {
newEnv = append(newEnv, fmt.Sprintf("%s=%s", envValue, value))
}
} else {
newEnv = append(newEnv, envValue)
}
}
if len(newEnv) > 0 {
commandArgs = append(commandArgs, "env")
commandArgs = append(commandArgs, newEnv...)
}
return command, commandArgs, nil
}
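// doSQL wraps the spacewalk-sql tool in the server container to run queries from a file or standard input and writes the result to a file or standard output.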
func doSQL(_ *types.GlobalFlags, flags *sqlFlags, _ *cobra.Command, args []string) error {
if flags.Interactive && flags.OutputFile != "" {
return errors.New(L("interactive mode cannot work with a file output"))
}
cnx := shared.NewConnection(flags.Backend, podman.ServerContainerName, kubernetes.ServerFilter)
// Validate options
source, err := prepareSource(args, cnx)
if err != nil {
return err
}
if source != "" && source != "-" {
defer cleanupSource(source, cnx)
}
output, err := prepareOutput(flags)
if err != nil {
return err
}
// For now do a quick wrapper around the spacewalk-sql tool.
// TODO - ideally use sql directly, but will need some gateway to be able to connect to the database
command, commandArgs, err := getBaseCommand(source == "-", flags, cnx)
if err != nil {
return err
}
commandArgs = append(commandArgs, "/usr/bin/spacewalk-sql")
sqlArgs := []string{}
if flags.Database == "reportdb" {
sqlArgs = append(sqlArgs, "--reportdb")
} else if flags.Database != "productdb" {
return fmt.Errorf(L("unknown or unsupported database %s"), flags.Database)
}
if flags.Interactive {
sqlArgs = append(sqlArgs, "-i")
} else {
sqlArgs = append(sqlArgs, "--select-mode", source)
}
commandArgs = append(commandArgs, sqlArgs...)
err = runCmd(command, output, commandArgs)
if err != nil {
var exitErr *exec.ExitError
if errors.As(err, &exitErr) {
log.Info().Err(err).Msg(L("Command failed"))
os.Exit(exitErr.ExitCode())
}
}
if output != "-" {
log.Info().Msgf(L("Result is stored in the file '%s'"), output)
}
return nil
}
type copyWriter struct {
Stream io.Writer
}
// Write writes the byte buffer to the underlying stream.
func (l copyWriter) Write(p []byte) (n int, err error) {
// Filter out kubectl line about terminated exit code
if !strings.HasPrefix(string(p), "command terminated with exit code") {
if _, err := l.Stream.Write(p); err != nil {
return 0, utils.Errorf(err, L("cannot write"))
}
n = len(p)
}
return
}
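// runCmd executes the command, sending its output to the given file, or to standard output when output is empty or "-".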
func runCmd(command string, output string, args []string) error {
commandStr := fmt.Sprintf("%s %s", command, strings.Join(args, " "))
log.Info().Msgf(L("Running %s"), commandStr)
runCmd := exec.Command(command, args...)
runCmd.Stdin = os.Stdin
if output == "" || output == "-" {
runCmd.Stdout = copyWriter{Stream: os.Stdout}
} else {
log.Trace().Msgf("Output is FILE %s", output)
f, err := os.OpenFile(output, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0600)
if err != nil {
return err
}
defer f.Close()
runCmd.Stdout = copyWriter{Stream: f}
}
runCmd.Stderr = copyWriter{Stream: os.Stderr}
if err := runCmd.Start(); err != nil {
log.Debug().Err(err).Msg("error starting command")
return err
}
return runCmd.Wait()
}
07070100000073000081a400000000000000000000000168ed21dd00000704000000000000000000000000000000000000002200000000mgradm/cmd/support/sql/sql_cmd.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package sql
import (
"github.com/spf13/cobra"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
type sqlFlags struct {
Database string
Interactive bool
ForceOverwrite bool `mapstructure:"force"`
OutputFile string `mapstructure:"output"`
Backend string
}
func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[sqlFlags]) *cobra.Command {
cmd := &cobra.Command{
Use: "sql [sql-file]",
Short: L("Execute SQL query"),
Long: L(`Execute SQL query either provided in sql-file or passed through standard input.
Examples:
Run the 'select hostname from rhnserver;' query using echo:
# echo 'select hostname from rhnserver;' | mgradm support sql
Run in interactive mode:
# mgradm support sql -i
Run the SQL queries from the example.sql file and write the output to the out.log file:
# mgradm support sql example.sql -o out.log
`),
RunE: func(cmd *cobra.Command, args []string) error {
var flags sqlFlags
return utils.CommandHelper(globalFlags, cmd, args, &flags, nil, run)
},
}
cmd.Flags().StringP("database", "d", "productdb", L("Target database, can be 'reportdb' or 'productdb'"))
cmd.Flags().BoolP("interactive", "i", false, L("Start in interactive mode"))
cmd.Flags().BoolP("force", "f", false, L("Force overwrite of output file if already exists"))
cmd.Flags().StringP("output", "o", "", L("Write output to the file instead of standard output"))
utils.AddBackendFlag(cmd)
return cmd
}
// NewCommand adds support sql command.
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
return newCmd(globalFlags, doSQL)
}
07070100000074000081a400000000000000000000000168ed21dd00000534000000000000000000000000000000000000002700000000mgradm/cmd/support/sql/sql_cmd_test.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package sql
import (
"testing"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared/testutils"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
func TestParamsParsing(t *testing.T) {
args := []string{
"--database", "reportdb",
"--interactive",
"--force",
"--output", "path/to/output",
"--backend", "kubectl",
}
// Test function asserting that the args are properly parsed
tester := func(_ *types.GlobalFlags, flags *sqlFlags,
_ *cobra.Command, _ []string,
) error {
testutils.AssertEquals(t, "Error parsing --dababase", "reportdb", flags.Database)
testutils.AssertTrue(t, "Error parsing --interactive", flags.Interactive)
testutils.AssertTrue(t, "Error parsing --force", flags.ForceOverwrite)
testutils.AssertEquals(t, "Error parsing --dababase", "reportdb", flags.Database)
testutils.AssertEquals(t, "Error parsing --output", "path/to/output", flags.OutputFile)
testutils.AssertEquals(t, "Error parsing --backend", "kubectl", flags.Backend)
return nil
}
globalFlags := types.GlobalFlags{}
cmd := newCmd(&globalFlags, tester)
testutils.AssertHasAllFlags(t, cmd, args)
cmd.SetArgs(args)
if err := cmd.Execute(); err != nil {
t.Errorf("command failed with error: %s", err)
}
}
07070100000075000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001700000000mgradm/cmd/support/sql07070100000076000081a400000000000000000000000168ed21dd000003b6000000000000000000000000000000000000001e00000000mgradm/cmd/support/support.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package support
import (
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/support/config"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/support/ptf"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/support/sql"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
// NewCommand to export supportconfig.
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
supportCmd := &cobra.Command{
Use: "support",
GroupID: "tool",
Short: L("Commands for support operations"),
Long: L("Commands for support operations"),
}
supportCmd.AddCommand(config.NewCommand(globalFlags))
supportCmd.AddCommand(sql.NewCommand(globalFlags))
if ptfCommand := ptf.NewCommand(globalFlags); ptfCommand != nil {
supportCmd.AddCommand(ptfCommand)
}
return supportCmd
}
07070100000077000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001300000000mgradm/cmd/support07070100000078000081a400000000000000000000000168ed21dd00000cd0000000000000000000000000000000000000002300000000mgradm/cmd/uninstall/kubernetes.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package uninstall
import (
"strings"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
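// uninstallForKubernetes removes the Uyuni resources, the cert-manager release and the K3s or RKE2 ingress configuration; without --force it only logs what would be done.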
func uninstallForKubernetes(
_ *types.GlobalFlags,
flags *utils.UninstallFlags,
_ *cobra.Command,
_ []string,
) error {
if flags.Purge.Volumes {
log.Warn().Msg(L("--purge-volumes is ignored on a kubernetes deployment"))
}
if flags.Purge.Images {
log.Warn().Msg(L("--purge-images is ignored on a kubernetes deployment"))
}
clusterInfos, err := kubernetes.CheckCluster()
if err != nil {
return err
}
kubeconfig := clusterInfos.GetKubeconfig()
// TODO Find all the PVs related to the server if we want to delete them
// Uninstall uyuni
serverConnection := shared.NewConnection("kubectl", "", kubernetes.ServerFilter)
serverNamespace, err := serverConnection.GetNamespace("")
if err != nil {
return err
}
// Remove all Uyuni resources
if serverNamespace != "" {
objects := "job,deploy,svc,ingress,pvc,cm,secret"
if kubernetes.HasResource("ingressroutetcps") {
objects += ",middlewares,ingressroutetcps,ingressrouteudps"
}
if kubernetes.HasResource("issuers") {
objects += ",issuers,certificates"
}
deleteCmd := []string{
"kubectl", "delete", "-n", serverNamespace, objects,
"-l", kubernetes.AppLabel + "=" + kubernetes.ServerApp,
}
if !flags.Force {
log.Info().Msgf(L("Would run %s"), strings.Join(deleteCmd, " "))
} else {
if err := utils.RunCmd(deleteCmd[0], deleteCmd[1:]...); err != nil {
return utils.Errorf(err, L("failed to delete server resources"))
}
}
}
// TODO Remove the PVs or wait for their automatic removal if purge is requested
// Also wait if the PVs are dynamic with Delete reclaim policy but the user didn't ask to purge them
// Since some storage plugins don't handle Delete policy, we may need to check for error events to avoid an infinite loop
// Uninstall cert-manager if we installed it
certManagerConnection := shared.NewConnection("kubectl", "", "-linstalledby=mgradm")
// TODO: re-add "-linstalledby=mgradm" filter once the label is added in helm release
// mgradm/shared/kubernetes/certificates.go:124 was supposed to be addressing it
certManagerNamespace, err := certManagerConnection.GetNamespace("cert-manager")
if err != nil {
return err
}
if certManagerNamespace != "" {
if err := kubernetes.HelmUninstall(certManagerNamespace, kubeconfig, "cert-manager", !flags.Force); err != nil {
return err
}
}
// Remove the K3s Traefik config
if clusterInfos.IsK3s() {
kubernetes.UninstallK3sTraefikConfig(!flags.Force)
}
// Remove the rke2 nginx config
if clusterInfos.IsRke2() {
kubernetes.UninstallRke2NginxConfig(!flags.Force)
}
if !flags.Force {
log.Warn().Msg(L("Nothing has been uninstalled, run with --force to actually uninstall"))
}
log.Warn().Msg(L("Volumes have not been touched. Depending on the storage class used, they may not have been removed"))
return nil
}
07070100000079000081a400000000000000000000000168ed21dd0000018f000000000000000000000000000000000000002500000000mgradm/cmd/uninstall/nokubernetes.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build nok8s
package uninstall
import (
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
func uninstallForKubernetes(
_ *types.GlobalFlags,
_ *utils.UninstallFlags,
_ *cobra.Command,
_ []string,
) error {
return nil
}
0707010000007a000081a400000000000000000000000168ed21dd00000d44000000000000000000000000000000000000001f00000000mgradm/cmd/uninstall/podman.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package uninstall
import (
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
var systemd podman.Systemd = podman.NewSystemd()
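// uninstallForPodman removes the server services, containers, network and secrets, and optionally purges the volumes and images; without --force nothing is actually removed.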
func uninstallForPodman(
_ *types.GlobalFlags,
flags *utils.UninstallFlags,
_ *cobra.Command,
_ []string,
) error {
// Get the images from the service configs before they are removed
images := []string{
podman.GetServiceImage(podman.ServerService),
podman.GetServiceImage(podman.ServerAttestationService + "@"),
podman.GetServiceImage(podman.HubXmlrpcService + "@"),
podman.GetServiceImage(podman.SalineService + "@"),
podman.GetServiceImage(podman.DBService),
}
// Uninstall the service
systemd.UninstallService("uyuni-server", !flags.Force)
// Force stop the pod
podman.DeleteContainer(podman.ServerContainerName, !flags.Force)
systemd.UninstallInstantiatedService(podman.ServerAttestationService, !flags.Force)
systemd.UninstallInstantiatedService(podman.HubXmlrpcService, !flags.Force)
systemd.UninstallInstantiatedService(podman.SalineService, !flags.Force)
systemd.UninstallService(podman.DBService, !flags.Force)
// Remove the volumes
if flags.Purge.Volumes {
allOk := true
volumes := []string{"cgroup"}
for _, volume := range utils.ServerVolumeMounts {
volumes = append(volumes, volume.Name)
}
for _, volume := range utils.PgsqlRequiredVolumeMounts {
volumes = append(volumes, volume.Name)
}
for _, volume := range volumes {
if err := podman.DeleteVolume(volume, !flags.Force); err != nil {
log.Warn().Err(err).Msgf(L("Failed to remove volume %s"), volume)
allOk = false
}
}
if allOk {
log.Info().Msg(L("All volumes have been removed"))
} else {
log.Warn().Msg(L("Some volumes have not been removed completely"))
}
}
if flags.Purge.Images {
for _, image := range images {
if image != "" {
if err := podman.DeleteImage(image, !flags.Force); err != nil {
return utils.Errorf(err, L("cannot delete image %s"), image)
}
}
}
log.Info().Msg(L("All images have been removed"))
}
podman.DeleteNetwork(!flags.Force)
podman.DeleteSecret(podman.ReportDBUserSecret, !flags.Force)
podman.DeleteSecret(podman.ReportDBPassSecret, !flags.Force)
podman.DeleteSecret(podman.DBUserSecret, !flags.Force)
podman.DeleteSecret(podman.DBPassSecret, !flags.Force)
podman.DeleteSecret(podman.DBAdminUserSecret, !flags.Force)
podman.DeleteSecret(podman.DBAdminPassSecret, !flags.Force)
podman.DeleteSecret(podman.DBSSLCertSecret, !flags.Force)
podman.DeleteSecret(podman.DBSSLKeySecret, !flags.Force)
podman.DeleteSecret(podman.DBCASecret, !flags.Force)
podman.DeleteSecret(podman.CASecret, !flags.Force)
podman.DeleteSecret(podman.SSLCertSecret, !flags.Force)
podman.DeleteSecret(podman.SSLKeySecret, !flags.Force)
err := systemd.ReloadDaemon(!flags.Force)
if !flags.Force {
log.Warn().Msg(
L("Nothing has been uninstalled, run with --force and --purge-volumes to actually uninstall and clear data"),
)
} else if !flags.Purge.Volumes {
log.Warn().Msg(L("Data have been kept, use podman volume commands to clear the volumes"))
}
return err
}
0707010000007b000081a400000000000000000000000168ed21dd00000630000000000000000000000000000000000000002200000000mgradm/cmd/uninstall/uninstall.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package uninstall
import (
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[utils.UninstallFlags]) *cobra.Command {
uninstallCmd := &cobra.Command{
Use: "uninstall",
GroupID: "deploy",
Short: L("Uninstall a server"),
Long: L(`Uninstall a server and optionally the corresponding volumes.
By default it will only print what would be done, use --force to actually remove.`) + kubernetes.UninstallHelp(),
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
var flags utils.UninstallFlags
return utils.CommandHelper(globalFlags, cmd, args, &flags, nil, run)
},
}
utils.AddUninstallFlags(uninstallCmd, utils.KubernetesBuilt)
return uninstallCmd
}
// NewCommand uninstalls a server and optionally the corresponding volumes.
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
return newCmd(globalFlags, uninstall)
}
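// uninstall dispatches to the podman or kubernetes implementation depending on the selected backend.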
func uninstall(
globalFlags *types.GlobalFlags,
flags *utils.UninstallFlags,
cmd *cobra.Command,
args []string,
) error {
fn, err := shared.ChoosePodmanOrKubernetes(cmd.Flags(), uninstallForPodman, uninstallForKubernetes)
if err != nil {
return err
}
return fn(globalFlags, flags, cmd, args)
}
0707010000007c000081a400000000000000000000000168ed21dd000004ec000000000000000000000000000000000000002700000000mgradm/cmd/uninstall/uninstall_test.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package uninstall
import (
"testing"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared/testutils"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
func TestParamsParsing(t *testing.T) {
args := []string{
"--force",
"--purge-volumes",
"--purge-images",
}
if utils.KubernetesBuilt {
args = append(args, "--backend", "kubectl")
}
// Test function asserting that the args are properly parsed
tester := func(_ *types.GlobalFlags, flags *utils.UninstallFlags, _ *cobra.Command, _ []string) error {
testutils.AssertTrue(t, "Error parsing --force", flags.Force)
testutils.AssertTrue(t, "Error parsing --purge-volumes", flags.Purge.Volumes)
testutils.AssertTrue(t, "Error parsing --purge-images", flags.Purge.Images)
if utils.KubernetesBuilt {
testutils.AssertEquals(t, "Error parsing --backend", "kubectl", flags.Backend)
}
return nil
}
globalFlags := types.GlobalFlags{}
cmd := newCmd(&globalFlags, tester)
testutils.AssertHasAllFlags(t, cmd, args)
cmd.SetArgs(args)
if err := cmd.Execute(); err != nil {
t.Errorf("command failed with error: %s", err)
}
}
0707010000007d000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001500000000mgradm/cmd/uninstall0707010000007e000081a400000000000000000000000168ed21dd00000673000000000000000000000000000000000000002c00000000mgradm/cmd/upgrade/kubernetes/kubernetes.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/upgrade/shared"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes"
cmd_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[kubernetes.KubernetesServerFlags]) *cobra.Command {
upgradeCmd := &cobra.Command{
Use: "kubernetes",
Short: L("Upgrade a local server on kubernetes"),
Long: L("Upgrade a local server on kubernetes"),
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
var flags kubernetes.KubernetesServerFlags
flagsUpdater := func(v *viper.Viper) {
flags.ServerFlags.Coco.IsChanged = v.IsSet("coco.replicas")
flags.ServerFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas")
flags.ServerFlags.Saline.IsChanged = v.IsSet("saline.replicas") || v.IsSet("saline.port")
flags.ServerFlags.Pgsql.IsChanged = v.IsSet("pgsql.replicas")
}
return utils.CommandHelper(globalFlags, cmd, args, &flags, flagsUpdater, run)
},
}
shared.AddUpgradeFlags(upgradeCmd)
cmd_utils.AddHelmInstallFlag(upgradeCmd)
return upgradeCmd
}
// NewCommand to upgrade a kubernetes server.
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
return newCmd(globalFlags, upgradeKubernetes)
}
0707010000007f000081a400000000000000000000000168ed21dd000009b1000000000000000000000000000000000000003100000000mgradm/cmd/upgrade/kubernetes/kubernetes_test.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"testing"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes"
"github.com/uyuni-project/uyuni-tools/shared/testutils"
"github.com/uyuni-project/uyuni-tools/shared/testutils/flagstests"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
func TestParamsParsing(t *testing.T) {
args := []string{}
args = append(args, "--ssl-password", "sslsecret")
args = append(args, flagstests.ImageFlagsTestArgs...)
args = append(args, flagstests.DBUpdateImageFlagTestArgs...)
args = append(args, flagstests.CocoFlagsTestArgs...)
args = append(args, flagstests.HubXmlrpcFlagsTestArgs...)
args = append(args, flagstests.SalineFlagsTestArgs...)
args = append(args, flagstests.PgsqlFlagsTestArgs...)
args = append(args, flagstests.SCCFlagTestArgs...)
args = append(args, flagstests.ServerKubernetesFlagsTestArgs...)
args = append(args, flagstests.DBFlagsTestArgs...)
args = append(args, flagstests.ReportDBFlagsTestArgs...)
args = append(args, flagstests.InstallDBSSLFlagsTestArgs...)
args = append(args, flagstests.InstallSSLFlagsTestArgs...)
args = append(args, flagstests.SSLGenerationFlagsTestArgs...)
// Test function asserting that the args are properly parsed
tester := func(_ *types.GlobalFlags, flags *kubernetes.KubernetesServerFlags,
_ *cobra.Command, _ []string,
) error {
flagstests.AssertImageFlag(t, &flags.Image)
flagstests.AssertDBUpgradeImageFlag(t, &flags.DBUpgradeImage)
flagstests.AssertCocoFlag(t, &flags.Coco)
flagstests.AssertHubXmlrpcFlag(t, &flags.HubXmlrpc)
flagstests.AssertSalineFlag(t, &flags.Saline)
flagstests.AssertPgsqlFlag(t, &flags.Pgsql)
flagstests.AssertSCCFlag(t, &flags.ServerFlags.Installation.SCC)
flagstests.AssertServerKubernetesFlags(t, &flags.Kubernetes)
flagstests.AssertDBFlag(t, &flags.Installation.DB)
flagstests.AssertReportDBFlag(t, &flags.Installation.ReportDB)
flagstests.AssertInstallDBSSLFlag(t, &flags.Installation.SSL.DB)
flagstests.AssertInstallSSLFlag(t, &flags.Installation.SSL)
flagstests.AssertSSLGenerationFlag(t, &flags.Installation.SSL.SSLCertGenerationFlags)
return nil
}
globalFlags := types.GlobalFlags{}
cmd := newCmd(&globalFlags, tester)
testutils.AssertHasAllFlags(t, cmd, args)
cmd.SetArgs(args)
if err := cmd.Execute(); err != nil {
t.Errorf("command failed with error: %s", err)
}
}
07070100000080000081a400000000000000000000000168ed21dd0000011a000000000000000000000000000000000000002900000000mgradm/cmd/upgrade/kubernetes/nobuild.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build nok8s
package kubernetes
import (
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
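// NewCommand returns nil as this binary is built without kubernetes support.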
func NewCommand(_ *types.GlobalFlags) *cobra.Command {
return nil
}
07070100000081000081a400000000000000000000000168ed21dd000001c4000000000000000000000000000000000000002700000000mgradm/cmd/upgrade/kubernetes/utils.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
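// upgradeKubernetes reconciles the kubernetes deployment with the requested flags.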
func upgradeKubernetes(
_ *types.GlobalFlags,
flags *kubernetes.KubernetesServerFlags,
_ *cobra.Command,
_ []string,
) error {
return kubernetes.Reconcile(flags, "")
}
07070100000082000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001e00000000mgradm/cmd/upgrade/kubernetes07070100000083000081a400000000000000000000000168ed21dd00000a66000000000000000000000000000000000000002400000000mgradm/cmd/upgrade/podman/podman.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package podman
import (
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/upgrade/shared"
cmd_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
type podmanUpgradeFlags struct {
cmd_utils.ServerFlags `mapstructure:",squash"`
Podman podman.PodmanFlags
}
func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[podmanUpgradeFlags]) *cobra.Command {
cmd := &cobra.Command{
Use: "podman",
Short: L("Upgrade a local server on podman"),
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
var flags podmanUpgradeFlags
flagsUpdater := func(v *viper.Viper) {
flags.ServerFlags.Coco.IsChanged = v.IsSet("coco.replicas")
flags.ServerFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas")
flags.ServerFlags.Saline.IsChanged = v.IsSet("saline.replicas") || v.IsSet("saline.port")
}
return utils.CommandHelper(globalFlags, cmd, args, &flags, flagsUpdater, run)
},
}
shared.AddUpgradeFlags(cmd)
podman.AddPodmanArgFlag(cmd)
return cmd
}
func newListCmd(globalFlags *types.GlobalFlags, run func(*podmanUpgradeFlags) error) *cobra.Command {
listCmd := &cobra.Command{
Use: "list",
Short: L("List available tags for an image"),
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, _ []string) error {
viper, _ := utils.ReadConfig(cmd, utils.GlobalConfigFilename, globalFlags.ConfigPath)
var flags podmanUpgradeFlags
if err := viper.Unmarshal(&flags); err != nil {
return utils.Errorf(err, L("failed to unmarshall configuration"))
}
if err := run(&flags); err != nil {
return err
}
return nil
},
}
shared.AddUpgradeListFlags(listCmd)
return listCmd
}
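// listTags logs in to the registry and lists the tags available for the configured server image.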
func listTags(flags *podmanUpgradeFlags) error {
hostData, err := podman.InspectHost()
if err != nil {
return err
}
authFile, cleaner, err := podman.PodmanLogin(hostData, flags.Installation.SCC)
if err != nil {
return utils.Errorf(err, L("failed to login to registry.suse.com"))
}
defer cleaner()
return podman.ShowAvailableTag(flags.Image.Registry, flags.Image, authFile)
}
// NewCommand to upgrade a podman server.
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
cmd := newCmd(globalFlags, upgradePodman)
cmd.AddCommand(newListCmd(globalFlags, listTags))
return cmd
}
07070100000084000081a400000000000000000000000168ed21dd0000065f000000000000000000000000000000000000002900000000mgradm/cmd/upgrade/podman/podman_test.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package podman
import (
"testing"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/shared/testutils"
"github.com/uyuni-project/uyuni-tools/shared/testutils/flagstests"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
func TestParamsParsing(t *testing.T) {
args := flagstests.ServerFlagsTestArgs()
args = append(args, flagstests.PodmanFlagsTestArgs...)
// Test function asserting that the args are properly parsed
tester := func(_ *types.GlobalFlags, flags *podmanUpgradeFlags,
_ *cobra.Command, _ []string,
) error {
flagstests.AssertPodmanInstallFlags(t, &flags.Podman)
flagstests.AssertServerFlags(t, &flags.ServerFlags)
return nil
}
globalFlags := types.GlobalFlags{}
cmd := newCmd(&globalFlags, tester)
testutils.AssertHasAllFlags(t, cmd, args)
cmd.SetArgs(args)
if err := cmd.Execute(); err != nil {
t.Errorf("command failed with error: %s", err)
}
}
func TestListParamsParsing(t *testing.T) {
args := []string{}
args = append(args, flagstests.ImageFlagsTestArgs...)
args = append(args, flagstests.SCCFlagTestArgs...)
// Test function asserting that the args are properly parsed
tester := func(flags *podmanUpgradeFlags) error {
flagstests.AssertImageFlag(t, &flags.Image)
flagstests.AssertSCCFlag(t, &flags.Installation.SCC)
return nil
}
globalFlags := types.GlobalFlags{}
cmd := newListCmd(&globalFlags, tester)
testutils.AssertHasAllFlags(t, cmd, args)
cmd.SetArgs(args)
if err := cmd.Execute(); err != nil {
t.Errorf("command failed with error: %s", err)
}
}
07070100000085000081a400000000000000000000000168ed21dd00000553000000000000000000000000000000000000002300000000mgradm/cmd/upgrade/podman/utils.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package podman
import (
"errors"
"os/exec"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/podman"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
shared_podman "github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
var systemd shared_podman.Systemd = shared_podman.NewSystemd()
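// upgradePodman validates the upgrade parameters and upgrades the podman deployment to the configured images.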
func upgradePodman(_ *types.GlobalFlags, flags *podmanUpgradeFlags, cmd *cobra.Command, _ []string) error {
hostData, err := shared_podman.InspectHost()
if err != nil {
return err
}
authFile, cleaner, err := shared_podman.PodmanLogin(hostData, flags.Installation.SCC)
if err != nil {
return utils.Errorf(err, L("failed to login to registry.suse.com"))
}
defer cleaner()
flags.Installation.CheckUpgradeParameters(cmd, "podman")
if _, err := exec.LookPath("podman"); err != nil {
return errors.New(L("install podman before running this command"))
}
return podman.Upgrade(
systemd, authFile,
flags.Image.Registry,
flags.Installation.DB,
flags.Installation.ReportDB,
flags.Installation.SSL,
flags.Image,
flags.DBUpgradeImage,
flags.Coco,
flags.HubXmlrpc,
flags.Saline,
flags.Pgsql,
flags.Installation.TZ,
)
}
07070100000086000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001a00000000mgradm/cmd/upgrade/podman07070100000087000081a400000000000000000000000168ed21dd0000028d000000000000000000000000000000000000002300000000mgradm/cmd/upgrade/shared/flags.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package shared
import (
"github.com/spf13/cobra"
adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
)
// AddUpgradeFlags adds upgrade flags to a command.
func AddUpgradeFlags(cmd *cobra.Command) {
adm_utils.AddServerFlags(cmd)
adm_utils.AddDBUpgradeImageFlag(cmd)
adm_utils.AddUpgradeCocoFlag(cmd)
adm_utils.AddUpgradeHubXmlrpcFlags(cmd)
adm_utils.AddUpgradeSalineFlag(cmd)
}
// AddUpgradeListFlags adds upgrade list flags to a command.
func AddUpgradeListFlags(cmd *cobra.Command) {
adm_utils.AddImageFlag(cmd)
adm_utils.AddSCCFlag(cmd)
}
07070100000088000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001a00000000mgradm/cmd/upgrade/shared07070100000089000081a400000000000000000000000168ed21dd00000352000000000000000000000000000000000000001e00000000mgradm/cmd/upgrade/upgrade.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package upgrade
import (
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/upgrade/kubernetes"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd/upgrade/podman"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
// NewCommand for upgrading a local server.
func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
upgradeCmd := &cobra.Command{
Use: "upgrade server",
GroupID: "deploy",
Short: L("Upgrade local server"),
Long: L("Upgrade local server"),
}
upgradeCmd.AddCommand(podman.NewCommand(globalFlags))
if kubernetesCmd := kubernetes.NewCommand(globalFlags); kubernetesCmd != nil {
upgradeCmd.AddCommand(kubernetesCmd)
}
return upgradeCmd
}
0707010000008a000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001300000000mgradm/cmd/upgrade0707010000008b000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000000b00000000mgradm/cmd0707010000008c000081a400000000000000000000000168ed21dd000002ba000000000000000000000000000000000000000f00000000mgradm/main.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package main
import (
"os"
"github.com/chai2010/gettext-go"
"github.com/spf13/cobra"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd"
l10n_utils "github.com/uyuni-project/uyuni-tools/shared/l10n/utils"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
// Run runs the `mgradm` root command.
func Run() error {
gettext.BindLocale(gettext.New("mgradm", utils.LocaleRoot, l10n_utils.New(utils.LocaleRoot)))
cobra.EnableCaseInsensitive = true
run, err := cmd.NewUyuniadmCommand()
if err != nil {
return err
}
return run.Execute()
}
func main() {
if err := Run(); err != nil {
os.Exit(1)
}
}
0707010000008d000081a400000000000000000000000168ed21dd0000017c000000000000000000000000000000000000001a00000000mgradm/shared/cmd_test.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package shared
import (
"testing"
"github.com/uyuni-project/uyuni-tools/mgradm/cmd"
)
func TestSubCommandsHaveGroup(t *testing.T) {
mgradmCmd, _ := cmd.NewUyuniadmCommand()
if !mgradmCmd.AllChildCommandsHaveGroup() {
t.Errorf("There's at least one mgradm subcommand without group")
}
}
0707010000008e000081a400000000000000000000000168ed21dd00000eec000000000000000000000000000000000000001b00000000mgradm/shared/coco/coco.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package coco
import (
"fmt"
"github.com/rs/zerolog/log"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/templates"
adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
// Upgrade coco attestation.
func Upgrade(
systemd podman.Systemd,
authFile string,
registry string,
cocoFlags adm_utils.CocoFlags,
baseImage types.ImageFlags,
db adm_utils.DBFlags,
) error {
if cocoFlags.Image.Name == "" {
// Don't touch the coco service in ptf if not already present.
return nil
}
if err := podman.CreateCredentialsSecrets(
podman.DBUserSecret, db.User,
podman.DBPassSecret, db.Password,
); err != nil {
return err
}
if err := writeCocoServiceFiles(
systemd, authFile, registry, cocoFlags, baseImage, db,
); err != nil {
return err
}
if !cocoFlags.IsChanged {
return systemd.RestartInstantiated(podman.ServerAttestationService)
}
return systemd.ScaleService(cocoFlags.Replicas, podman.ServerAttestationService)
}
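// writeCocoServiceFiles generates the systemd unit and configuration files for the confidential computing attestation service.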
func writeCocoServiceFiles(
systemd podman.Systemd,
authFile string,
registry string,
cocoFlags adm_utils.CocoFlags,
baseImage types.ImageFlags,
db adm_utils.DBFlags,
) error {
image := cocoFlags.Image
currentReplicas := systemd.CurrentReplicaCount(podman.ServerAttestationService)
log.Debug().Msgf("Current Confidential Computing replicas running are %d.", currentReplicas)
if image.Tag == "" {
if baseImage.Tag != "" {
image.Tag = baseImage.Tag
} else {
image.Tag = "latest"
}
}
if !cocoFlags.IsChanged {
log.Debug().Msg("Confidential Computing settings are not changed.")
} else if cocoFlags.Replicas == 0 {
log.Debug().Msg("No Confidential Computing requested.")
}
cocoImage, err := utils.ComputeImage(registry, baseImage.Tag, image)
if err != nil {
return utils.Errorf(err, L("failed to compute image URL"))
}
pullEnabled := (cocoFlags.Replicas > 0 && cocoFlags.IsChanged) || (currentReplicas > 0 && !cocoFlags.IsChanged)
preparedImage, err := podman.PrepareImage(authFile, cocoImage, baseImage.PullPolicy, pullEnabled)
if err != nil {
return err
}
attestationData := templates.AttestationServiceTemplateData{
NamePrefix: "uyuni",
Network: podman.UyuniNetwork,
Image: preparedImage,
DBUserSecret: podman.DBUserSecret,
DBPassSecret: podman.DBPassSecret,
}
log.Info().Msg(L("Setting up confidential computing attestation service"))
if err := utils.WriteTemplateToFile(attestationData,
podman.GetServicePath(podman.ServerAttestationService+"@"), 0555, true); err != nil {
return utils.Errorf(err, L("failed to generate systemd service unit file"))
}
environment := fmt.Sprintf(`Environment=UYUNI_SERVER_ATTESTATION_IMAGE=%s
Environment=database_connection=jdbc:postgresql://%s:%d/%s
`, preparedImage, db.Host, db.Port, db.Name)
if err := podman.GenerateSystemdConfFile(
podman.ServerAttestationService+"@", "generated.conf", environment, true,
); err != nil {
return utils.Errorf(err, L("cannot generate systemd conf file"))
}
if err := systemd.ReloadDaemon(false); err != nil {
return err
}
return nil
}
// SetupCocoContainer sets up the confidential computing attestation service.
func SetupCocoContainer(
systemd podman.Systemd,
authFile string,
registry string,
coco adm_utils.CocoFlags,
baseImage types.ImageFlags,
db adm_utils.DBFlags,
) error {
if err := writeCocoServiceFiles(
systemd, authFile, registry, coco, baseImage, db,
); err != nil {
return err
}
return systemd.ScaleService(coco.Replicas, podman.ServerAttestationService)
}
0707010000008f000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001300000000mgradm/shared/coco07070100000090000081a400000000000000000000000168ed21dd00001035000000000000000000000000000000000000001f00000000mgradm/shared/hub/xmlrpcapi.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package hub
import (
"fmt"
"github.com/rs/zerolog/log"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/templates"
cmd_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/ssl"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
// SetupHubXmlrpc prepares the systemd service and starts it if needed.
// tag is the global image tag.
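//
// A minimal usage sketch; the systemd, authFile, registry, pullPolicy and tag values are
// assumed to come from the caller and are shown here for illustration only:
//
//	flags := cmd_utils.HubXmlrpcFlags{Replicas: 1}
//	if err := SetupHubXmlrpc(systemd, authFile, registry, pullPolicy, tag, flags); err != nil {
//		return err
//	}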
func SetupHubXmlrpc(
systemd podman.Systemd,
authFile string,
registry string,
pullPolicy string,
tag string,
hubXmlrpcFlags cmd_utils.HubXmlrpcFlags,
) error {
image := hubXmlrpcFlags.Image
currentReplicas := systemd.CurrentReplicaCount(podman.HubXmlrpcService)
log.Debug().Msgf("Current HUB replicas running are %d.", currentReplicas)
if hubXmlrpcFlags.Replicas == 0 {
log.Debug().Msg("No HUB requested.")
}
if !hubXmlrpcFlags.IsChanged && hubXmlrpcFlags.Replicas == currentReplicas {
log.Info().Msgf(L("No changes requested for hub. Keep %d replicas."), currentReplicas)
}
pullEnabled := hubXmlrpcFlags.Replicas > 0 || (currentReplicas > 0 && !hubXmlrpcFlags.IsChanged)
hubXmlrpcImage, err := utils.ComputeImage(registry, tag, image)
if err != nil {
return utils.Errorf(err, L("failed to compute image URL"))
}
preparedImage, err := podman.PrepareImage(authFile, hubXmlrpcImage, pullPolicy, pullEnabled)
if err != nil {
return err
}
if err := generateHubXmlrpcSystemdService(systemd, preparedImage, podman.ServerContainerName); err != nil {
return utils.Errorf(err, L("cannot generate systemd service"))
}
if err := EnableHubXmlrpc(systemd, hubXmlrpcFlags.Replicas); err != nil {
return err
}
return nil
}
// EnableHubXmlrpc enables the hub xmlrpc service if the requested number of replicas is greater than zero,
// capping it at a single replica.
// This function is meant for installation or migration; to enable or disable the service afterwards, use ScaleService.
func EnableHubXmlrpc(systemd podman.Systemd, replicas int) error {
if replicas > 1 {
log.Warn().Msg(L("Multiple Hub XML-RPC container replicas are not currently supported, setting up only one."))
replicas = 1
}
if replicas > 0 {
if err := systemd.ScaleService(replicas, podman.HubXmlrpcService); err != nil {
return utils.Errorf(err, L("cannot enable service"))
}
}
return nil
}
// Upgrade updates the systemd service files and restarts the containers if needed.
func Upgrade(
systemd podman.Systemd,
authFile string,
registry string,
pullPolicy string,
tag string,
hubXmlrpcFlags cmd_utils.HubXmlrpcFlags,
) error {
if hubXmlrpcFlags.Image.Name == "" {
// Don't touch the hub service in a PTF if it is not already present.
return nil
}
if err := SetupHubXmlrpc(systemd, authFile, registry, pullPolicy, tag, hubXmlrpcFlags); err != nil {
return err
}
if err := systemd.ReloadDaemon(false); err != nil {
return err
}
if !hubXmlrpcFlags.IsChanged {
return systemd.RestartInstantiated(podman.HubXmlrpcService)
}
return systemd.ScaleService(hubXmlrpcFlags.Replicas, podman.HubXmlrpcService)
}
// generateHubXmlrpcSystemdService creates the Hub XMLRPC systemd files.
func generateHubXmlrpcSystemdService(systemd podman.Systemd, image string, serverHost string) error {
hubXmlrpcData := templates.HubXmlrpcServiceTemplateData{
CaSecret: podman.CASecret,
CaPath: ssl.CAContainerPath,
Ports: utils.HubXmlrpcPorts,
NamePrefix: "uyuni",
Network: podman.UyuniNetwork,
Image: image,
ServerHost: serverHost,
}
if err := utils.WriteTemplateToFile(
hubXmlrpcData, podman.GetServicePath(podman.HubXmlrpcService+"@"), 0555, true,
); err != nil {
return utils.Errorf(err, L("failed to generate systemd service unit file"))
}
environment := fmt.Sprintf("Environment=UYUNI_HUB_XMLRPC_IMAGE=%s", image)
if err := podman.GenerateSystemdConfFile(
podman.HubXmlrpcService+"@", "generated.conf", environment, true,
); err != nil {
return utils.Errorf(err, L("cannot generate systemd conf file"))
}
return systemd.ReloadDaemon(false)
}
07070100000091000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001200000000mgradm/shared/hub07070100000092000081a400000000000000000000000168ed21dd00001ed7000000000000000000000000000000000000002900000000mgradm/shared/kubernetes/certificates.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"encoding/base64"
"errors"
"fmt"
"path/filepath"
"strings"
"time"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/templates"
cmd_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/ssl"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// DeployExistingCertificate deploys an existing certificate.
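//
// A minimal usage sketch mirroring how Reconcile calls it; the namespace and flags are
// assumed to come from the caller:
//
//	if err := DeployExistingCertificate(namespace, &flags.Installation.SSL); err != nil {
//		return err
//	}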
func DeployExistingCertificate(namespace string, sslFlags *cmd_utils.InstallSSLFlags) error {
if err := createTLSCertificate(
namespace, kubernetes.CASecretName, kubernetes.CAConfigName, &sslFlags.Ca, &sslFlags.Server,
); err != nil {
return err
}
// Handle the DB certificate
var dbCA *types.CaChain
var dbPair *types.SSLPair
if sslFlags.UseProvidedDB() {
dbCA = &sslFlags.DB.CA
dbPair = &sslFlags.DB.SSLPair
} else if sslFlags.DB.IsDefined() && !sslFlags.DB.CA.IsThirdParty() {
// Let's use the CA already present in the server.
dbCA = &sslFlags.Ca
dbPair = &sslFlags.DB.SSLPair
} else {
return errors.New(L("Database SSL certificate and key have to be defined"))
}
return createTLSCertificate(namespace, kubernetes.DBCertSecretName, kubernetes.DBCAConfigName, dbCA, dbPair)
}
func createTLSCertificate(
namespace string,
secretName string,
caConfigName string,
ca *types.CaChain,
certPair *types.SSLPair,
) error {
// Deploy the SSL Certificate secret and CA ConfigMap
serverCrt, rootCaCrt, err := ssl.OrderCas(ca, certPair)
if err != nil {
return err
}
serverKey := utils.ReadFile(certPair.Key)
tempDir, cleaner, err := utils.TempDir()
if err != nil {
return err
}
defer cleaner()
secretPath := filepath.Join(tempDir, "secret.yaml")
log.Info().Msg(L("Creating SSL server certificate secret"))
tlsSecretData := templates.TLSSecretTemplateData{
Namespace: namespace,
Name: secretName,
Certificate: base64.StdEncoding.EncodeToString(serverCrt),
Key: base64.StdEncoding.EncodeToString(serverKey),
RootCa: base64.StdEncoding.EncodeToString(rootCaCrt),
}
if err = utils.WriteTemplateToFile(tlsSecretData, secretPath, 0500, true); err != nil {
return utils.Errorf(err, L("Failed to generate %s secret definition"), secretName)
}
err = utils.RunCmd("kubectl", "apply", "-f", secretPath)
if err != nil {
return utils.Errorf(err, L("Failed to create %s TLS secret"), secretName)
}
// Copy the CA cert into a ConfigMap for containers that shouldn't see the key.
return createCAConfig(namespace, caConfigName, rootCaCrt)
}
// DeployReusedCA deploys an existing SSL CA using an already installed cert-manager.
func DeployReusedCA(namespace string, ca *types.SSLPair, fqdn string) error {
log.Info().Msg(L("Creating cert-manager issuer for existing CA"))
return templates.NewReusedCAIssuerTemplate(namespace, fqdn, ca.Cert, ca.Key).Apply()
}
// DeployGeneratedCA deploys a new SSL CA using cert-manager.
func DeployGeneratedCA(
namespace string,
sslFlags *cmd_utils.InstallSSLFlags,
fqdn string,
) error {
log.Info().Msg(L("Creating SSL certificate issuer"))
return templates.NewGeneratedCAIssuerTemplate(
namespace,
fqdn,
sslFlags.Country,
sslFlags.State,
sslFlags.City,
sslFlags.Org,
sslFlags.OU,
sslFlags.Email,
).Apply()
}
// waitForIssuer waits for the issuer to become ready, polling for up to a minute.
func waitForIssuer(namespace string, name string) error {
for i := 0; i < 60; i++ {
out, err := utils.RunCmdOutput(
zerolog.DebugLevel, "kubectl", "get",
"-o=jsonpath={.status.conditions[*].type}",
"-n", namespace,
"issuer", name,
)
if err == nil && string(out) == "Ready" {
return nil
}
time.Sleep(1 * time.Second)
}
return errors.New(L("Issuer didn't turn ready after 60s"))
}
// InstallCertManager deploys the cert-manager helm chart with the CRDs.
func InstallCertManager(kubernetesFlags *cmd_utils.KubernetesFlags, kubeconfig string, imagePullPolicy string) error {
if ready, err := kubernetes.IsDeploymentReady("", "cert-manager"); err != nil {
return err
} else if !ready {
log.Info().Msg(L("Installing cert-manager"))
repo := ""
chart := kubernetesFlags.CertManager.Chart
version := kubernetesFlags.CertManager.Version
namespace := kubernetesFlags.CertManager.Namespace
args := []string{
"--set", "crds.enabled=true",
"--set", "crds.keep=true",
"--set-json", "global.commonLabels={\"installedby\": \"mgradm\"}",
"--set", "image.pullPolicy=" + string(kubernetes.GetPullPolicy(imagePullPolicy)),
}
extraValues := kubernetesFlags.CertManager.Values
if extraValues != "" {
args = append(args, "-f", extraValues)
}
// Use the upstream chart if nothing is defined.
if chart == "" {
repo = "https://charts.jetstack.io"
chart = "cert-manager"
}
// The installedby label will be used to only uninstall what we installed
if err := kubernetes.HelmUpgrade(
kubeconfig, namespace, true, repo, "cert-manager", chart, version, args...,
); err != nil {
return utils.Error(err, L("cannot run helm upgrade"))
}
}
// Wait for cert-manager to be ready
err := kubernetes.WaitForDeployments("", "cert-manager-webhook")
if err != nil {
return utils.Error(err, L("cannot deploy"))
}
return nil
}
func extractCACertToConfig(namespace string) error {
// TODO Replace with [trust-manager](https://cert-manager.io/docs/projects/trust-manager/) to automate this
const jsonPath = "-o=jsonpath={.data.ca\\.crt}"
log.Info().Msg(L("Extracting CA certificate to a ConfigMap"))
// Skip extracting if the configmap is already present
out, err := utils.RunCmdOutput(
zerolog.DebugLevel, "kubectl", "get", "configmap", kubernetes.CAConfigName, jsonPath, "-n", namespace,
)
log.Info().Msgf(L("CA cert: %s"), string(out))
if err == nil && len(out) > 0 {
log.Info().Msgf(L("%s ConfigMap already existing, skipping extraction"), kubernetes.CAConfigName)
return nil
}
out, err = utils.RunCmdOutput(
zerolog.DebugLevel, "kubectl", "get", "secret", "-n", namespace, kubernetes.CAConfigName, jsonPath,
)
if err != nil {
return utils.Errorf(err, L("Failed to get %s certificate"), kubernetes.CAConfigName)
}
decoded, err := base64.StdEncoding.DecodeString(string(out))
if err != nil {
return utils.Error(err, L("failed to base64 decode CA certificate"))
}
// Copy the CA to a ConfigMap as the secret shouldn't be available to the server
if err := createCAConfig(namespace, kubernetes.CAConfigName, decoded); err != nil {
return err
}
// Also copy the CA to a separate ConfigMap since the setup and server containers expect it there.
return createCAConfig(namespace, kubernetes.DBCAConfigName, decoded)
}
func createCAConfig(namespace string, name string, ca []byte) error {
configMap := core.ConfigMap{
TypeMeta: meta.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
ObjectMeta: meta.ObjectMeta{
Namespace: namespace,
Name: name,
Labels: kubernetes.GetLabels(kubernetes.ServerApp, ""),
},
Data: map[string]string{
"ca.crt": string(ca),
},
}
return kubernetes.Apply([]runtime.Object{&configMap}, fmt.Sprintf(L("failed to create the %s ConfigMap"), name))
}
// HasIssuer returns true if the issuer is defined.
//
// False will be returned in case of errors or if the issuer resource doesn't exist on the cluster.
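//
// A minimal usage sketch mirroring how Reconcile uses it; the namespace value is an
// illustrative assumption:
//
//	if !HasIssuer("uyuni", kubernetes.CAIssuerName) {
//		// cert-manager still needs to be installed and the issuer created
//	}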
func HasIssuer(namespace string, name string) bool {
out, err := runCmdOutput(zerolog.DebugLevel, "kubectl", "get", "issuer", "-n", namespace, name, "-o", "name")
return err == nil && strings.TrimSpace(string(out)) != ""
}
07070100000093000081a400000000000000000000000168ed21dd00000362000000000000000000000000000000000000002e00000000mgradm/shared/kubernetes/certificates_test.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"errors"
"fmt"
"testing"
"github.com/rs/zerolog"
"github.com/uyuni-project/uyuni-tools/shared/testutils"
)
func TestHasIssuer(t *testing.T) {
type testType struct {
out string
err error
expected bool
}
data := []testType{
{
out: "issuer.cert-manager.io/someissuer\n",
err: nil,
expected: true,
},
{
out: "any error\n",
err: errors.New("Any error"),
expected: false,
},
}
for i, test := range data {
runCmdOutput = func(_ zerolog.Level, _ string, _ ...string) ([]byte, error) {
return []byte(test.out), test.err
}
testutils.AssertEquals(t, fmt.Sprintf("test %d: unexpected result", i+1), test.expected,
HasIssuer("somens", "someissuer"),
)
}
}
07070100000094000081a400000000000000000000000168ed21dd00000b48000000000000000000000000000000000000002100000000mgradm/shared/kubernetes/coco.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"fmt"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/utils"
apps "k8s.io/api/apps/v1"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
const (
// CocoDeployName is the deployment name for confidential computing attestations.
CocoDeployName = "uyuni-coco-attestation"
)
// StartCocoDeployment installs the confidential computing deployment.
func StartCocoDeployment(
namespace string,
image string,
pullPolicy string,
pullSecret string,
replicas int,
dbPort int,
dbName string,
) error {
deploy := getCocoDeployment(namespace, image, pullPolicy, pullSecret, int32(replicas), dbPort, dbName)
return kubernetes.Apply([]runtime.Object{deploy},
L("failed to create confidential computing attestations deployment"),
)
}
func getCocoDeployment(
namespace string,
image string,
pullPolicy string,
pullSecret string,
replicas int32,
dbPort int,
dbName string,
) *apps.Deployment {
cnxURL := fmt.Sprintf("jdbc:postgresql://%s:%d/%s", utils.DBServiceName, dbPort, dbName)
deploy := &apps.Deployment{
TypeMeta: meta.TypeMeta{Kind: "Deployment", APIVersion: "apps/v1"},
ObjectMeta: meta.ObjectMeta{
Name: CocoDeployName,
Namespace: namespace,
Labels: kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.CocoComponent),
},
Spec: apps.DeploymentSpec{
Replicas: &replicas,
Selector: &meta.LabelSelector{
MatchLabels: kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.CocoComponent),
},
Template: core.PodTemplateSpec{
ObjectMeta: meta.ObjectMeta{
Labels: kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.CocoComponent),
},
Spec: core.PodSpec{
Containers: []core.Container{
{
Name: "coco",
Image: image,
ImagePullPolicy: kubernetes.GetPullPolicy(pullPolicy),
Env: []core.EnvVar{
{Name: "database_connection", Value: cnxURL},
{Name: "database_user", ValueFrom: &core.EnvVarSource{
SecretKeyRef: &core.SecretKeySelector{
LocalObjectReference: core.LocalObjectReference{Name: DBSecret},
Key: secretUsername,
},
}},
{Name: "database_password", ValueFrom: &core.EnvVarSource{
SecretKeyRef: &core.SecretKeySelector{
LocalObjectReference: core.LocalObjectReference{Name: DBSecret},
Key: secretPassword,
},
}},
},
},
},
},
},
},
}
if pullSecret != "" {
deploy.Spec.Template.Spec.ImagePullSecrets = []core.LocalObjectReference{{Name: pullSecret}}
}
return deploy
}
07070100000095000081a400000000000000000000000168ed21dd000018d3000000000000000000000000000000000000001f00000000mgradm/shared/kubernetes/db.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"strings"
"github.com/rs/zerolog"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
apps "k8s.io/api/apps/v1"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
const (
// DBDeployName is the name of the database Deployment object.
DBDeployName = "db"
// DBAdminSecret is the name of the database administrator credentials secret.
// This secret is only needed for a DB prepared by mgradm.
DBAdminSecret = "db-admin-credentials"
// DBSecret is the name of the database credentials secret.
DBSecret = "db-credentials"
// ReportdbSecret is the name of the report database credentials secret.
ReportdbSecret = "reportdb-credentials"
// SCCSecret is the name of the SCC credentials secret.
SCCSecret = "scc-credentials"
secretUsername = "username"
secretPassword = "password"
)
// CreateBasicAuthSecret creates a secret of type basic-auth.
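//
// A minimal usage sketch; the namespace and credential values are illustrative assumptions:
//
//	if err := CreateBasicAuthSecret("uyuni", DBSecret, "spacewalk", dbPassword); err != nil {
//		return err
//	}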
func CreateBasicAuthSecret(namespace string, name string, user string, password string) error {
// Check if the secret is already existing
out, err := runCmdOutput(zerolog.DebugLevel, "kubectl", "get", "-n", namespace, "secret", name, "-o", "name")
if err == nil && strings.TrimSpace(string(out)) != "" {
return nil
}
// Create the secret
secret := core.Secret{
TypeMeta: meta.TypeMeta{APIVersion: "v1", Kind: "Secret"},
ObjectMeta: meta.ObjectMeta{
Namespace: namespace,
Name: name,
Labels: kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.ServerComponent),
},
// It seems serializing this object automatically transforms the secrets to base64.
Data: map[string][]byte{
secretUsername: []byte(user),
secretPassword: []byte(password),
},
Type: core.SecretTypeBasicAuth,
}
return kubernetes.Apply([]runtime.Object{&secret}, L("failed to create the secret"))
}
// CreateDBDeployment creates a new deployment of the database.
func CreateDBDeployment(
namespace string,
image string,
pullPolicy string,
pullSecret string,
timezone string,
) error {
deploy := getDBDeployment(namespace, image, kubernetes.GetPullPolicy(pullPolicy), pullSecret, timezone)
return kubernetes.Apply([]runtime.Object{deploy}, L("failed to create the database deployment"))
}
func getDBDeployment(
namespace string,
image string,
pullPolicy core.PullPolicy,
pullSecret string,
timezone string,
) *apps.Deployment {
var replicas int32 = 1
mounts := []types.VolumeMount{utils.VarPgsqlDataVolumeMount}
volumeMounts := kubernetes.ConvertVolumeMounts(mounts)
volumes := kubernetes.CreateVolumes(mounts)
// Add TLS secret
const tlsVolumeName = "tls-secret"
var secretMode int32 = 0400
tlsVolume := kubernetes.CreateSecretVolume(tlsVolumeName, kubernetes.DBCertSecretName)
tlsVolume.Secret.Items = []core.KeyToPath{
{Key: "tls.crt", Path: "tls/certs/spacewalk.crt"},
{Key: "tls.key", Path: "tls/private/pg-spacewalk.key", Mode: &secretMode},
{Key: "ca.crt", Path: "trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT"},
}
volumes = append(volumes, tlsVolume)
volumeMounts = append(volumeMounts,
core.VolumeMount{Name: tlsVolumeName, MountPath: "/etc/pki"},
)
envs := []core.EnvVar{
{Name: "TZ", Value: timezone},
// Add the admin credentials secret
{
Name: "POSTGRES_USER",
ValueFrom: &core.EnvVarSource{
SecretKeyRef: &core.SecretKeySelector{
LocalObjectReference: core.LocalObjectReference{Name: DBAdminSecret}, Key: secretUsername,
},
},
},
{
Name: "POSTGRES_PASSWORD",
ValueFrom: &core.EnvVarSource{
SecretKeyRef: &core.SecretKeySelector{
LocalObjectReference: core.LocalObjectReference{Name: DBAdminSecret}, Key: secretPassword,
},
},
},
// Add the internal db user credentials secret
{
Name: "MANAGER_USER",
ValueFrom: &core.EnvVarSource{
SecretKeyRef: &core.SecretKeySelector{
LocalObjectReference: core.LocalObjectReference{Name: DBSecret}, Key: secretUsername,
},
},
},
{
Name: "MANAGER_PASS",
ValueFrom: &core.EnvVarSource{
SecretKeyRef: &core.SecretKeySelector{
LocalObjectReference: core.LocalObjectReference{Name: DBSecret}, Key: secretPassword,
},
},
},
// Add the report db user credentials secret
{
Name: "REPORT_DB_USER",
ValueFrom: &core.EnvVarSource{
SecretKeyRef: &core.SecretKeySelector{
LocalObjectReference: core.LocalObjectReference{Name: ReportdbSecret}, Key: secretUsername,
},
},
},
{
Name: "REPORT_DB_PASS",
ValueFrom: &core.EnvVarSource{
SecretKeyRef: &core.SecretKeySelector{
LocalObjectReference: core.LocalObjectReference{Name: ReportdbSecret}, Key: secretPassword,
},
},
},
}
// fsGroup is required to set the owner of the mounted files, most importantly the SSL key file.
var fsGroup int64 = 999
deploy := apps.Deployment{
TypeMeta: meta.TypeMeta{Kind: "Deployment", APIVersion: "apps/v1"},
ObjectMeta: meta.ObjectMeta{
Name: DBDeployName,
Namespace: namespace,
Labels: kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.DBComponent),
},
Spec: apps.DeploymentSpec{
Replicas: &replicas,
// Since the DB container will never be able to scale, we need to stick to recreate strategy
// or the newly deployed pods won't be ready.
Strategy: apps.DeploymentStrategy{Type: apps.RecreateDeploymentStrategyType},
Selector: &meta.LabelSelector{
MatchLabels: map[string]string{kubernetes.ComponentLabel: kubernetes.DBComponent},
},
Template: core.PodTemplateSpec{
ObjectMeta: meta.ObjectMeta{
Labels: kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.DBComponent),
},
Spec: core.PodSpec{
SecurityContext: &core.PodSecurityContext{
FSGroup: &fsGroup,
},
Containers: []core.Container{
{
Name: "db",
Image: image,
ImagePullPolicy: pullPolicy,
VolumeMounts: volumeMounts,
Env: envs,
},
},
Volumes: volumes,
},
},
},
}
if pullSecret != "" {
deploy.Spec.Template.Spec.ImagePullSecrets = []core.LocalObjectReference{{Name: pullSecret}}
}
return &deploy
}
07070100000096000081a400000000000000000000000168ed21dd0000070f000000000000000000000000000000000000002700000000mgradm/shared/kubernetes/dbFinalize.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"github.com/rs/zerolog/log"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/templates"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
batch "k8s.io/api/batch/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// DBFinalizeJobName is the name of the Database finalization job.
const DBFinalizeJobName = "uyuni-db-finalize"
// StartDBFinalizeJob starts the database finalization job.
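//
// A minimal usage sketch mirroring how the reconcile code drives the job; the parameter
// values are illustrative assumptions:
//
//	jobName, err := StartDBFinalizeJob(namespace, serverImage, pullPolicy, pullSecret, true, false)
//	if err != nil {
//		return err
//	}
//	// No timeout: the duration depends on the size of the database.
//	if err := kubernetes.WaitForJob(namespace, jobName, -1); err != nil {
//		return err
//	}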
func StartDBFinalizeJob(
namespace string,
serverImage string,
pullPolicy string,
pullSecret string,
schemaUpdateRequired bool,
migration bool,
) (string, error) {
log.Info().Msg(L("Running database finalization, this could be long depending on the size of the database…"))
job, err := getDBFinalizeJob(namespace, serverImage, pullPolicy, pullSecret, schemaUpdateRequired, migration)
if err != nil {
return "", err
}
return job.ObjectMeta.Name, kubernetes.Apply([]runtime.Object{job}, L("failed to run the database finalization job"))
}
func getDBFinalizeJob(
namespace string,
image string,
pullPolicy string,
pullSecret string,
schemaUpdateRequired bool,
migration bool,
) (*batch.Job, error) {
mounts := []types.VolumeMount{
{MountPath: "/var/lib/pgsql", Name: "var-pgsql"},
{MountPath: "/etc/rhn", Name: "etc-rhn"},
}
// Prepare the script
scriptData := templates.FinalizePostgresTemplateData{
RunReindex: migration,
RunSchemaUpdate: schemaUpdateRequired,
Migration: migration,
Kubernetes: true,
}
return kubernetes.GetScriptJob(namespace, DBFinalizeJobName, image, pullPolicy, pullSecret, mounts, scriptData)
}
07070100000097000081a400000000000000000000000168ed21dd000008a9000000000000000000000000000000000000002900000000mgradm/shared/kubernetes/dbUpgradeJob.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"fmt"
"github.com/rs/zerolog/log"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/templates"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
batch "k8s.io/api/batch/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// DBUpgradeJobName is the name of the database upgrade job.
const DBUpgradeJobName = "uyuni-db-upgrade"
// StartDBUpgradeJob starts the database upgrade job.
func StartDBUpgradeJob(
namespace string,
registry string,
image types.ImageFlags,
migrationImage types.ImageFlags,
pullSecret string,
oldPgsql string,
newPgsql string,
) (string, error) {
log.Info().Msgf(L("Upgrading PostgreSQL database from %[1]s to %[2]s…"), oldPgsql, newPgsql)
var migrationImageURL string
var err error
if migrationImage.Name == "" {
imageName := fmt.Sprintf("-migration-%s-%s", oldPgsql, newPgsql)
migrationImageURL, err = utils.ComputeImage(registry, image.Tag, image, imageName)
} else {
migrationImageURL, err = utils.ComputeImage(registry, image.Tag, migrationImage)
}
if err != nil {
return "", utils.Error(err, L("failed to compute image URL"))
}
log.Info().Msgf(L("Using database upgrade image %s"), migrationImageURL)
job, err := getDBUpgradeJob(namespace, migrationImageURL, image.PullPolicy, pullSecret, oldPgsql, newPgsql)
if err != nil {
return "", err
}
return job.ObjectMeta.Name, kubernetes.Apply([]runtime.Object{job}, L("failed to run the database upgrade job"))
}
func getDBUpgradeJob(
namespace string,
image string,
pullPolicy string,
pullSecret string,
oldPgsql string,
newPgsql string,
) (*batch.Job, error) {
mounts := []types.VolumeMount{
{MountPath: "/var/lib/pgsql", Name: "var-pgsql"},
}
// Prepare the script
scriptData := templates.PostgreSQLVersionUpgradeTemplateData{
OldVersion: oldPgsql,
NewVersion: newPgsql,
}
return kubernetes.GetScriptJob(namespace, DBUpgradeJobName, image, pullPolicy, pullSecret, mounts, scriptData)
}
07070100000098000081a400000000000000000000000168ed21dd000027c3000000000000000000000000000000000000002700000000mgradm/shared/kubernetes/deployment.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"strings"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/uyuni-project/uyuni-tools/shared/ssl"
"github.com/uyuni-project/uyuni-tools/shared/utils"
cmd_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
apps "k8s.io/api/apps/v1"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
)
// ServerDeployName is the name of the server deployment.
const ServerDeployName = "uyuni"
// CreateServerDeployment creates a new deployment of the server.
func CreateServerDeployment(
namespace string,
serverImage string,
pullPolicy string,
timezone string,
debug bool,
mirrorPvName string,
pullSecret string,
) error {
if mirrorPvName != "" {
// Create a PVC using the required mirror PV
if err := kubernetes.CreatePersistentVolumeClaimForVolume(namespace, mirrorPvName); err != nil {
return err
}
}
serverDeploy := GetServerDeployment(
namespace, serverImage, kubernetes.GetPullPolicy(pullPolicy), timezone, debug, mirrorPvName, pullSecret,
)
return kubernetes.Apply([]runtime.Object{serverDeploy}, L("failed to create the server deployment"))
}
// GetServerDeployment computes the deployment object for an Uyuni server.
func GetServerDeployment(
namespace string,
image string,
pullPolicy core.PullPolicy,
timezone string,
debug bool,
mirrorPvName string,
pullSecret string,
) *apps.Deployment {
var replicas int32 = 1
runMount, runVolume := kubernetes.CreateTmpfsMount("/run", "256Mi")
cgroupMount, cgroupVolume := kubernetes.CreateHostPathMount(
"/sys/fs/cgroup", "/sys/fs/cgroup", core.HostPathDirectory,
)
// Compute the needed ports
ports := utils.GetServerPorts(debug)
template := getServerPodTemplate(image, pullPolicy, timezone, pullSecret)
template.Spec.Volumes = append(template.Spec.Volumes, runVolume, cgroupVolume)
template.Spec.Containers[0].Ports = kubernetes.ConvertPortMaps(ports)
template.Spec.Containers[0].VolumeMounts = append(template.Spec.Containers[0].VolumeMounts,
runMount, cgroupMount,
)
if mirrorPvName != "" {
// Add a mount for the mirror
template.Spec.Containers[0].VolumeMounts = append(template.Spec.Containers[0].VolumeMounts,
core.VolumeMount{
Name: mirrorPvName,
MountPath: "/mirror",
},
)
// Add the environment variable for the deployment to use the mirror
// This doesn't make sense for migration as the setup script is not executed.
template.Spec.Containers[0].Env = append(template.Spec.Containers[0].Env,
core.EnvVar{Name: "MIRROR_PATH", Value: "/mirror"},
)
}
template.Spec.Containers[0].Lifecycle = &core.Lifecycle{
PreStop: &core.LifecycleHandler{
Exec: &core.ExecAction{
Command: []string{"/bin/sh", "-c", "spacewalk-service stop && systemctl stop postgresql"},
},
},
}
template.Spec.Containers[0].ReadinessProbe = &core.Probe{
ProbeHandler: core.ProbeHandler{
HTTPGet: &core.HTTPGetAction{
Port: intstr.FromInt(80),
Path: "/rhn/manager/api/api/getVersion",
},
},
PeriodSeconds: 30,
TimeoutSeconds: 20,
FailureThreshold: 5,
}
template.Spec.Containers[0].LivenessProbe = &core.Probe{
ProbeHandler: core.ProbeHandler{
HTTPGet: &core.HTTPGetAction{
Port: intstr.FromInt(80),
Path: "/rhn/manager/api/api/getVersion",
},
},
InitialDelaySeconds: 60,
PeriodSeconds: 60,
TimeoutSeconds: 20,
FailureThreshold: 5,
}
deployment := apps.Deployment{
TypeMeta: meta.TypeMeta{Kind: "Deployment", APIVersion: "apps/v1"},
ObjectMeta: meta.ObjectMeta{
Name: ServerDeployName,
Namespace: namespace,
Labels: kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.ServerComponent),
},
Spec: apps.DeploymentSpec{
Replicas: &replicas,
// As long as the container cannot scale, we need to stick to recreate strategy
// or the newly deployed pods won't be ready.
Strategy: apps.DeploymentStrategy{Type: apps.RecreateDeploymentStrategyType},
Selector: &meta.LabelSelector{
MatchLabels: map[string]string{kubernetes.ComponentLabel: kubernetes.ServerComponent},
},
Template: template,
},
}
return &deployment
}
// getServerPodTemplate computes the pod template with the init container and the minimum viable volumes and mounts.
// This is intended to be shared with the setup job.
func getServerPodTemplate(
image string,
pullPolicy core.PullPolicy,
timezone string,
pullSecret string,
) core.PodTemplateSpec {
envs := []core.EnvVar{
{Name: "TZ", Value: timezone},
}
mounts := GetServerMounts()
// Convert our mounts to Kubernetes objects
volumeMounts := kubernetes.ConvertVolumeMounts(mounts)
// The init mounts are the same mounts, but prefixed with /mnt, used only by the init container populating the volumes.
var initMounts []core.VolumeMount
for _, mount := range volumeMounts {
initMount := mount.DeepCopy()
initMount.MountPath = "/mnt" + initMount.MountPath
initMounts = append(initMounts, *initMount)
}
volumes := kubernetes.CreateVolumes(mounts)
const caVolumeName = "ca-cert"
const dbcaVolumeName = "db-ca-cert"
caMount := core.VolumeMount{
Name: caVolumeName,
MountPath: "/etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT",
ReadOnly: true,
SubPath: "ca.crt",
}
caSaltMount := core.VolumeMount{
Name: caVolumeName,
MountPath: "/usr/share/susemanager/salt/certs/RHN-ORG-TRUSTED-SSL-CERT",
ReadOnly: true,
SubPath: "ca.crt",
}
caPubMount := core.VolumeMount{
Name: caVolumeName,
MountPath: "/srv/www/htdocs/pub/RHN-ORG-TRUSTED-SSL-CERT",
ReadOnly: true,
SubPath: "ca.crt",
}
dbcaMount := core.VolumeMount{
Name: dbcaVolumeName,
MountPath: ssl.DBCAContainerPath,
ReadOnly: true,
SubPath: "ca.crt",
}
const tlsVolumeName = "tls"
certMount := core.VolumeMount{Name: tlsVolumeName, MountPath: "/etc/pki/"}
caVolume := kubernetes.CreateConfigVolume(caVolumeName, kubernetes.CAConfigName)
dbcaVolume := kubernetes.CreateConfigVolume(dbcaVolumeName, kubernetes.DBCAConfigName)
var secretMode int32 = 0400
tlsVolume := kubernetes.CreateSecretVolume(tlsVolumeName, kubernetes.CertSecretName)
tlsVolume.Secret.Items = []core.KeyToPath{
{Key: "tls.crt", Path: "tls/certs/spacewalk.crt"},
{Key: "tls.key", Path: "tls/private/spacewalk.key", Mode: &secretMode},
}
volumeMounts = append(volumeMounts, caMount, caSaltMount, caPubMount, dbcaMount, certMount)
volumes = append(volumes, caVolume, dbcaVolume, tlsVolume)
template := core.PodTemplateSpec{
ObjectMeta: meta.ObjectMeta{
Labels: kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.ServerComponent),
},
Spec: core.PodSpec{
InitContainers: []core.Container{
{
Name: "init-volumes",
Image: image,
ImagePullPolicy: pullPolicy,
Command: []string{"sh", "-x", "-c", initScript},
VolumeMounts: initMounts,
},
},
Containers: []core.Container{
{
Name: "uyuni",
Image: image,
ImagePullPolicy: pullPolicy,
Env: envs,
VolumeMounts: volumeMounts,
},
},
Volumes: volumes,
},
}
if pullSecret != "" {
template.Spec.ImagePullSecrets = []core.LocalObjectReference{{Name: pullSecret}}
}
return template
}
// initScript copies the image content into the freshly mounted empty volumes and adjusts
// their ownership and permissions, so the server container starts with populated persistent volumes.
const initScript = `
# Fill the empty volumes
for vol in /var/lib/cobbler \
/var/lib/salt \
/var/lib/pgsql \
/var/cache \
/var/log \
/srv/salt \
/srv/www \
/srv/tftpboot \
/srv/formula_metadata \
/srv/pillar \
/srv/susemanager \
/srv/spacewalk \
/root \
/etc/apache2 \
/etc/rhn \
/etc/systemd/system/multi-user.target.wants \
/etc/systemd/system/sockets.target.wants \
/etc/salt \
/etc/tomcat \
/etc/cobbler \
/etc/sysconfig \
/etc/postfix \
/etc/sssd
do
chown --reference=$vol /mnt$vol;
chmod --reference=$vol /mnt$vol;
if [ -z "$(ls -A /mnt$vol)" ]; then
cp -a $vol/. /mnt$vol;
fi
done
`
// GetServerMounts returns the volume mounts required for the server pod.
func GetServerMounts() []types.VolumeMount {
// Filter out the duplicate mounts to avoid issues applying the jobs
serverMounts := utils.ServerVolumeMounts
mounts := []types.VolumeMount{}
mountsSet := map[string]types.VolumeMount{}
for _, mount := range serverMounts {
if _, exists := mountsSet[mount.Name]; !exists {
mounts = append(mounts, mount)
mountsSet[mount.Name] = mount
}
}
return mounts
}
// TuneMounts adjusts the server mounts with the size and storage class passed as parameters.
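//
// A minimal usage sketch; the storage class and size values are illustrative assumptions:
//
//	volumes := cmd_utils.VolumesFlags{Class: "default", Database: cmd_utils.VolumeFlags{Size: "50Gi"}}
//	mounts := TuneMounts(GetServerMounts(), &volumes)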
func TuneMounts(mounts []types.VolumeMount, flags *cmd_utils.VolumesFlags) []types.VolumeMount {
tunedMounts := []types.VolumeMount{}
for _, mount := range mounts {
class := flags.Class
var volumeFlags *cmd_utils.VolumeFlags
switch mount.Name {
case "var-pgsql":
volumeFlags = &flags.Database
case "var-spacewalk":
volumeFlags = &flags.Packages
case "var-cache":
volumeFlags = &flags.Cache
case "srv-www":
volumeFlags = &flags.Www
}
if volumeFlags != nil {
if volumeFlags.Class != "" {
class = volumeFlags.Class
}
mount.Size = volumeFlags.Size
}
mount.Class = class
tunedMounts = append(tunedMounts, mount)
}
return tunedMounts
}
var runCmdOutput = utils.RunCmdOutput
// getRunningServerImage extracts the main server container image from a running deployment.
func getRunningServerImage(namespace string) string {
out, err := runCmdOutput(
zerolog.DebugLevel, "kubectl", "get", "deploy", "-n", namespace, ServerDeployName,
"-o", "jsonpath={.spec.template.spec.containers[0].image}",
)
if err != nil {
// Errors could mean that the namespace or deployment doesn't exist; just return no image.
log.Debug().Err(err).Msg("failed to get the running server container image")
return ""
}
return strings.TrimSpace(string(out))
}
07070100000099000081a400000000000000000000000168ed21dd00000349000000000000000000000000000000000000002c00000000mgradm/shared/kubernetes/deployment_test.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"errors"
"fmt"
"testing"
"github.com/rs/zerolog"
"github.com/uyuni-project/uyuni-tools/shared/testutils"
)
func TestGetRunningServerImage(t *testing.T) {
type dataType struct {
err error
out string
expected string
}
data := []dataType{
{nil, "registry.opensuse.org/uyuni/server:latest\n", "registry.opensuse.org/uyuni/server:latest"},
{errors.New("deployment not found"), "", ""},
}
for i, test := range data {
runCmdOutput = func(_ zerolog.Level, _ string, _ ...string) ([]byte, error) {
return []byte(test.out), test.err
}
actual := getRunningServerImage("myns")
testutils.AssertEquals(t, fmt.Sprintf("test %d: unexpected result", i), test.expected, actual)
}
}
0707010000009a000081a400000000000000000000000168ed21dd00000227000000000000000000000000000000000000002200000000mgradm/shared/kubernetes/flags.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
// KubernetesServerFlags is the aggregation of all flags for install, upgrade and migrate.
type KubernetesServerFlags struct {
utils.ServerFlags `mapstructure:",squash"`
Kubernetes utils.KubernetesFlags
Volumes utils.VolumesFlags
// SSH defines the SSH configuration to use to connect to the source server to migrate.
SSH utils.SSHFlags
}
0707010000009b000081a400000000000000000000000168ed21dd00000c81000000000000000000000000000000000000002300000000mgradm/shared/kubernetes/hubApi.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"fmt"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/utils"
apps "k8s.io/api/apps/v1"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
const (
// HubAPIDeployName is the deployment name of the Hub API.
HubAPIDeployName = "uyuni-hub-api"
hubAPIServiceName = "hub-api"
)
// InstallHubAPI installs the Hub API deployment and service.
func InstallHubAPI(namespace string, image string, pullPolicy string, pullSecret string) error {
if err := startHubAPIDeployment(namespace, image, pullPolicy, pullSecret); err != nil {
return err
}
if err := createHubAPIService(namespace); err != nil {
return err
}
// TODO Do we want an ingress to use port 80 / 443 from the outside too?
// This would have an impact on the user's scripts.
return nil
}
func startHubAPIDeployment(namespace string, image string, pullPolicy string, pullSecret string) error {
deploy := getHubAPIDeployment(namespace, image, pullPolicy, pullSecret)
return kubernetes.Apply([]runtime.Object{deploy}, L("failed to create the hub API deployment"))
}
func getHubAPIDeployment(namespace string, image string, pullPolicy string, pullSecret string) *apps.Deployment {
var replicas int32 = 1
deploy := &apps.Deployment{
TypeMeta: meta.TypeMeta{Kind: "Deployment", APIVersion: "apps/v1"},
ObjectMeta: meta.ObjectMeta{
Name: HubAPIDeployName,
Namespace: namespace,
Labels: kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.HubAPIComponent),
},
Spec: apps.DeploymentSpec{
Replicas: &replicas,
Selector: &meta.LabelSelector{
MatchLabels: kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.HubAPIComponent),
},
Template: core.PodTemplateSpec{
ObjectMeta: meta.ObjectMeta{
Labels: kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.HubAPIComponent),
},
Spec: core.PodSpec{
Containers: []core.Container{
{
Name: "uyuni-hub-api",
Image: image,
ImagePullPolicy: kubernetes.GetPullPolicy(pullPolicy),
Ports: []core.ContainerPort{
{
ContainerPort: int32(2830),
},
},
Env: []core.EnvVar{
{Name: "HUB_API_URL", Value: fmt.Sprintf("http://%s/rpc/api", utils.WebServiceName)},
{Name: "HUB_CONNECT_TIMEOUT", Value: "10"},
{Name: "HUB_REQUEST_TIMEOUT", Value: "10"},
{Name: "HUB_CONNECT_USING_SSL", Value: "false"},
},
},
},
},
},
},
}
if pullSecret != "" {
deploy.Spec.Template.Spec.ImagePullSecrets = []core.LocalObjectReference{{Name: pullSecret}}
}
return deploy
}
func createHubAPIService(namespace string) error {
svc := getService(namespace, kubernetes.ServerApp, kubernetes.HubAPIComponent, hubAPIServiceName, core.ProtocolTCP,
utils.NewPortMap(utils.HubAPIServiceName, "api", 2830, 2830),
)
return kubernetes.Apply([]runtime.Object{svc}, L("failed to create the hub API service"))
}
0707010000009c000081a400000000000000000000000168ed21dd0000166f000000000000000000000000000000000000002400000000mgradm/shared/kubernetes/ingress.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/utils"
net "k8s.io/api/networking/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
// IngressNameSSL is the name of the ingress serving HTTPS traffic.
IngressNameSSL = "uyuni-ingress-ssl"
// IngressNameSSLRedirect is the name of the ingress redirecting HTTP traffic to HTTPS.
IngressNameSSLRedirect = "uyuni-ingress-ssl-redirect"
// IngressNameNoSSL is the name of the ingress serving the paths that remain on plain HTTP.
IngressNameNoSSL = "uyuni-ingress-nossl"
)
// CreateIngress creates the ingress definitions for Uyuni server.
//
// fqdn is the fully qualified domain name associated with the Uyuni server.
//
// caIssuer is the name of the cert-manager to associate for the SSL routes.
// It can be empty if cert-manager is not used.
//
// ingressName is one of traefik or nginx.
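//
// A minimal usage sketch; the namespace and FQDN are illustrative assumptions, and the empty
// caIssuer means cert-manager is not used:
//
//	if err := CreateIngress("uyuni", "server.example.com", "", "nginx"); err != nil {
//		return err
//	}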
func CreateIngress(namespace string, fqdn string, caIssuer string, ingressName string) error {
ingresses := GetIngresses(namespace, fqdn, caIssuer, ingressName)
return kubernetes.Apply(ingresses, L("failed to create the ingresses"))
}
// GetIngresses returns the ingress definitions to create based on the name of the ingress.
// If ingressName is neither nginx nor traefik, no ingress rules are returned.
func GetIngresses(namespace string, fqdn string, caIssuer string, ingressName string) []*net.Ingress {
ingresses := []*net.Ingress{}
if ingressName != "nginx" && ingressName != "traefik" {
return ingresses
}
ingresses = append(ingresses,
getSSLIngress(namespace, fqdn, caIssuer, ingressName),
getNoSSLIngress(namespace, fqdn, ingressName),
)
sslRedirectIngress := getSSLRedirectIngress(namespace, fqdn, ingressName)
if sslRedirectIngress != nil {
ingresses = append(ingresses, sslRedirectIngress)
}
return ingresses
}
func getSSLIngress(namespace string, fqdn string, caIssuer string, ingressName string) *net.Ingress {
annotations := map[string]string{}
if caIssuer != "" {
annotations["cert-manager.io/issuer"] = caIssuer
}
if ingressName == "traefik" {
annotations["traefik.ingress.kubernetes.io/router.tls"] = "true"
annotations["traefik.ingress.kubernetes.io/router.tls.domains.n.main"] = fqdn
annotations["traefik.ingress.kubernetes.io/router.entrypoints"] = "websecure,web"
}
ingress := net.Ingress{
TypeMeta: meta.TypeMeta{APIVersion: "networking.k8s.io/v1", Kind: "Ingress"},
ObjectMeta: meta.ObjectMeta{
Namespace: namespace,
Name: IngressNameSSL,
Annotations: annotations,
Labels: kubernetes.GetLabels(kubernetes.ServerApp, ""),
},
Spec: net.IngressSpec{
TLS: []net.IngressTLS{
{Hosts: []string{fqdn}, SecretName: kubernetes.CertSecretName},
},
Rules: []net.IngressRule{
getIngressWebRule(fqdn),
},
},
}
return &ingress
}
func getSSLRedirectIngress(namespace string, fqdn string, ingressName string) *net.Ingress {
var ingress *net.Ingress
// Nginx doesn't require a special ingress for the SSL redirection.
if ingressName == "traefik" {
ingress = &net.Ingress{
TypeMeta: meta.TypeMeta{APIVersion: "networking.k8s.io/v1", Kind: "Ingress"},
ObjectMeta: meta.ObjectMeta{
Namespace: namespace,
Name: IngressNameSSLRedirect,
Annotations: map[string]string{
"traefik.ingress.kubernetes.io/router.middlewares": "default-uyuni-https-redirect@kubernetescrd",
"traefik.ingress.kubernetes.io/router.entrypoints": "web",
},
Labels: kubernetes.GetLabels(kubernetes.ServerApp, ""),
},
Spec: net.IngressSpec{
Rules: []net.IngressRule{
getIngressWebRule(fqdn),
},
},
}
}
return ingress
}
var noSSLPaths = []string{
"/pub",
"/rhn/([^/])+/DownloadFile",
"/(rhn/)?rpc/api",
"/rhn/errors",
"/rhn/ty/TinyUrl",
"/rhn/websocket",
"/rhn/metrics",
"/cobbler_api",
"/cblr",
"/httpboot",
"/images",
"/cobbler",
"/os-images",
"/tftp",
"/docs",
}
func getNoSSLIngress(namespace string, fqdn string, ingressName string) *net.Ingress {
annotations := map[string]string{}
if ingressName == "nginx" {
annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false"
}
if ingressName == "traefik" {
annotations["traefik.ingress.kubernetes.io/router.tls"] = "false"
annotations["traefik.ingress.kubernetes.io/router.entrypoints"] = "web"
}
pathType := net.PathTypePrefix
paths := []net.HTTPIngressPath{}
for _, noSSLPath := range noSSLPaths {
paths = append(paths, net.HTTPIngressPath{
Backend: webServiceBackend,
Path: noSSLPath,
PathType: &pathType,
})
}
ingress := net.Ingress{
TypeMeta: meta.TypeMeta{APIVersion: "networking.k8s.io/v1", Kind: "Ingress"},
ObjectMeta: meta.ObjectMeta{
Namespace: namespace,
Name: IngressNameNoSSL,
Annotations: annotations,
Labels: kubernetes.GetLabels(kubernetes.ServerApp, ""),
},
Spec: net.IngressSpec{
TLS: []net.IngressTLS{
{Hosts: []string{fqdn}, SecretName: kubernetes.CertSecretName},
},
Rules: []net.IngressRule{
{
Host: fqdn,
IngressRuleValue: net.IngressRuleValue{
HTTP: &net.HTTPIngressRuleValue{Paths: paths},
},
},
},
},
}
return &ingress
}
// getIngressWebRule builds the ingress rule object catching all HTTP traffic.
func getIngressWebRule(fqdn string) net.IngressRule {
pathType := net.PathTypePrefix
return net.IngressRule{
Host: fqdn,
IngressRuleValue: net.IngressRuleValue{
HTTP: &net.HTTPIngressRuleValue{
Paths: []net.HTTPIngressPath{
{
Backend: webServiceBackend,
Path: "/",
PathType: &pathType,
},
},
},
},
}
}
var webServiceBackend net.IngressBackend = net.IngressBackend{
Service: &net.IngressServiceBackend{
Name: utils.WebServiceName,
Port: net.ServiceBackendPort{Number: 80},
},
}
0707010000009d000081a400000000000000000000000168ed21dd000002ad000000000000000000000000000000000000002600000000mgradm/shared/kubernetes/namespace.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// CreateNamespace creates a kubernetes namespace.
func CreateNamespace(namespace string) error {
ns := core.Namespace{
TypeMeta: meta.TypeMeta{Kind: "Namespace", APIVersion: "v1"},
ObjectMeta: meta.ObjectMeta{
Name: namespace,
},
}
return kubernetes.Apply([]runtime.Object{&ns}, L("failed to create the namespace"))
}
0707010000009e000081a400000000000000000000000168ed21dd000002c5000000000000000000000000000000000000002100000000mgradm/shared/kubernetes/node.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
)
// deployNodeConfig deploys configuration files on the node.
func deployNodeConfig(
namespace string,
clusterInfos *kubernetes.ClusterInfos,
needsHub bool,
debug bool,
) error {
// If installing on k3s, install the traefik helm config in manifests
isK3s := clusterInfos.IsK3s()
isRke2 := clusterInfos.IsRke2()
ports := getPortList(needsHub, debug)
if isK3s {
return kubernetes.InstallK3sTraefikConfig(ports)
} else if isRke2 {
return kubernetes.InstallRke2NginxConfig(ports, namespace)
}
return nil
}
0707010000009f000081a400000000000000000000000168ed21dd0000020b000000000000000000000000000000000000002200000000mgradm/shared/kubernetes/ports.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
// getPortList returns the compiled list of TCP and UDP ports.
func getPortList(hub bool, debug bool) []types.PortMap {
ports := utils.GetServerPorts(debug)
ports = append(ports, utils.ReportDBPorts...)
if hub {
ports = append(ports, utils.HubXmlrpcPorts...)
}
return ports
}
070701000000a0000081a400000000000000000000000168ed21dd0000053b000000000000000000000000000000000000002b00000000mgradm/shared/kubernetes/postUpgradeJob.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"github.com/rs/zerolog/log"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/templates"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
batch "k8s.io/api/batch/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// PostUpgradeJobName is the name of the job applying the database changes after the upgrade.
const PostUpgradeJobName = "uyuni-post-upgrade"
// StartPostUpgradeJob starts the job applying the database changes after the upgrade.
func StartPostUpgradeJob(namespace string, image string, pullPolicy string, pullSecret string) (string, error) {
log.Info().Msg(L("Performing post upgrade changes…"))
job, err := getPostUpgradeJob(namespace, image, pullPolicy, pullSecret)
if err != nil {
return "", err
}
return job.ObjectMeta.Name, kubernetes.Apply([]runtime.Object{job}, L("failed to run the post upgrade job"))
}
func getPostUpgradeJob(namespace string, image string, pullPolicy string, pullSecret string) (*batch.Job, error) {
scriptData := templates.PostUpgradeTemplateData{}
mounts := GetServerMounts()
return kubernetes.GetScriptJob(namespace, PostUpgradeJobName, image, pullPolicy, pullSecret, mounts, scriptData)
}
070701000000a1000081a400000000000000000000000168ed21dd0000355a000000000000000000000000000000000000002600000000mgradm/shared/kubernetes/reconcile.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"encoding/base64"
"errors"
"fmt"
"os"
"os/exec"
adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/ssl"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
// Reconcile upgrades, migrates, or installs the server.
func Reconcile(flags *KubernetesServerFlags, fqdn string) error {
if _, err := exec.LookPath("kubectl"); err != nil {
return errors.New(L("install kubectl before running this command"))
}
namespace := flags.Kubernetes.Uyuni.Namespace
// Create the namespace if not present
if err := CreateNamespace(namespace); err != nil {
return err
}
serverImage, err := utils.ComputeImage(flags.Image.Registry, utils.DefaultTag, flags.Image)
if err != nil {
return utils.Error(err, L("failed to compute image URL"))
}
// Create a secret using SCC credentials if any are provided
pullSecret, err := kubernetes.GetRegistrySecret(
flags.Kubernetes.Uyuni.Namespace, &flags.Installation.SCC, kubernetes.ServerApp,
)
if err != nil {
return err
}
// Do we have an existing deployment to upgrade?
// This can be freshly synchronized data from a migration or a running instance to upgrade.
hasDeployment := kubernetes.HasDeployment(namespace, kubernetes.ServerFilter)
// Check that the postgresql PVC is bound to a Volume.
hasDatabase := kubernetes.HasVolume(namespace, "var-pgsql")
isMigration := hasDatabase && !hasDeployment
cocoReplicas := kubernetes.GetReplicas(namespace, CocoDeployName)
if cocoReplicas != 0 && !flags.Coco.IsChanged {
// Upgrade: detect the number of running coco replicas
flags.Coco.Replicas = cocoReplicas
}
var inspectedData utils.ServerInspectData
if hasDatabase {
// Inspect the image and the existing volumes
data, err := kubernetes.InspectServer(namespace, serverImage, flags.Image.PullPolicy, pullSecret)
if err != nil {
return err
}
inspectedData = *data
// Use the inspected DB port and name if not defined in the flags
if flags.Installation.DB.Port == 0 && data.DBPort != 0 {
flags.Installation.DB.Port = data.DBPort
}
if flags.Installation.DB.Name == "" && data.DBName != "" {
flags.Installation.DB.Name = data.DBName
}
// TODO Do we have a running database deployment?
// Do we have a running server deploy? which version is it?
// If there is no deployment / image, don't check the uyuni / SUMA upgrades
// TODO If the DB is already in a separate deployment, there surely is no need to run an inspect on the server pod.
var runningData *utils.ServerInspectData
if runningImage := getRunningServerImage(namespace); runningImage != "" {
runningData, err = kubernetes.InspectServer(namespace, runningImage, "Never", pullSecret)
if err != nil {
return err
}
}
// Run sanity checks for upgrade
if err := adm_utils.SanityCheck(runningData, &inspectedData); err != nil {
return err
}
// Get the fqdn from the inspected data if possible. Ignore difference with input value for now.
fqdn = inspectedData.Fqdn
if hasDeployment {
// Scale down all deployments relying on the DB since it will be brought down during upgrade.
if cocoReplicas > 0 {
if err := kubernetes.ReplicasTo(namespace, CocoDeployName, 0); err != nil {
return utils.Error(err, L("cannot set confidential computing containers replicas to 0"))
}
}
// Scale down server deployment if present to upgrade the DB
if err := kubernetes.ReplicasTo(namespace, ServerDeployName, 0); err != nil {
return utils.Error(err, L("cannot set server replicas to 0"))
}
// TODO Scale down the DB container?
}
}
// Don't check the FQDN too early or we may not have it in case of upgrade.
if err := utils.IsValidFQDN(fqdn); err != nil {
return err
}
// TODO IsLocal() is not enough for Kubernetes as users can define their own db / reportdb service pointing
// to whatever they want
localDB := flags.Installation.DB.IsLocal()
mounts := GetServerMounts()
if localDB {
mounts = append(mounts, utils.VarPgsqlDataVolumeMount)
}
mounts = TuneMounts(mounts, &flags.Volumes)
if err := kubernetes.CreatePersistentVolumeClaims(namespace, mounts); err != nil {
return err
}
if hasDatabase {
oldPgVersion := inspectedData.CommonInspectData.CurrentPgVersion
newPgVersion := inspectedData.DBInspectData.ImagePgVersion
// TODO Split DB upgrade if needed or merge it in another job (which?)
// Run the DB Upgrade job if needed
if oldPgVersion < newPgVersion {
jobName, err := StartDBUpgradeJob(
namespace, flags.Image.Registry, flags.Image, flags.DBUpgradeImage, pullSecret,
oldPgVersion, newPgVersion,
)
if err != nil {
return err
}
// Wait forever for the job to finish: the duration of this job depends on the amount of data to upgrade.
if err := kubernetes.WaitForJob(namespace, jobName, -1); err != nil {
return err
}
} else if oldPgVersion > newPgVersion {
return fmt.Errorf(
L("downgrading database from PostgreSQL %[1]d to %[2]d is not supported"), oldPgVersion, newPgVersion)
}
// Run DB finalization job
schemaUpdateRequired := oldPgVersion != newPgVersion
jobName, err := StartDBFinalizeJob(
namespace, serverImage, flags.Image.PullPolicy, pullSecret, schemaUpdateRequired, isMigration,
)
if err != nil {
return err
}
// Wait forever for the job to finish: the duration of this job depends on the amount of data to reindex.
if err := kubernetes.WaitForJob(namespace, jobName, -1); err != nil {
return err
}
// Run the Post Upgrade job
jobName, err = StartPostUpgradeJob(namespace, serverImage, flags.Image.PullPolicy, pullSecret)
if err != nil {
return err
}
if err := kubernetes.WaitForJob(namespace, jobName, 60); err != nil {
return err
}
}
// Extract some data from the cluster to guess how to configure Uyuni.
clusterInfos, err := kubernetes.CheckCluster()
if err != nil {
return err
}
if replicas := kubernetes.GetReplicas(namespace, ServerDeployName); replicas > 0 && !flags.HubXmlrpc.IsChanged {
// Upgrade: detect the number of existing hub xmlrpc replicas
flags.HubXmlrpc.Replicas = replicas
}
needsHub := flags.HubXmlrpc.Replicas > 0
// Install the traefik / nginx config on the node
// This will never be done in an operator.
if err := deployNodeConfig(namespace, clusterInfos, needsHub, flags.Installation.Debug.Java); err != nil {
return err
}
// Deploy the SSL CA and server certificates
var caIssuer string
if flags.Installation.SSL.UseProvided() {
if err := DeployExistingCertificate(flags.Kubernetes.Uyuni.Namespace, &flags.Installation.SSL); err != nil {
return err
}
} else if !HasIssuer(namespace, kubernetes.CAIssuerName) {
// cert-manager is not required for 3rd party certificates, only if we have the CA key.
// Note that an operator won't be able to install cert-manager and will just wait for it to be installed.
kubeconfig := clusterInfos.GetKubeconfig()
if err := InstallCertManager(&flags.Kubernetes, kubeconfig, flags.Image.PullPolicy); err != nil {
return utils.Error(err, L("cannot install cert manager"))
}
if flags.Installation.SSL.UseMigratedCa() {
// Convert CA to RSA to use in a Kubernetes TLS secret.
// In an operator we would have to fail now if there is no SSL password as we cannot prompt for it.
rootCA, err := os.ReadFile(flags.Installation.SSL.Ca.Root)
if err != nil {
return utils.Error(err, L("failed to read Root CA file"))
}
ca := types.SSLPair{
Key: base64.StdEncoding.EncodeToString(
ssl.GetRsaKey(flags.Installation.SSL.Ca.Key, flags.Installation.SSL.Password),
),
Cert: base64.StdEncoding.EncodeToString(ssl.StripTextFromCertificate(string(rootCA))),
}
// Install the cert-manager issuers
if err := DeployReusedCA(namespace, &ca, fqdn); err != nil {
return err
}
} else {
if err := DeployGeneratedCA(flags.Kubernetes.Uyuni.Namespace, &flags.Installation.SSL, fqdn); err != nil {
return err
}
}
// Wait for issuer to be ready
if err := waitForIssuer(flags.Kubernetes.Uyuni.Namespace, kubernetes.CAIssuerName); err != nil {
return err
}
// Extract the CA cert into uyuni-ca config map as the container shouldn't have the CA secret
if err := extractCACertToConfig(flags.Kubernetes.Uyuni.Namespace); err != nil {
return err
}
caIssuer = kubernetes.CAIssuerName
}
// Create the Ingress routes before the deployments as those are triggering
// the creation of the uyuni-cert secret from cert-manager.
if err := CreateIngress(namespace, fqdn, caIssuer, clusterInfos.Ingress); err != nil {
return err
}
// Wait for uyuni-cert secret to be ready
kubernetes.WaitForSecret(namespace, kubernetes.CertSecretName)
// Create the services
if err := CreateServices(namespace, flags.Installation.Debug.Java); err != nil {
return err
}
// Store the DB credentials in a secret.
if flags.Installation.DB.User != "" && flags.Installation.DB.Password != "" {
if err := CreateBasicAuthSecret(
namespace, DBSecret, flags.Installation.DB.User, flags.Installation.DB.Password,
); err != nil {
return err
}
}
if flags.Installation.ReportDB.User != "" && flags.Installation.ReportDB.Password != "" {
if err := CreateBasicAuthSecret(
namespace, ReportdbSecret, flags.Installation.ReportDB.User, flags.Installation.ReportDB.Password,
); err != nil {
return err
}
}
if !hasDatabase {
// Wait for the DB secrets: TLS, ReportDB and DB credentials
kubernetes.WaitForSecret(namespace, DBSecret)
kubernetes.WaitForSecret(namespace, ReportdbSecret)
kubernetes.WaitForSecret(namespace, kubernetes.DBCertSecretName)
if localDB {
// Create the secret for admin credentials
if err := CreateBasicAuthSecret(
namespace, DBAdminSecret, flags.Installation.DB.Admin.User, flags.Installation.DB.Admin.Password,
); err != nil {
return err
}
kubernetes.WaitForSecret(namespace, DBAdminSecret)
dbImage, err := utils.ComputeImage(flags.Image.Registry, utils.DefaultTag, flags.Pgsql.Image)
if err != nil {
return utils.Error(err, L("failed to compute image URL"))
}
// Create the split DB deployment
if err := CreateDBDeployment(
namespace, dbImage, flags.Image.PullPolicy, pullSecret, flags.Installation.TZ,
); err != nil {
return err
}
}
}
// The SCCSecret is used to expose the SCC credentials as environment variables in the setup job. It is
// different from the pullSecret as it is of a different type: basic-auth vs docker.
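// Purely as an illustration of the two secret kinds (names and values below are hypothetical and not
// created by this code base), they roughly correspond to:
//   kubectl create secret generic scc-credentials --type=kubernetes.io/basic-auth \
//     --from-literal=username=<scc-user> --from-literal=password=<scc-pass>
//   kubectl create secret docker-registry registry-credentials \
//     --docker-server=registry.example.com --docker-username=<user> --docker-password=<pass>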
if flags.Installation.SCC.User != "" && flags.Installation.SCC.Password != "" {
if err := CreateBasicAuthSecret(
namespace, SCCSecret, flags.Installation.SCC.User, flags.Installation.SCC.Password,
); err != nil {
return err
}
}
adminSecret := "admin-credentials"
if flags.Installation.Admin.Login != "" && flags.Installation.Admin.Password != "" {
if err := CreateBasicAuthSecret(
namespace, adminSecret, flags.Installation.Admin.Login, flags.Installation.Admin.Password,
); err != nil {
return err
}
}
// TODO For a migration or an upgrade this needs to be skipped
// Run the setup script.
// The script will be skipped if the server has already been setup.
jobName, err := StartSetupJob(
namespace, serverImage, kubernetes.GetPullPolicy(flags.Image.PullPolicy), pullSecret,
flags.Volumes.Mirror, &flags.Installation, fqdn, adminSecret, DBSecret, ReportdbSecret, SCCSecret,
)
if err != nil {
return err
}
if err := kubernetes.WaitForJob(namespace, jobName, 120); err != nil {
return err
}
if clusterInfos.Ingress == "traefik" {
// Create the Traefik routes
if err := CreateTraefikRoutes(namespace, needsHub, flags.Installation.Debug.Java); err != nil {
return err
}
}
// Start the server
if err := CreateServerDeployment(
namespace, serverImage, flags.Image.PullPolicy, flags.Installation.TZ, flags.Installation.Debug.Java,
flags.Volumes.Mirror, pullSecret,
); err != nil {
return err
}
deploymentsStarting := []string{ServerDeployName}
// Start the Coco Deployments if requested.
if replicas := kubernetes.GetReplicas(namespace, CocoDeployName); replicas != 0 && !flags.Coco.IsChanged {
// Upgrade: detect the number of running coco replicas
flags.Coco.Replicas = replicas
}
if flags.Coco.Replicas > 0 {
cocoImage, err := utils.ComputeImage(flags.Image.Registry, flags.Image.Tag, flags.Coco.Image)
if err != nil {
return err
}
if err := StartCocoDeployment(
namespace, cocoImage, flags.Image.PullPolicy, pullSecret, flags.Coco.Replicas,
flags.Installation.DB.Port, flags.Installation.DB.Name,
); err != nil {
return err
}
deploymentsStarting = append(deploymentsStarting, CocoDeployName)
}
// In an operator mind, the user would just change the custom resource to enable the feature.
if needsHub {
// Install Hub API deployment, service
hubAPIImage, err := utils.ComputeImage(flags.Image.Registry, flags.Image.Tag, flags.HubXmlrpc.Image)
if err != nil {
return err
}
if err := InstallHubAPI(namespace, hubAPIImage, flags.Image.PullPolicy, pullSecret); err != nil {
return err
}
deploymentsStarting = append(deploymentsStarting, HubAPIDeployName)
}
// Wait for all the other deployments to be ready
if err := kubernetes.WaitForDeployments(namespace, deploymentsStarting...); err != nil {
return err
}
return nil
}
070701000000a2000081a400000000000000000000000168ed21dd00000e16000000000000000000000000000000000000002500000000mgradm/shared/kubernetes/services.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"fmt"
"strings"
"github.com/rs/zerolog"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
// serviceMap maps the service names to their component label for the components that are not the server.
var serviceMap = map[string]string{
utils.DBServiceName: kubernetes.DBComponent,
utils.ReportdbServiceName: kubernetes.DBComponent,
}
// CreateServices creates the kubernetes services for the server.
//
// If debug is true, the Java debug ports will be exposed.
func CreateServices(namespace string, debug bool) error {
services := GetServices(namespace, debug)
for _, svc := range services {
if !hasCustomService(namespace, svc.ObjectMeta.Name) {
if err := kubernetes.Apply([]*core.Service{svc}, L("failed to create the service")); err != nil {
return err
}
}
}
return nil
}
// GetServices creates the definitions of all the services of the server.
//
// If debug is true, the Java debug ports will be exposed.
func GetServices(namespace string, debug bool) []*core.Service {
ports := utils.GetServerPorts(debug)
ports = append(ports, utils.DBPorts...)
ports = append(ports, utils.ReportDBPorts...)
servicesPorts := map[string][]types.PortMap{}
for _, port := range ports {
svcPorts := servicesPorts[port.Service]
if svcPorts == nil {
svcPorts = []types.PortMap{}
}
svcPorts = append(svcPorts, port)
servicesPorts[port.Service] = svcPorts
}
services := []*core.Service{}
for _, svcPorts := range servicesPorts {
protocol := core.ProtocolTCP
if svcPorts[0].Protocol == "udp" {
protocol = core.ProtocolUDP
}
// Do we have a split component for that service already?
component := kubernetes.ServerComponent
if comp, exists := serviceMap[svcPorts[0].Service]; exists {
component = comp
}
services = append(services,
getService(namespace, kubernetes.ServerApp, component, svcPorts[0].Service, protocol, svcPorts...),
)
}
return services
}
func getService(
namespace string,
app string,
component string,
name string,
protocol core.Protocol,
ports ...types.PortMap,
) *core.Service {
// TODO make configurable to allow NodePort and maybe LoadBalancer for exposed services.
serviceType := core.ServiceTypeClusterIP
portObjs := []core.ServicePort{}
for _, port := range ports {
portObjs = append(portObjs, core.ServicePort{
Name: port.Name,
Port: int32(port.Exposed),
TargetPort: intstr.FromInt(port.Port),
Protocol: protocol,
})
}
return &core.Service{
TypeMeta: meta.TypeMeta{APIVersion: "v1", Kind: "Service"},
ObjectMeta: meta.ObjectMeta{
Namespace: namespace,
Name: name,
Labels: kubernetes.GetLabels(app, component),
},
Spec: core.ServiceSpec{
Ports: portObjs,
Selector: map[string]string{kubernetes.ComponentLabel: component},
Type: serviceType,
},
}
}
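// hasCustomService returns true when a service with the given name already exists in the namespace
// but was not created by uyuni-tools, i.e. it does not carry our app label.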
func hasCustomService(namespace string, name string) bool {
out, err := utils.RunCmdOutput(
zerolog.DebugLevel, "kubectl", "get", "svc", "-n", namespace, name,
"-l", fmt.Sprintf("%s!=%s", kubernetes.AppLabel, kubernetes.ServerApp),
"-o", "jsonpath={.items[?(@.metadata.name=='db')]}",
)
// Custom services don't have our app label!
return err == nil && strings.TrimSpace(string(out)) != ""
}
070701000000a3000081a400000000000000000000000168ed21dd0000199b000000000000000000000000000000000000002200000000mgradm/shared/kubernetes/setup.go//SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"time"
adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/ssl"
batch "k8s.io/api/batch/v1"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)
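// SetupJobName is the name prefix of the setup job; a timestamp suffix is appended to it.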
const SetupJobName = "uyuni-setup"
// StartSetupJob creates the job setting up the server.
func StartSetupJob(
namespace string,
image string,
pullPolicy core.PullPolicy,
pullSecret string,
mirrorPvName string,
flags *adm_utils.InstallationFlags,
fqdn string,
adminSecret string,
dbSecret string,
reportdbSecret string,
sccSecret string,
) (string, error) {
job, err := GetSetupJob(
namespace, image, pullPolicy, pullSecret, mirrorPvName, flags, fqdn,
adminSecret, dbSecret, reportdbSecret, sccSecret,
)
if err != nil {
return "", err
}
return job.ObjectMeta.Name, kubernetes.Apply([]*batch.Job{job}, L("failed to run the setup job"))
}
// GetSetupJob creates the job definition object for the setup.
func GetSetupJob(
namespace string,
image string,
pullPolicy core.PullPolicy,
pullSecret string,
mirrorPvName string,
flags *adm_utils.InstallationFlags,
fqdn string,
adminSecret string,
dbSecret string,
reportdbSecret string,
sccSecret string,
) (*batch.Job, error) {
var maxFailures int32
timestamp := time.Now().Format("20060102150405")
template := getServerPodTemplate(image, pullPolicy, flags.TZ, pullSecret)
script, err := adm_utils.GenerateSetupScript(flags, true)
if err != nil {
return nil, err
}
template.Spec.Containers[0].Name = "setup"
template.Spec.Containers[0].Command = []string{"sh", "-c", script}
template.Spec.RestartPolicy = core.RestartPolicyNever
optional := false
dbUserEnv := core.EnvVar{Name: "MANAGER_USER", ValueFrom: &core.EnvVarSource{
SecretKeyRef: &core.SecretKeySelector{
LocalObjectReference: core.LocalObjectReference{Name: dbSecret},
Key: "username",
Optional: &optional,
},
}}
reportdbUserEnv := core.EnvVar{Name: "REPORT_DB_USER", ValueFrom: &core.EnvVarSource{
SecretKeyRef: &core.SecretKeySelector{
LocalObjectReference: core.LocalObjectReference{Name: reportdbSecret},
Key: "username",
Optional: &optional,
},
}}
envVars := []core.EnvVar{
{Name: "ADMIN_USER", ValueFrom: &core.EnvVarSource{
SecretKeyRef: &core.SecretKeySelector{
LocalObjectReference: core.LocalObjectReference{Name: adminSecret},
Key: "username",
Optional: &optional,
},
}},
{Name: "ADMIN_PASS", ValueFrom: &core.EnvVarSource{
SecretKeyRef: &core.SecretKeySelector{
LocalObjectReference: core.LocalObjectReference{Name: adminSecret},
Key: "password",
Optional: &optional,
},
}},
dbUserEnv,
{Name: "MANAGER_PASS", ValueFrom: &core.EnvVarSource{
SecretKeyRef: &core.SecretKeySelector{
LocalObjectReference: core.LocalObjectReference{Name: dbSecret},
Key: "password",
Optional: &optional,
},
}},
reportdbUserEnv,
{Name: "REPORT_DB_PASS", ValueFrom: &core.EnvVarSource{
SecretKeyRef: &core.SecretKeySelector{
LocalObjectReference: core.LocalObjectReference{Name: reportdbSecret},
Key: "password",
Optional: &optional,
},
}},
{Name: "REPORT_DB_CA_CERT", Value: ssl.DBCAContainerPath},
// EXTERNALDB_* variables are not passed yet: only for AWS and it probably doesn't make sense for kubernetes yet.
}
// The DB and ReportDB port is expected to be the standard one.
// When using an external database with a custom port the only solution is to access it using
// its IP address and a headless service with a custom EndpointSlice.
// If this is too big a constraint, we'll have to accept the port as a parameter too.
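// Purely as an illustration of that workaround (not generated by this code, all names, addresses and
// ports below are hypothetical), a service without selector backed by a manually managed EndpointSlice
// could look like:
//   apiVersion: v1
//   kind: Service
//   metadata:
//     name: reportdb
//   spec:
//     clusterIP: None
//     ports:
//       - protocol: TCP
//         port: 5432
//   ---
//   apiVersion: discovery.k8s.io/v1
//   kind: EndpointSlice
//   metadata:
//     name: reportdb-1
//     labels:
//       kubernetes.io/service-name: reportdb
//   addressType: IPv4
//   ports:
//     - protocol: TCP
//       port: 5499
//   endpoints:
//     - addresses:
//         - "192.0.2.10"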
env := adm_utils.GetSetupEnv(mirrorPvName, flags, fqdn, true)
for key, value := range env {
envVars = append(envVars, core.EnvVar{Name: key, Value: value})
}
if sccSecret != "" {
envVars = append(envVars,
core.EnvVar{Name: "SCC_USER", ValueFrom: &core.EnvVarSource{
SecretKeyRef: &core.SecretKeySelector{
LocalObjectReference: core.LocalObjectReference{Name: sccSecret},
Key: "username",
Optional: &optional,
},
}},
core.EnvVar{Name: "SCC_PASS", ValueFrom: &core.EnvVarSource{
SecretKeyRef: &core.SecretKeySelector{
LocalObjectReference: core.LocalObjectReference{Name: sccSecret},
Key: "password",
Optional: &optional,
},
}},
)
}
if mirrorPvName != "" {
envVars = append(envVars, core.EnvVar{Name: "MIRROR_PATH", Value: "/mirror"})
}
template.Spec.Containers[0].Env = envVars
template.Spec.Volumes = append(template.Spec.Volumes,
kubernetes.CreateConfigVolume("db-ca", kubernetes.DBCAConfigName),
)
// Add an initContainer waiting for the db and reportdb services to respond
template.Spec.InitContainers = append(template.Spec.InitContainers,
core.Container{
Name: "db-waiter",
Image: image,
ImagePullPolicy: pullPolicy,
Env: []core.EnvVar{
{Name: "MANAGER_DB_HOST", Value: env["MANAGER_DB_HOST"]},
{Name: "MANAGER_DB_PORT", Value: env["MANAGER_DB_PORT"]},
{Name: "MANAGER_DB_NAME", Value: env["MANAGER_DB_NAME"]},
dbUserEnv,
{Name: "REPORT_DB_HOST", Value: env["REPORT_DB_HOST"]},
{Name: "REPORT_DB_PORT", Value: env["REPORT_DB_PORT"]},
{Name: "REPORT_DB_NAME", Value: env["REPORT_DB_NAME"]},
reportdbUserEnv,
},
Command: []string{
"sh", "-c",
`
until pg_isready -U $MANAGER_USER -h $MANAGER_DB_HOST -p $MANAGER_DB_PORT -d $MANAGER_DB_NAME; do
sleep 60
done
until pg_isready -U $REPORT_DB_USER -h $REPORT_DB_HOST -p $REPORT_DB_PORT -d $REPORT_DB_NAME; do
sleep 60
done
`,
},
},
)
job := batch.Job{
TypeMeta: meta.TypeMeta{Kind: "Job", APIVersion: "batch/v1"},
ObjectMeta: meta.ObjectMeta{
Name: SetupJobName + "-" + timestamp,
Namespace: namespace,
Labels: kubernetes.GetLabels(kubernetes.ServerApp, ""),
},
Spec: batch.JobSpec{
Template: template,
BackoffLimit: &maxFailures,
},
}
if pullSecret != "" {
job.Spec.Template.Spec.ImagePullSecrets = []core.LocalObjectReference{{Name: pullSecret}}
}
return &job, nil
}
070701000000a4000081a400000000000000000000000168ed21dd00000cea000000000000000000000000000000000000002400000000mgradm/shared/kubernetes/traefik.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"fmt"
"html/template"
"io"
"os"
"path"
"github.com/rs/zerolog"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
)
// CreateTraefikRoutes creates the routes and middleware wiring the traefik endpoints to their service.
func CreateTraefikRoutes(namespace string, hub bool, debug bool) error {
routeTemplate := template.Must(template.New("ingressRoute").Parse(ingressRouteTemplate))
tempDir, cleaner, err := utils.TempDir()
if err != nil {
return err
}
defer cleaner()
filePath := path.Join(tempDir, "routes.yaml")
file, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0700)
if err != nil {
return utils.Errorf(err, L("failed to open %s for writing"), filePath)
}
defer file.Close()
// Write the SSL Redirect middleware
_, err = file.WriteString(fmt.Sprintf(`
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: uyuni-https-redirect
namespace: "%s"
labels:
%s: %s
spec:
redirectScheme:
scheme: https
permanent: true
`, namespace, kubernetes.AppLabel, kubernetes.ServerApp))
if err != nil {
return utils.Error(err, L("failed to write traefik middleware and routes to file"))
}
// Write the routes from the endpoint to the services
for _, endpoint := range getPortList(hub, debug) {
_, err := file.WriteString("---\n")
if err != nil {
return utils.Error(err, L("failed to write traefik middleware and routes to file"))
}
if err := getTraefixRoute(routeTemplate, file, namespace, endpoint); err != nil {
return err
}
}
if err := file.Close(); err != nil {
return utils.Error(err, L("failed to close traefik middleware and routes file"))
}
if _, err := utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", "apply", "-f", filePath); err != nil {
return utils.Error(err, L("failed to create traefik middleware and routes"))
}
return nil
}
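// getTraefixRoute renders the traefik IngressRouteTCP or IngressRouteUDP definition for one endpoint
// into the writer using the provided template.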
func getTraefixRoute(t *template.Template, writer io.Writer, namespace string, endpoint types.PortMap) error {
endpointName := kubernetes.GetTraefikEndpointName(endpoint)
protocol := "TCP"
if endpoint.Protocol == "udp" {
protocol = "UDP"
}
data := routeData{
Name: endpointName + "-route",
Namespace: namespace,
EndPoint: endpointName,
Service: endpoint.Service,
Port: endpoint.Exposed,
Protocol: protocol,
}
if err := t.Execute(writer, data); err != nil {
return utils.Error(err, L("failed to write traefik routes to file"))
}
return nil
}
type routeData struct {
Name string
Namespace string
EndPoint string
Service string
Port int
Protocol string
}
const ingressRouteTemplate = `
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute{{ .Protocol }}
metadata:
name: {{ .Name }}
namespace: "{{ .Namespace }}"
labels:
` + kubernetes.AppLabel + ": " + kubernetes.ServerApp + `
spec:
entryPoints:
- {{ .EndPoint }}
routes:
- services:
- name: {{ .Service }}
port: {{ .Port }}
{{- if eq .Protocol "TCP" }}
match: ` + "HostSNI(`*`)" + `
{{- end }}
`
070701000000a5000081a400000000000000000000000168ed21dd0000074f000000000000000000000000000000000000002900000000mgradm/shared/kubernetes/traefik_test.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
//go:build !nok8s
package kubernetes
import (
"bytes"
"html/template"
"testing"
"github.com/uyuni-project/uyuni-tools/shared/testutils"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
func TestGetTraefikRouteTCP(t *testing.T) {
routeTemplate := template.Must(template.New("ingressRoute").Parse(ingressRouteTemplate))
var buf bytes.Buffer
err := getTraefixRoute(routeTemplate, &buf, "foo", utils.NewPortMap("svcname", "port1", 123, 456))
if err != nil {
t.Errorf("Unexpected error: %s", err)
}
actual := buf.String()
expected := `
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRouteTCP
metadata:
name: svcname-port1-route
namespace: "foo"
labels:
app.kubernetes.io/part-of: uyuni
spec:
entryPoints:
- svcname-port1
routes:
- services:
- name: svcname
port: 123
match: ` + "HostSNI(`*`)\n"
testutils.AssertEquals(t, "Wrong traefik route generated", expected, actual)
}
func TestGetTraefikRouteUDP(t *testing.T) {
routeTemplate := template.Must(template.New("ingressRoute").Parse(ingressRouteTemplate))
var buf bytes.Buffer
err := getTraefixRoute(routeTemplate, &buf, "foo",
types.PortMap{
Service: "svcname",
Name: "port1",
Exposed: 123,
Port: 456,
Protocol: "udp",
})
if err != nil {
t.Errorf("Unexpected error: %s", err)
}
actual := buf.String()
expected := `
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRouteUDP
metadata:
name: svcname-port1-route
namespace: "foo"
labels:
app.kubernetes.io/part-of: uyuni
spec:
entryPoints:
- svcname-port1
routes:
- services:
- name: svcname
port: 123
`
testutils.AssertEquals(t, "Wrong traefik route generated", expected, actual)
}
070701000000a6000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001900000000mgradm/shared/kubernetes070701000000a7000081a400000000000000000000000168ed21dd00000fae000000000000000000000000000000000000001d00000000mgradm/shared/pgsql/pgsql.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package pgsql
import (
"fmt"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/templates"
"github.com/uyuni-project/uyuni-tools/shared"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/ssl"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
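// PreparePgsqlImage computes the PostgreSQL container image URL from the flags and prepares it
// according to the pull policy, returning the prepared image name.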
func PreparePgsqlImage(
authFile string,
pgsqlFlags *types.PgsqlFlags,
globalImageFlags *types.ImageFlags,
) (string, error) {
image := pgsqlFlags.Image
pgsqlImage, err := utils.ComputeImage(globalImageFlags.Registry, globalImageFlags.Tag, image)
if err != nil {
return "", utils.Error(err, L("failed to compute image URL"))
}
preparedImage, err := podman.PrepareImage(authFile, pgsqlImage, globalImageFlags.PullPolicy, true)
if err != nil {
return "", err
}
return preparedImage, err
}
// SetupPgsql prepares the systemd service and starts it if needed.
func SetupPgsql(
systemd podman.Systemd,
pgsqlImage string,
) error {
if err := GeneratePgsqlSystemdService(systemd, pgsqlImage); err != nil {
return utils.Error(err, L("cannot generate systemd service"))
}
if err := EnablePgsql(systemd); err != nil {
return err
}
cnx := shared.NewConnection("podman", podman.DBContainerName, "")
if err := cnx.WaitForHealthcheck(); err != nil {
return utils.Errorf(err, L("%s fails healtcheck"), podman.DBContainerName)
}
return nil
}
// EnablePgsql enables the database service.
// This function is meant for installation or migration, to enable and start the service.
func EnablePgsql(systemd podman.Systemd) error {
if err := systemd.EnableService(podman.DBService); err != nil {
return utils.Errorf(err, L("cannot enable %s service"), podman.DBService)
}
return nil
}
// Upgrade updates the systemd service files and restarts the containers if needed.
func Upgrade(
preparedImage string,
systemd podman.Systemd,
) error {
if err := GeneratePgsqlSystemdService(systemd, preparedImage); err != nil {
return utils.Error(err, L("cannot generate systemd service"))
}
if err := systemd.ReloadDaemon(false); err != nil {
return err
}
if err := EnablePgsql(systemd); err != nil {
return err
}
if err := systemd.StartService(podman.DBService); err != nil {
return err
}
cnx := shared.NewConnection("podman", podman.DBContainerName, "")
if err := cnx.WaitForHealthcheck(); err != nil {
return utils.Errorf(err, L("%s fails healtcheck"), podman.DBContainerName)
}
return nil
}
// GeneratePgsqlSystemdService creates the DB container systemd files.
func GeneratePgsqlSystemdService(
systemd podman.Systemd,
image string,
) error {
pgsqlData := templates.PgsqlServiceTemplateData{
Volumes: utils.PgsqlRequiredVolumeMounts,
Ports: utils.DBPorts,
NamePrefix: "uyuni",
Network: podman.UyuniNetwork,
Image: image,
CaSecret: podman.DBCASecret,
CaPath: ssl.DBCAContainerPath,
CertSecret: podman.DBSSLCertSecret,
CertPath: ssl.DBCertPath,
KeySecret: podman.DBSSLKeySecret,
KeyPath: ssl.DBCertKeyPath,
AdminUser: podman.DBAdminUserSecret,
AdminPassword: podman.DBAdminPassSecret,
ManagerUser: podman.DBUserSecret,
ManagerPassword: podman.DBPassSecret,
ReportUser: podman.ReportDBUserSecret,
ReportPassword: podman.ReportDBPassSecret,
}
if err := utils.WriteTemplateToFile(
pgsqlData, podman.GetServicePath(podman.DBService), 0555, true,
); err != nil {
return utils.Error(err, L("failed to generate systemd service unit file"))
}
environment := fmt.Sprintf("Environment=UYUNI_IMAGE=%s\n", image)
if err := podman.GenerateSystemdConfFile(podman.DBService, "generated.conf", environment, true); err != nil {
return utils.Error(err, L("cannot generate systemd configuration file"))
}
return systemd.ReloadDaemon(false)
}
070701000000a8000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001400000000mgradm/shared/pgsql070701000000a9000081a400000000000000000000000168ed21dd00006b74000000000000000000000000000000000000001f00000000mgradm/shared/podman/podman.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package podman
import (
"fmt"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/coco"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/hub"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/pgsql"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/saline"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/templates"
adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
"github.com/uyuni-project/uyuni-tools/shared"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/ssl"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
var systemd podman.Systemd = podman.NewSystemd()
// GetExposedPorts returns the ports exposed by the server container.
func GetExposedPorts(debug bool) []types.PortMap {
ports := utils.GetServerPorts(debug)
ports = append(ports, utils.NewPortMap(utils.WebServiceName, "https", 443, 443))
ports = append(ports, utils.TCPPodmanPorts...)
return ports
}
// GenerateServerSystemdService creates the server systemd service file.
func GenerateServerSystemdService(mirrorPath string, debug bool) error {
ipv6Enabled := podman.HasIpv6Enabled(podman.UyuniNetwork)
args := podman.GetCommonParams()
if mirrorPath != "" {
args = append(args, "-v", mirrorPath+":/mirror")
}
ports := GetExposedPorts(debug)
if _, err := exec.LookPath("csp-billing-adapter"); err == nil {
ports = append(ports, utils.NewPortMap("csp", "csp-billing", 18888, 18888))
args = append(args, "-e ISPAYG=1")
}
data := templates.PodmanServiceTemplateData{
Volumes: utils.ServerVolumeMounts,
NamePrefix: "uyuni",
Args: strings.Join(args, " "),
Ports: ports,
Network: podman.UyuniNetwork,
IPV6Enabled: ipv6Enabled,
CaSecret: podman.CASecret,
CaPath: ssl.CAContainerPath,
CertSecret: podman.SSLCertSecret,
CertPath: ssl.ServerCertPath,
KeySecret: podman.SSLKeySecret,
KeyPath: ssl.ServerCertKeyPath,
DBCaSecret: podman.DBCASecret,
DBCaPath: ssl.DBCAContainerPath,
}
if err := utils.WriteTemplateToFile(data, podman.GetServicePath("uyuni-server"), 0555, true); err != nil {
return utils.Errorf(err, L("failed to generate systemd service unit file"))
}
return nil
}
// GenerateSystemdService creates the server systemd service and its configuration files.
func GenerateSystemdService(
systemd podman.Systemd,
tz string,
image string,
debug bool,
mirrorPath string,
podmanArgs []string,
) error {
err := podman.SetupNetwork(false)
if err != nil {
return utils.Errorf(err, L("cannot setup network"))
}
log.Info().Msg(L("Enabling system service"))
if err := GenerateServerSystemdService(mirrorPath, debug); err != nil {
return err
}
if err := podman.GenerateSystemdConfFile("uyuni-server", "generated.conf",
"Environment=UYUNI_IMAGE="+image, true,
); err != nil {
return utils.Errorf(err, L("cannot generate systemd conf file"))
}
config := fmt.Sprintf(`Environment=TZ=%s
Environment="PODMAN_EXTRA_ARGS=%s"
`, strings.TrimSpace(tz), strings.Join(podmanArgs, " "))
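// For example (values below are illustrative only), the generated custom configuration contains lines like:
//   Environment=TZ=Europe/Berlin
//   Environment="PODMAN_EXTRA_ARGS=--memory 8g"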
if err := podman.GenerateSystemdConfFile("uyuni-server", podman.CustomConf, config, false); err != nil {
return utils.Errorf(err, L("cannot generate systemd user configuration file"))
}
return systemd.ReloadDaemon(false)
}
// RunMigration migrates an existing remote server to a container.
func RunMigration(
preparedImage string,
sshAuthSocket string,
sshConfigPath string,
sshKnownhostsPath string,
sourceFqdn string,
user string,
prepare bool,
) (*utils.InspectResult, error) {
scriptDir, cleaner, err := utils.TempDir()
defer cleaner()
if err != nil {
return nil, err
}
script, err := adm_utils.GenerateMigrationScript(
sourceFqdn,
user,
false,
prepare,
"uyuni-pgsql-server.mgr.internal",
"uyuni-pgsql-server.mgr.internal",
)
if err != nil {
return nil, utils.Errorf(err, L("cannot generate migration script"))
}
extraArgs := []string{
"--security-opt", "label=disable",
"-e", "SSH_AUTH_SOCK",
"-v", filepath.Dir(sshAuthSocket) + ":" + filepath.Dir(sshAuthSocket),
"-v", scriptDir + ":/var/lib/uyuni-tools/",
}
if sshConfigPath != "" {
extraArgs = append(extraArgs, "-v", sshConfigPath+":/tmp/ssh_config")
}
if sshKnownhostsPath != "" {
extraArgs = append(extraArgs, "-v", sshKnownhostsPath+":/etc/ssh/ssh_known_hosts")
}
log.Info().Msg(L("Migrating server"))
if err := podman.RunContainer("uyuni-migration", preparedImage, utils.ServerMigrationVolumeMounts, extraArgs,
[]string{"bash", "-e", "-c", script}); err != nil {
return nil, utils.Errorf(err, L("cannot run uyuni migration container"))
}
// Now that everything is migrated, we need to fix the SELinux permissions
if err := restoreSELinuxContext(); err != nil {
return nil, err
}
dataPath := path.Join(scriptDir, "data")
data, err := os.ReadFile(dataPath)
if err != nil {
log.Fatal().Err(err).Msgf(L("Failed to read file %s"), dataPath)
}
extractedData, err := utils.ReadInspectData[utils.InspectResult](data)
if err != nil {
return nil, utils.Errorf(err, L("cannot read extracted data"))
}
return extractedData, nil
}
func restoreSELinuxContext() error {
if utils.IsInstalled("restorecon") {
for _, volumeMount := range utils.ServerVolumeMounts {
mountPoint, err := GetMountPoint(volumeMount.Name)
if err != nil {
return utils.Errorf(err, L("cannot inspect volume %s"), volumeMount)
}
if err := utils.RunCmdStdMapping(zerolog.DebugLevel, "restorecon", "-F", "-r", "-v", mountPoint); err != nil {
return utils.Errorf(err, L("cannot restore %s SELinux permissions"), mountPoint)
}
}
}
return nil
}
// RunPgsqlVersionUpgrade performs a PostgreSQL major version upgrade.
func RunPgsqlVersionUpgrade(
authFile string,
registry string,
image types.ImageFlags,
upgradeImage types.ImageFlags,
oldPgsql string,
newPgsql string,
) error {
log.Info().Msgf(
L("Previous PostgreSQL is %[1]s, new one is %[2]s. Performing a DB version upgrade…"), oldPgsql, newPgsql,
)
if newPgsql > oldPgsql {
pgsqlVersionUpgradeContainer := "uyuni-upgrade-pgsql"
extraArgs := []string{
"--security-opt", "label=disable",
}
upgradeImageURL := ""
var err error
if upgradeImage.Name == "" {
upgradeImageURL, err = utils.ComputeImage(registry, utils.DefaultTag, image,
fmt.Sprintf("-migration-%s-%s", oldPgsql, newPgsql))
if err != nil {
return utils.Errorf(err, L("failed to compute image URL"))
}
} else {
upgradeImageURL, err = utils.ComputeImage(registry, image.Tag, upgradeImage)
if err != nil {
return utils.Errorf(err, L("failed to compute image URL"))
}
}
preparedImage, err := podman.PrepareImage(authFile, upgradeImageURL, image.PullPolicy, true)
if err != nil {
return err
}
log.Info().Msgf(L("Using database upgrade image %s"), preparedImage)
// We need an additional volume for the database backup during the migration.
// Create or reuse the var-pgsql-backup volume.
volumeMounts := append(utils.PgsqlRequiredVolumeMounts,
types.VolumeMount{MountPath: "/var/lib/pgsql/data-backup", Name: "var-pgsql-backup"})
script, err := adm_utils.GeneratePgsqlVersionUpgradeScript(
oldPgsql, newPgsql, "/var/lib/pgsql/data-backup")
if err != nil {
return utils.Errorf(err, L("cannot generate PostgreSQL database version upgrade script"))
}
err = podman.RunContainer(pgsqlVersionUpgradeContainer, preparedImage, volumeMounts, extraArgs,
[]string{"bash", "-e", "-c", script})
if err != nil {
return err
}
}
return nil
}
// RunPgsqlFinalizeScript runs the script with all the actions required on the database after an upgrade.
func RunPgsqlFinalizeScript(serverImage string, schemaUpdateRequired bool, migration bool, collationChange bool) error {
if !schemaUpdateRequired && !migration && !collationChange {
log.Info().Msg(L("No need to run database finalization script"))
return nil
}
extraArgs := []string{
"--security-opt", "label=disable",
"--network", podman.UyuniNetwork,
}
pgsqlFinalizeContainer := "uyuni-finalize-pgsql"
script, err := adm_utils.GenerateFinalizePostgresScript(collationChange, schemaUpdateRequired, migration, false)
if err != nil {
return utils.Errorf(err, L("cannot generate PostgreSQL finalization script"))
}
return podman.RunContainer(pgsqlFinalizeContainer, serverImage, utils.ServerVolumeMounts, extraArgs,
[]string{"bash", "-e", "-c", script})
}
// RunPostUpgradeScript runs the script with the changes to apply after the upgrade.
func RunPostUpgradeScript(serverImage string) error {
postUpgradeContainer := "uyuni-post-upgrade"
extraArgs := []string{
"--security-opt", "label=disable",
}
script, err := adm_utils.GeneratePostUpgradeScript()
if err != nil {
return utils.Errorf(err, L("cannot generate PostgreSQL finalization script"))
}
// Post upgrade script expects some commands to fail and checks their result, don't use sh -e.
return podman.RunContainer(postUpgradeContainer, serverImage, utils.ServerVolumeMounts, extraArgs,
[]string{"bash", "-c", script})
}
// Upgrade upgrades the server to the image given as parameter.
func Upgrade(
systemd podman.Systemd,
authFile string,
registry string,
db adm_utils.DBFlags,
reportdb adm_utils.DBFlags,
ssl adm_utils.InstallSSLFlags,
image types.ImageFlags,
upgradeImage types.ImageFlags,
cocoFlags adm_utils.CocoFlags,
hubXmlrpcFlags adm_utils.HubXmlrpcFlags,
salineFlags adm_utils.SalineFlags,
pgsqlFlags types.PgsqlFlags,
tz string,
) error {
// Calling cloudguestregistryauth only makes sense if using the cloud provider registry.
// This check assumes users won't use custom registries that are not the cloud provider one on a cloud image.
if !strings.HasPrefix(registry, "registry.suse.com") {
if err := CallCloudGuestRegistryAuth(); err != nil {
return err
}
}
// Prepare the Uyuni network: all the containers need to run in the same network.
err := podman.SetupNetwork(false)
if err != nil {
return utils.Errorf(err, L("cannot setup network"))
}
fqdn, err := utils.GetFqdn([]string{})
if err != nil {
return err
}
preparedServerImage, preparedPgsqlImage, err := podman.PrepareImages(authFile, image, pgsqlFlags)
if err != nil {
return utils.Errorf(err, L("cannot prepare images"))
}
inspectedValues, err := prepareHost(preparedServerImage, preparedPgsqlImage)
if err != nil {
return err
}
if systemd.HasService(podman.ServerService) {
if err := systemd.StopService(podman.ServerService); err != nil {
return utils.Errorf(err, L("cannot stop service"))
}
defer func() {
err = systemd.StartService(podman.ServerService)
}()
}
if systemd.HasService(podman.DBService) {
if err := systemd.StopService(podman.DBService); err != nil {
return utils.Errorf(err, L("cannot stop service"))
}
defer func() {
err = systemd.StartService(podman.DBService)
}()
}
oldPgVersion, _ := strconv.Atoi(inspectedValues.CommonInspectData.CurrentPgVersion)
newPgVersion, _ := strconv.Atoi(inspectedValues.DBInspectData.ImagePgVersion)
if inspectedValues.CommonInspectData.CurrentPgVersionNotMigrated != "" ||
inspectedValues.DBHost == "localhost" ||
inspectedValues.ReportDBHost == "localhost" {
log.Info().Msgf(L("Configuring split PostgreSQL container. Image version: %[1]d, not migrated version: %[2]d"),
newPgVersion, oldPgVersion)
if err := configureSplitDBContainer(
preparedServerImage, preparedPgsqlImage, systemd, db, reportdb, ssl, tz, fqdn); err != nil {
return utils.Errorf(err, L("cannot configure db container"))
}
}
if newPgVersion > oldPgVersion {
if err := RunPgsqlVersionUpgrade(
authFile, registry, image, upgradeImage, strconv.Itoa(oldPgVersion),
strconv.Itoa(newPgVersion),
); err != nil {
return utils.Errorf(err, L("cannot run PostgreSQL version upgrade script"))
}
} else if newPgVersion == oldPgVersion {
log.Info().Msg(L("Upgrading without changing PostgreSQL version"))
} else {
return fmt.Errorf(
L("trying to downgrade PostgreSQL from %[1]d to %[2]d"),
oldPgVersion, newPgVersion,
)
}
if err := pgsql.Upgrade(preparedPgsqlImage, systemd); err != nil {
return err
}
schemaUpdateRequired := oldPgVersion != newPgVersion
collationChange := inspectedValues.CurrentLibcVersion != inspectedValues.ImageLibcVersion
if err := RunPgsqlFinalizeScript(preparedServerImage, schemaUpdateRequired, false, collationChange); err != nil {
return utils.Errorf(err, L("cannot run PostgreSQL finalize script"))
}
if err := RunPostUpgradeScript(preparedServerImage); err != nil {
return utils.Errorf(err, L("cannot run post upgrade script"))
}
if err := podman.CleanSystemdConfFile("uyuni-server"); err != nil {
return err
}
if err := podman.GenerateSystemdConfFile("uyuni-server", "generated.conf",
"Environment=UYUNI_IMAGE="+preparedServerImage, true,
); err != nil {
return err
}
if err := systemd.ReloadDaemon(false); err != nil {
return err
}
if err := UpdateServerSystemdService(); err != nil {
return err
}
if err := systemd.ReloadDaemon(false); err != nil {
return err
}
log.Info().Msg(L("Waiting for the server to start…"))
cnx := shared.NewConnection("podman", podman.ServerContainerName, "")
if err := systemd.StartService(podman.ServerService); err != nil {
return utils.Error(err, L("cannot start service"))
}
if err := cnx.WaitForHealthcheck(); err != nil {
log.Warn().Err(err)
}
inspectedDB := adm_utils.DBFlags{
Name: inspectedValues.DBName,
Port: inspectedValues.DBPort,
User: inspectedValues.DBUser,
Password: inspectedValues.DBPassword,
Host: db.Host,
}
err = coco.Upgrade(systemd, authFile, registry, cocoFlags, image, inspectedDB)
if err != nil {
return utils.Errorf(err, L("error upgrading confidential computing service."))
}
if err := hub.Upgrade(
systemd, authFile, registry, image.PullPolicy, image.Tag, hubXmlrpcFlags,
); err != nil {
return err
}
if err := saline.Upgrade(systemd, authFile, registry, salineFlags, image, utils.GetLocalTimezone()); err != nil {
return utils.Errorf(err, L("error upgrading saline service."))
}
return systemd.ReloadDaemon(false)
}
func WaitForSystemStart(
systemd podman.Systemd,
cnx *shared.Connection,
image string,
tz string,
debug bool,
mirrorPath string,
podmanArgs []string,
) error {
err := GenerateSystemdService(
systemd, tz, image, debug, mirrorPath, podmanArgs,
)
if err != nil {
return err
}
log.Info().Msg(L("Waiting for the server to start…"))
if err := systemd.EnableService(podman.ServerService); err != nil {
return utils.Error(err, L("cannot enable service"))
}
return cnx.WaitForHealthcheck()
}
// Migrate migrates a server to the image given as parameter.
func Migrate(
systemd podman.Systemd,
authFile string,
registry string,
db adm_utils.DBFlags,
reportdb adm_utils.DBFlags,
ssl adm_utils.InstallSSLFlags,
image types.ImageFlags,
upgradeImage types.ImageFlags,
cocoFlags adm_utils.CocoFlags,
hubXmlrpcFlags adm_utils.HubXmlrpcFlags,
salineFlags adm_utils.SalineFlags,
pgsqlFlags types.PgsqlFlags,
prepare bool,
user string,
mirror string,
podmanArgs podman.PodmanFlags,
args []string,
) error {
// Calling cloudguestregistryauth only makes sense if using the cloud provider registry.
// This check assumes users won't use custom registries that are not the cloud provider one on a cloud image.
if !strings.HasPrefix(registry, "registry.suse.com") {
if err := CallCloudGuestRegistryAuth(); err != nil {
return err
}
}
sourceFqdn, err := utils.GetFqdn(args)
if err != nil {
return err
}
// Prepare the Uyuni network: the migration container needs to run in the same network as the resulting server.
err = podman.SetupNetwork(false)
if err != nil {
return utils.Errorf(err, L("cannot setup network"))
}
// Find the SSH Socket and paths for the migration
sshAuthSocket := GetSSHAuthSocket()
sshConfigPath, sshKnownhostsPath := GetSSHPaths()
preparedServerImage, preparedPgsqlImage, err := podman.PrepareImages(authFile, image, pgsqlFlags)
if err != nil {
return utils.Errorf(err, L("cannot prepare images"))
}
if err := stopService(systemd, podman.ServerService); err != nil {
return err
}
if err := stopService(systemd, podman.DBService); err != nil {
return err
}
inspectedValues, err := RunMigration(
preparedServerImage, sshAuthSocket, sshConfigPath, sshKnownhostsPath, sourceFqdn,
user, prepare,
)
if err != nil {
return utils.Errorf(err, L("cannot run migration script"))
}
if prepare {
log.Info().Msg(L("Migration prepared. Run the 'migrate' command without '--prepare' to finish the migration."))
return nil
}
dbData, err := podman.ContainerInspect[utils.DBInspectData](
preparedPgsqlImage, utils.PgsqlRequiredVolumeMounts, utils.NewDBInspector(),
)
if err != nil {
return utils.Errorf(err, L("failed to inspect database container image"))
}
inspectedValues.DBInspectData = *dbData
oldPgVersion, _ := strconv.Atoi(inspectedValues.CurrentPgVersion)
newPgVersion, _ := strconv.Atoi(inspectedValues.DBInspectData.ImagePgVersion)
log.Info().Msgf(L("Configuring split PostgreSQL container. Image version: %[1]d, not migrated version: %[2]d"),
newPgVersion, oldPgVersion)
if err := upgradeDB(newPgVersion, oldPgVersion, upgradeImage, authFile, registry, image); err != nil {
return err
}
db.Admin.User = inspectedValues.DBUser
db.Admin.Password = inspectedValues.DBPassword
db.User = inspectedValues.DBUser
db.Password = inspectedValues.DBPassword
reportdb.User = inspectedValues.ReportDBUser
reportdb.Password = inspectedValues.ReportDBPassword
if err := configureSplitDBContainer(
preparedServerImage, preparedPgsqlImage, systemd, db, reportdb, ssl, inspectedValues.Timezone, sourceFqdn,
); err != nil {
return utils.Errorf(err, L("cannot configure db container"))
}
// At this point all certificates should be stored as secrets, so we can remove the temporary volume
if err := podman.DeleteVolume(utils.EtcTLSTmpVolumeMount.Name, false); err != nil {
log.Warn().Err(err).Msg(L("cannot remove temporary etc-tls volume"))
}
if err := pgsql.Upgrade(preparedPgsqlImage, systemd); err != nil {
return err
}
schemaUpdateRequired := oldPgVersion != newPgVersion
// The collation is based on glibc. A version change of libc requires a collation update and possibly a reindex.
collationChange := inspectedValues.CurrentLibcVersion != inspectedValues.ImageLibcVersion
if err := RunPgsqlFinalizeScript(preparedServerImage, schemaUpdateRequired, true, collationChange); err != nil {
return utils.Errorf(err, L("cannot run PostgreSQL finalize script"))
}
if err := RunPostUpgradeScript(preparedServerImage); err != nil {
return utils.Errorf(err, L("cannot run post upgrade script"))
}
cnx := shared.NewConnection("podman", podman.ServerContainerName, "")
if err := WaitForSystemStart(
systemd, cnx, preparedServerImage, inspectedValues.Timezone, inspectedValues.Debug, mirror, podmanArgs.Args,
); err != nil {
return utils.Error(err, L("cannot wait for system start"))
}
inspectedDB := adm_utils.DBFlags{
Name: inspectedValues.DBName,
Port: inspectedValues.DBPort,
User: inspectedValues.DBUser,
Password: inspectedValues.DBPassword,
Host: db.Host,
}
err = coco.Upgrade(systemd, authFile, registry, cocoFlags, image, inspectedDB)
if err != nil {
return utils.Errorf(err, L("error upgrading confidential computing service."))
}
// Automatically set a replica if Hub XMLRPC API service was running on the migrated server.
if inspectedValues.HasHubXmlrpcAPI && !hubXmlrpcFlags.IsChanged {
hubXmlrpcFlags.Replicas = 1
}
if err := hub.Upgrade(
systemd, authFile, registry, image.PullPolicy, image.Tag, hubXmlrpcFlags,
); err != nil {
return err
}
if err := saline.Upgrade(systemd, authFile, registry, salineFlags, image, utils.GetLocalTimezone()); err != nil {
return utils.Errorf(err, L("error upgrading saline service."))
}
return systemd.ReloadDaemon(false)
}
func stopService(systemd podman.Systemd, name string) error {
if systemd.HasService(name) {
if err := systemd.StopService(name); err != nil {
return utils.Error(err, L("cannot stop service"))
}
defer func() {
_ = systemd.StartService(name)
}()
}
return nil
}
var runCmdOutput = utils.RunCmdOutput
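// hasDebugPorts reports whether the given systemd unit definition exposes the Java debug port 8003.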
func hasDebugPorts(definition []byte) bool {
return regexp.MustCompile(`-p 8003:8003`).Match(definition)
}
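// getMirrorPath extracts the host path mounted on /mirror from the systemd unit definition,
// or returns an empty string if no mirror volume is mounted.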
func getMirrorPath(definition []byte) string {
mirrorPath := ""
finder := regexp.MustCompile(`-v +([^:]+):/mirror[[:space:]]`)
submatches := finder.FindStringSubmatch(string(definition))
if len(submatches) == 2 {
mirrorPath = submatches[1]
}
return mirrorPath
}
// UpdateServerSystemdService refreshes the server systemd service file.
func UpdateServerSystemdService() error {
out, err := runCmdOutput(zerolog.DebugLevel, "systemctl", "cat", podman.ServerService)
if err != nil {
return utils.Errorf(err, "failed to get %s systemd service definition", podman.ServerService)
}
return GenerateServerSystemdService(getMirrorPath(out), hasDebugPorts(out))
}
// RunPgsqlContainerMigration migrates the data to the separate PostgreSQL container.
func RunPgsqlContainerMigration(serverImage string, dbHost string, reportDBHost string) error {
data := templates.PgsqlMigrateScriptTemplateData{
DBHost: dbHost,
ReportDBHost: reportDBHost,
}
scriptBuilder := new(strings.Builder)
if err := data.Render(scriptBuilder); err != nil {
return utils.Error(err, L("failed to generate postgresql migration script"))
}
podmanArgs := []string{
"--security-opt", "label=disable",
}
return podman.RunContainer("uyuni-db-migrate", serverImage, utils.DatabaseMigrationVolumeMounts, podmanArgs,
[]string{"bash", "-e", "-c", scriptBuilder.String()})
}
// RunConfigPgsl runs the PostgreSQL configuration script in a container and restarts the database service.
func RunConfigPgsl(pgsqlImage string) error {
podmanArgs := []string{
"--security-opt", "label=disable",
"--entrypoint", "/docker-entrypoint-initdb.d/uyuni-postgres-config.sh",
}
if err := podman.RunContainer("uyuni-db-config", pgsqlImage, utils.PgsqlRequiredVolumeMounts,
podmanArgs, []string{}); err != nil {
return err
}
return systemd.RestartService(podman.DBService)
}
// CallCloudGuestRegistryAuth calls cloudguestregistryauth if it is available.
func CallCloudGuestRegistryAuth() error {
cloudguestregistryauth := "cloudguestregistryauth"
path, err := exec.LookPath(cloudguestregistryauth)
if err == nil {
if err := utils.RunCmdStdMapping(zerolog.DebugLevel, path); err != nil && isPAYG() {
// Not being registered against the cloud registry is not an error on BYOS.
return err
} else if err != nil {
log.Info().Msg(L("The above error is only relevant if using a public cloud provider registry"))
}
}
// silently ignore error if it is missing
return nil
}
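// isPAYG reports whether the host is a public cloud Pay-As-You-Go instance,
// based on the output of instance-flavor-check when that tool is present.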
func isPAYG() bool {
flavorCheckPath := "/usr/bin/instance-flavor-check"
if utils.FileExists(flavorCheckPath) {
out, _ := utils.RunCmdOutput(zerolog.DebugLevel, flavorCheckPath)
return strings.TrimSpace(string(out)) == "PAYG"
}
return false
}
// GetMountPoint returns the folder where a given volume is mounted.
func GetMountPoint(volumeName string) (string, error) {
args := []string{"volume", "inspect", "--format", "{{.Mountpoint}}", volumeName}
mountPoint, err := utils.RunCmdOutput(zerolog.DebugLevel, "podman", args...)
if err != nil {
return "", err
}
return strings.TrimSuffix(string(mountPoint), "\n"), nil
}
// GetSSHAuthSocket returns the SSH_AUTH_SOCK environment variable value.
func GetSSHAuthSocket() string {
path := os.Getenv("SSH_AUTH_SOCK")
if len(path) == 0 {
log.Fatal().Msg(L("SSH_AUTH_SOCK is not defined, start an SSH agent and try again"))
}
return path
}
// GetSSHPaths returns the user SSH config and known_hosts paths.
func GetSSHPaths() (string, string) {
// Find ssh config to mount it in the container
homedir, err := os.UserHomeDir()
if err != nil {
log.Fatal().Msg(L("Failed to find home directory to look for SSH config"))
}
sshConfigPath := filepath.Join(homedir, ".ssh", "config")
sshKnownhostsPath := filepath.Join(homedir, ".ssh", "known_hosts")
if !utils.FileExists(sshConfigPath) {
sshConfigPath = ""
}
if !utils.FileExists(sshKnownhostsPath) {
sshKnownhostsPath = ""
}
return sshConfigPath, sshKnownhostsPath
}
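// prepareHost inspects the prepared server and database images, compares them with the currently
// running ones and returns the inspected values after running the upgrade sanity checks.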
func prepareHost(
preparedServerImage string,
preparedPgsqlImage string,
) (*utils.ServerInspectData, error) {
inspectedValues, err := podman.Inspect(preparedServerImage, preparedPgsqlImage)
if err != nil {
return nil, utils.Errorf(err, L("cannot inspect podman values"))
}
runningServerImage := podman.GetServiceImage(podman.ServerService)
runningDBImage := runningServerImage
if systemd.HasService(podman.DBService) {
runningDBImage = podman.GetServiceImage(podman.DBService)
}
var runningData *utils.ServerInspectData
if runningServerImage != "" && runningDBImage != "" {
runningData, err = podman.Inspect(runningServerImage, runningDBImage)
if err != nil {
return inspectedValues, err
}
}
return inspectedValues, adm_utils.SanityCheck(runningData, inspectedValues)
}
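// upgradeDB runs the PostgreSQL version upgrade when the new image ships a newer PostgreSQL major
// version than the current data, does nothing when the versions match, and refuses downgrades.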
func upgradeDB(
newPgVersion int,
oldPgVersion int,
upgradeImage types.ImageFlags,
authFile string,
registry string,
image types.ImageFlags,
) error {
if newPgVersion > oldPgVersion {
if err := RunPgsqlVersionUpgrade(
authFile, registry, image, upgradeImage, strconv.Itoa(oldPgVersion),
strconv.Itoa(newPgVersion),
); err != nil {
return utils.Error(err, L("cannot run PostgreSQL version upgrade script"))
}
} else if newPgVersion == oldPgVersion {
log.Info().Msg(L("Upgrading without changing PostgreSQL version"))
} else {
return fmt.Errorf(
L("trying to downgrade PostgreSQL from %[1]d to %[2]d"),
oldPgVersion, newPgVersion,
)
}
return nil
}
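// configureSplitDBContainer prepares the SSL certificates and database credentials secrets,
// migrates the data to the split database container and runs its configuration script.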
func configureSplitDBContainer(
serverImage string,
pgsqlImage string,
systemd podman.Systemd,
db adm_utils.DBFlags,
reportdb adm_utils.DBFlags,
ssl adm_utils.InstallSSLFlags,
tz string,
fqdn string,
) error {
if err := PrepareSSLCertificates(serverImage, &ssl, tz, fqdn); err != nil {
return err
}
if err := RunPgsqlContainerMigration(serverImage, "db", "reportdb"); err != nil {
return utils.Errorf(err, L("cannot run PostgreSQL version upgrade script"))
}
// Create all the database credentials secrets
if err := podman.CreateCredentialsSecrets(
podman.DBUserSecret, db.User,
podman.DBPassSecret, db.Password,
); err != nil {
return err
}
if err := podman.CreateCredentialsSecrets(
podman.ReportDBUserSecret, reportdb.User,
podman.ReportDBPassSecret, reportdb.Password,
); err != nil {
return err
}
if db.IsLocal() {
// The admin password is not needed for external databases
if err := podman.CreateCredentialsSecrets(
podman.DBAdminUserSecret, db.Admin.User,
podman.DBAdminPassSecret, db.Admin.Password,
); err != nil {
return err
}
// Run the DB container setup if the user doesn't set a custom host name for it.
if err := pgsql.SetupPgsql(systemd, pgsqlImage); err != nil {
return err
}
} else {
log.Info().Msgf(
L("Skipped database container setup to use external database %s"),
db.Host,
)
}
return RunConfigPgsl(pgsqlImage)
}
070701000000aa000081a400000000000000000000000168ed21dd000008fc000000000000000000000000000000000000002400000000mgradm/shared/podman/podman_test.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package podman
import (
"testing"
"github.com/uyuni-project/uyuni-tools/shared/testutils"
)
func TestHasDebugPorts(t *testing.T) {
data := map[string]bool{
`[Service]
ExecStart=/bin/sh -c '/usr/bin/podman run \
--name uyuni-server \
--hostname uyuni-server.mgr.internal \
--rm --cap-add NET_RAW --tmpfs /run -v cgroup:/sys/fs/cgroup:rw \
-p 80:80 \
-p [::]:80:80 \
-p 8003:8003 \
-p [::]:8003:8003 \
-p 4505:4505 \
-p [::]:4505:4505`: true,
`[Service]
ExecStart=/bin/sh -c '/usr/bin/podman run \
--name uyuni-server \
--hostname uyuni-server.mgr.internal \
--rm --cap-add NET_RAW --tmpfs /run -v cgroup:/sys/fs/cgroup:rw \
-p 80:80 \
-p [::]:80:80 \
-p [::]:8003:8003 \
-p 4505:4505 \
-p [::]:4505:4505`: false,
}
for definition, expected := range data {
actual := hasDebugPorts([]byte(definition))
testutils.AssertEquals(t, "Unexpected result for "+definition, expected, actual)
}
}
func TestGetMirrorPath(t *testing.T) {
data := map[string]string{
`[Service]
ExecStart=/bin/sh -c '/usr/bin/podman run \
--name uyuni-server \
--hostname uyuni-server.mgr.internal \
--rm --cap-add NET_RAW --tmpfs /run -v cgroup:/sys/fs/cgroup:rw \
-p 80:80 \
-p 4505:4505 \
-p [::]:4505:4505`: "",
`[Service]
ExecStart=/bin/sh -c '/usr/bin/podman run \
--name uyuni-server \
--hostname uyuni-server.mgr.internal \
--rm --cap-add NET_RAW --tmpfs /run -v cgroup:/sys/fs/cgroup:rw \
-v /path/to/mirror:/mirror \
-p 80:80 \
-p 4505:4505 \
-p [::]:4505:4505`: "/path/to/mirror",
`[Service]
ExecStart=/bin/sh -c '/usr/bin/podman run \
--name uyuni-server \
--hostname uyuni-server.mgr.internal \
--rm --cap-add NET_RAW -v /path/to/mirror:/mirror --tmpfs /run -v cgroup:/sys/fs/cgroup:rw \
-p 80:80 \
-p 4505:4505 \
-p [::]:4505:4505`: "/path/to/mirror",
}
for definition, expected := range data {
actual := getMirrorPath([]byte(definition))
testutils.AssertEquals(t, "Unexpected result for "+definition, expected, actual)
}
}
070701000000ab000081a400000000000000000000000168ed21dd0000418d000000000000000000000000000000000000001c00000000mgradm/shared/podman/ssl.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package podman
import (
"errors"
"fmt"
"os"
"path"
"strings"
"github.com/rs/zerolog/log"
adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
shared_podman "github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/ssl"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
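// prepareThirdPartyCertificate orders the provided CA chain, checks that the key is not encrypted,
// writes ca.crt and server.crt to outDir and verifies that the certificate matches the given FQDNs.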
func prepareThirdPartyCertificate(caChain *types.CaChain, pair *types.SSLPair, outDir string, fqdns ...string) error {
// OrderCas checks the chain of certificates to report problems early.
// It also sorts the certificates of the chain into a single blob for Apache and PostgreSQL.
var orderedCert, rootCA []byte
var err error
if orderedCert, rootCA, err = ssl.OrderCas(caChain, pair); err != nil {
return err
}
// Check that the private key is not encrypted
if err := ssl.CheckKey(pair.Key); err != nil {
return err
}
if err := os.Mkdir(outDir, 0600); err != nil {
return err
}
// Write the ordered cert and Root CA to temp files
caPath := path.Join(outDir, "ca.crt")
if err = os.WriteFile(caPath, rootCA, 0600); err != nil {
return err
}
serverCertPath := path.Join(outDir, "server.crt")
if err = os.WriteFile(serverCertPath, orderedCert, 0600); err != nil {
return err
}
errors := []error{}
for _, fqdn := range fqdns {
errors = append(errors, ssl.VerifyHostname(caPath, serverCertPath, fqdn))
}
return utils.JoinErrors(errors...)
}
var newRunner = utils.NewRunner
// PrepareSSLCertificates prepares SSL environment for the server and database.
// If 3rd party certificates are provided, they are used; otherwise new certificates are generated.
// This function is called in both new installation and upgrade scenarios.
func PrepareSSLCertificates(image string, sslFlags *adm_utils.InstallSSLFlags, tz string, fqdn string) error {
// Prepare Server certificates
if err := prepareServerSSLcertificates(image, sslFlags, tz, fqdn); err != nil {
return err
}
// Prepare database certificates
if err := prepareDatabaseSSLcertificates(image, sslFlags, tz, fqdn); err != nil {
return err
}
return nil
}
func validateCA(image string, sslFlags *adm_utils.InstallSSLFlags, tz string) error {
tempDir, cleaner, err := utils.TempDir()
defer cleaner()
if err != nil {
return err
}
env := map[string]string{
"CERT_PASS": sslFlags.Password,
}
if err := runSSLContainer(sslValidateCA, tempDir, image, tz, env); err != nil {
return utils.Error(err, L("CA validation failed!"))
}
return nil
}
func prepareServerSSLcertificates(image string, sslFlags *adm_utils.InstallSSLFlags, tz string, fqdn string) error {
tempDir, cleaner, err := utils.TempDir()
defer cleaner()
if err != nil {
return err
}
// Check for provided certificates
if sslFlags.UseProvided() {
log.Info().Msg(L("Using provided 3rd party server certificates"))
ca := &sslFlags.Ca
pair := &sslFlags.Server
serverDir := path.Join(tempDir, "server")
if err := prepareThirdPartyCertificate(ca, pair, serverDir); err != nil {
return err
}
// Create secrets for CA
return shared_podman.CreateTLSSecrets(
shared_podman.CASecret, path.Join(serverDir, "ca.crt"),
shared_podman.SSLCertSecret, path.Join(serverDir, "server.crt"),
shared_podman.SSLKeySecret, pair.Key,
)
}
// Check if this is an upgrade scenario and there is existing CA
if reused, err := reuseExistingCertificates(image, tempDir, fqdn, false); reused && err == nil {
// We successfully loaded existing certificates
return nil
} else if reused && err != nil {
// We found certificates, but there was trouble loading them
return err
}
// Not provided and not an upgrade, generate new
return generateServerCertificate(image, sslFlags, tz, fqdn)
}
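// prepareDatabaseSSLcertificates creates the database CA, certificate and key podman secrets following the same
// logic as for the server certificates.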
func prepareDatabaseSSLcertificates(image string, sslFlags *adm_utils.InstallSSLFlags, tz string, fqdn string) error {
// Create a temporary working directory
tempDir, cleaner, err := utils.TempDir()
defer cleaner()
if err != nil {
return err
}
if sslFlags.UseProvidedDB() {
log.Info().Msg(L("Using provided 3rd party database certificates"))
dbCa := &sslFlags.DB.CA
dbPair := &sslFlags.DB.SSLPair
dbDir := path.Join(tempDir, "db")
if err := prepareThirdPartyCertificate(dbCa, dbPair, dbDir, fqdn, "db", "reportdb"); err != nil {
return err
}
// Create secrets for the database key and certificate
return shared_podman.CreateTLSSecrets(
shared_podman.DBCASecret, path.Join(dbDir, "ca.crt"),
shared_podman.DBSSLCertSecret, path.Join(dbDir, "server.crt"),
shared_podman.DBSSLKeySecret, dbPair.Key,
)
}
// Check if this is an upgrade scenario and there is existing CA
if reused, err := reuseExistingCertificates(image, tempDir, fqdn, true); reused && err == nil {
// We successfully loaded existing certificates
return nil
} else if reused && err != nil {
// We found certificates, but there was trouble loading them
return err
}
// Not provided and not an upgrade, generate new
return generateDatabaseCertificate(image, sslFlags, tz, fqdn)
}
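// reuseExistingCertificates reports whether certificates from an existing deployment could be reused,
// creating the corresponding podman secrets when they come from the legacy volume mounts.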
func reuseExistingCertificates(image string, tempDir string, fqdn string, isDatabaseCheck bool) (bool, error) {
// Upgrading from 5.1+ with all certificates as secrets
if reuseExistingCertificatesFromSecrets(isDatabaseCheck) {
secretName := shared_podman.SSLCertSecret
if isDatabaseCheck {
secretName = shared_podman.DBSSLCertSecret
}
return isFQDNMatchingCertificateSecret(fqdn, secretName), nil
}
// Upgrading from 5.0- with all certs in files
return reuseExistingCertificatesFromMounts(image, tempDir, fqdn, isDatabaseCheck)
}
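// isFQDNMatchingCertificateSecret checks whether the certificate stored in the named podman secret matches the FQDN.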
func isFQDNMatchingCertificateSecret(fqdn string, secretName string) bool {
cert, err := shared_podman.GetSecret(secretName)
if err != nil {
log.Error().Err(err).Send()
return false
}
return isFQDNMatchingCertificate(fqdn, cert)
}
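// isFQDNMatchingCertificate verifies the PEM certificate, passed to openssl on standard input, against the FQDN.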
func isFQDNMatchingCertificate(fqdn string, cert string) bool {
_, err := newRunner("openssl", "verify", "-verify_hostname", fqdn).InputString(cert).Exec()
return err == nil
}
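// reuseExistingCertificatesFromSecrets reports whether the CA, certificate and key secrets already exist
// for the server or, when isDatabaseCheck is set, for the database.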
func reuseExistingCertificatesFromSecrets(isDatabaseCheck bool) bool {
if isDatabaseCheck {
return shared_podman.HasSecret(shared_podman.DBCASecret) &&
shared_podman.HasSecret(shared_podman.DBSSLCertSecret) &&
shared_podman.HasSecret(shared_podman.DBSSLKeySecret)
}
return shared_podman.HasSecret(shared_podman.CASecret) &&
shared_podman.HasSecret(shared_podman.SSLCertSecret) &&
shared_podman.HasSecret(shared_podman.SSLKeySecret)
}
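// reuseExistingCertificatesFromMounts looks for certificates in the volumes of a pre-5.1 deployment and,
// when they are found and match the requested FQDN, stores them as podman secrets.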
func reuseExistingCertificatesFromMounts(
image string,
tempDir string,
fqdn string,
isDatabaseCheck bool,
) (bool, error) {
caPath := path.Join(tempDir, "existing-ca.crt")
serverCert := path.Join(tempDir, "existing-server.crt")
serverKey := path.Join(tempDir, "existing-key.crt")
// No longer used by 5.1+, but contains existing certs in migration scenarios
etcTLSVolume := types.VolumeMount{Name: "etc-tls", MountPath: "/etc/pki/tls"}
// Paths for server side checking
volumes := append(utils.ServerVolumeMounts, etcTLSVolume)
caCheckPath := ssl.CAContainerPath
crtCheckPath := ssl.ServerCertPath
keyCheckPath := ssl.ServerCertKeyPath
if isDatabaseCheck {
// Path for database side checking.
// It is necessary to include etc-tls and ca-cert volume mounts to simulate non-split installation
volumes = append(utils.PgsqlRequiredVolumeMounts, etcTLSVolume, utils.CaCertVolumeMount)
caCheckPath = ssl.DBCAContainerPath
crtCheckPath = ssl.DBCertPath
keyCheckPath = ssl.DBCertKeyPath
}
const containerName = "uyuni-read-certs"
// Check if we have existing CA
rootCA, err := shared_podman.ReadFromContainer(containerName, image, volumes, nil,
caCheckPath)
if err != nil {
log.Info().Msgf(L("CA file %s not found. New CA and certificates will be created."), caCheckPath)
return false, nil
}
if err = os.WriteFile(caPath, rootCA, 0444); err != nil {
return true, utils.Error(err, L("cannot write existing CA certificate"))
}
// Check for server certificate
cert, err := shared_podman.ReadFromContainer(containerName, image, volumes, nil,
crtCheckPath)
if err != nil {
log.Info().Msgf(L("Cert file %s not found. A new certificate will be created."), crtCheckPath)
return false, nil
}
if err = os.WriteFile(serverCert, cert, 0444); err != nil {
return true, utils.Error(err, L("cannot write existing server certificate"))
}
// We cannot reuse certificates not matching the requested FQDN
if !isFQDNMatchingCertificate(fqdn, string(cert)) {
return false, nil
}
// Check for server certificate key
keyData, err := shared_podman.ReadFromContainer(containerName, image, volumes, nil,
keyCheckPath)
if err != nil {
log.Info().Msgf(L("Cert key file %s not found. A new certificate will be created."), keyCheckPath)
return false, nil
}
if err = os.WriteFile(serverKey, keyData, 0400); err != nil {
return true, utils.Error(err, L("cannot write existing server key"))
}
log.Info().Msg(L("Reusing existing certificates"))
if isDatabaseCheck {
return true, shared_podman.CreateTLSSecrets(
shared_podman.DBCASecret, caPath,
shared_podman.DBSSLCertSecret, serverCert,
shared_podman.DBSSLKeySecret, serverKey,
)
}
return true, shared_podman.CreateTLSSecrets(
shared_podman.CASecret, caPath,
shared_podman.SSLCertSecret, serverCert,
shared_podman.SSLKeySecret, serverKey,
)
}
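// runSSLContainer runs the given shell script in a one-shot container based on image, with workdir bind mounted
// on /ssl and the env map exposed as container environment variables.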
func runSSLContainer(script string, workdir string, image string, tz string, env map[string]string) error {
envNames := []string{}
envValues := []string{}
for key, value := range env {
envNames = append(envNames, "-e", key)
envValues = append(envValues, fmt.Sprintf("%s=%s", key, value))
}
command := []string{
"run",
"--rm",
"--name", "uyuni-ssl-generator",
"--network", shared_podman.UyuniNetwork,
"-e", "TZ=" + tz,
"-v", utils.RootVolumeMount.Name + ":" + utils.RootVolumeMount.MountPath,
"-v", workdir + ":/ssl:z", // Bind mount for the generated certificates
}
command = append(command, envNames...)
command = append(command, image)
// Fail fast with `-e`.
command = append(command, "/usr/bin/sh", "-e", "-c", script)
_, err := newRunner("podman", command...).Env(envValues).StdMapping().Exec()
return err
}
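// generateServerCertificate generates the self-signed CA if needed and the server certificate,
// then stores them as podman secrets.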
func generateServerCertificate(image string, sslFlags *adm_utils.InstallSSLFlags, tz string, fqdn string) error {
// This generally should not happen, otherwise we would have asked for the CA password during parameter checking.
// However there are some paths, e.g. an upgrade reusing existing certificates or 3rd party database certificates,
// where the password is not checked but the existing certificates check can fail and we end up here.
if sslFlags.Password == "" {
return errors.New(L("Cannot generate new certificates without a CA password. Please check input options"))
}
tempDir, cleaner, err := utils.TempDir()
defer cleaner()
if err != nil {
return err
}
env := map[string]string{
"CERT_O": sslFlags.Org,
"CERT_OU": sslFlags.OU,
"CERT_CITY": sslFlags.City,
"CERT_STATE": sslFlags.State,
"CERT_COUNTRY": sslFlags.Country,
"CERT_EMAIL": sslFlags.Email,
"CERT_CNAMES": strings.Join(append([]string{fqdn}, sslFlags.Cnames...), " "),
"CERT_PASS": sslFlags.Password,
"HOSTNAME": fqdn,
}
if err := runSSLContainer(sslSetupServerScript, tempDir, image, tz, env); err != nil {
return utils.Error(err, L("Failed to generate server SSL certificates. Please check the input parameters."))
}
log.Info().Msg(L("Server SSL certificates generated"))
// Create secret for the database key and certificate
return shared_podman.CreateTLSSecrets(
shared_podman.CASecret, path.Join(tempDir, "ca.crt"),
shared_podman.SSLCertSecret, path.Join(tempDir, "server.crt"),
shared_podman.SSLKeySecret, path.Join(tempDir, "server.key"),
)
}
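// generateDatabaseCertificate generates the database certificate signed by the existing CA
// and stores it as podman secrets.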
func generateDatabaseCertificate(image string, sslFlags *adm_utils.InstallSSLFlags, tz string, fqdn string) error {
// Create a temporary working directory
tempDir, cleaner, err := utils.TempDir()
defer cleaner()
if err != nil {
return err
}
if err := validateCA(image, sslFlags, tz); err != nil {
return utils.Error(err, L("Cannot generate database certificate"))
}
env := map[string]string{
"CERT_O": sslFlags.Org,
"CERT_OU": sslFlags.OU,
"CERT_CITY": sslFlags.City,
"CERT_STATE": sslFlags.State,
"CERT_COUNTRY": sslFlags.Country,
"CERT_EMAIL": sslFlags.Email,
"CERT_CNAMES": strings.Join(append([]string{fqdn}, sslFlags.Cnames...), " "),
"CERT_PASS": sslFlags.Password,
"HOSTNAME": fqdn,
}
if err := runSSLContainer(sslSetupDatabaseScript, tempDir, image, tz, env); err != nil {
return utils.Error(err, L("Failed to generate database SSL certificates. Please check the input parameters."))
}
log.Info().Msg(L("Database SSL certificates generated"))
// Create secret for the database key and certificate
if err := shared_podman.CreateTLSSecrets(
shared_podman.DBCASecret, path.Join(tempDir, "ca.crt"),
shared_podman.DBSSLCertSecret, path.Join(tempDir, "reportdb.crt"),
shared_podman.DBSSLKeySecret, path.Join(tempDir, "reportdb.key"),
); err != nil {
return err
}
return nil
}
const sslSetupServerScript = `
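# getMachineName strips the last two domain components from the hostname, since rhn-ssl-tool uses the resulting
# machine name as its output directory. A '*' in wildcard hostnames is replaced with '_star_'.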
getMachineName() {
hostname="$1"
hostname=$(echo "$hostname" | sed 's/\*/_star_/g')
field_count=$(echo "$hostname" | awk -F. '{print NF}')
if [ "$field_count" -lt 3 ]; then
echo "$hostname"
return 0
fi
end_field=$(expr "$field_count" - 2)
result=$(echo "$hostname" | cut -d. -f1-"$end_field")
echo "$result"
}
# Only generate a CA if we don't have one yet
if ! test -e /root/ssl-build/RHN-ORG-TRUSTED-SSL-CERT; then
echo "Generating the self-signed SSL CA..."
mkdir -p /root/ssl-build
rhn-ssl-tool --gen-ca --force --dir /root/ssl-build \
--password "$CERT_PASS" \
--set-country "$CERT_COUNTRY" --set-state "$CERT_STATE" --set-city "$CERT_CITY" \
--set-org "$CERT_O" --set-org-unit "$CERT_OU" \
--set-common-name "$HOSTNAME" --cert-expiration 3650
fi
cp /root/ssl-build/RHN-ORG-TRUSTED-SSL-CERT /ssl/ca.crt
echo "Generate apache certificate..."
cert_args=""
for CERT_CNAME in $CERT_CNAMES; do
cert_args="$cert_args --set-cname $CERT_CNAME"
done
rhn-ssl-tool --gen-server --cert-expiration 3650 \
--dir /root/ssl-build --password "$CERT_PASS" \
--set-country "$CERT_COUNTRY" --set-state "$CERT_STATE" --set-city "$CERT_CITY" \
--set-org "$CERT_O" --set-org-unit "$CERT_OU" \
--set-hostname "$HOSTNAME" --cert-expiration 3650 --set-email "$CERT_EMAIL" \
$cert_args
MACHINE_NAME=$(getMachineName "$HOSTNAME")
cp "/root/ssl-build/$MACHINE_NAME/server.crt" /ssl/server.crt
cp "/root/ssl-build/$MACHINE_NAME/server.key" /ssl/server.key
`
// This assumes the CA certificate is generated by the server script.
// If at any point in the future we allow mixing 3rd party server certificates with a self-signed CA
// for the database, this will need to be updated to check for the CA certificate and build it if needed.
// WORKAROUND: we use /root/ssl-build/$MACHINE_NAME because rhn-ssl-tool generates the certificates
// in that folder, named after the hostname. When a cname is provided, rhn-ssl-tool should ideally
// use it as the destination folder instead.
const sslSetupDatabaseScript = `
getMachineName() {
hostname="$1"
hostname=$(echo "$hostname" | sed 's/\*/_star_/g')
field_count=$(echo "$hostname" | awk -F. '{print NF}')
if [ "$field_count" -lt 3 ]; then
echo "$hostname"
return 0
fi
end_field=$(expr "$field_count" - 2)
result=$(echo "$hostname" | cut -d. -f1-"$end_field")
echo "$result"
}
echo "Generating DB certificate..."
rhn-ssl-tool --gen-server --cert-expiration 3650 \
--dir /root/ssl-build --password "$CERT_PASS" \
--set-country "$CERT_COUNTRY" --set-state "$CERT_STATE" --set-city "$CERT_CITY" \
--set-org "$CERT_O" --set-org-unit "$CERT_OU" \
--cert-expiration 3650 --set-email "$CERT_EMAIL" \
--set-cname reportdb --set-cname db $cert_args
cp /root/ssl-build/RHN-ORG-TRUSTED-SSL-CERT /ssl/ca.crt
MACHINE_NAME=$(getMachineName "$HOSTNAME")
cp /root/ssl-build/$MACHINE_NAME/server.crt /ssl/reportdb.crt
cp /root/ssl-build/$MACHINE_NAME/server.key /ssl/reportdb.key
`
const sslValidateCA = `
CA_KEY=/root/ssl-build/RHN-ORG-PRIVATE-SSL-KEY
CA_PASS_FILE=/ssl/ca_pass
trap "test -f \"$CA_PASS_FILE\" && /bin/rm -f -- \"$CA_PASS_FILE\" " 0 1 2 3 13 15
echo "Validating CA..."
echo "$CERT_PASS" > "$CA_PASS_FILE"
test -f $CA_KEY || (echo "CA key is not available" && exit 1)
test -r "$CA_KEY" || (echo "CA key is not readable" && exit 2)
openssl rsa -noout -in "/root/ssl-build/RHN-ORG-PRIVATE-SSL-KEY" -passin "file:$CA_PASS_FILE" || \
(echo "Wrong CA key password" && exit 3)
`
070701000000ac000081a400000000000000000000000168ed21dd00000518000000000000000000000000000000000000002200000000mgradm/shared/podman/startstop.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package podman
import (
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
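// StartServices starts the database service when present, the instantiated services and the server service.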
func StartServices() error {
var dbErr error
if systemd.HasService(podman.DBService) {
dbErr = systemd.StartService(podman.DBService)
}
errs := utils.JoinErrors(
dbErr,
systemd.StartInstantiated(podman.ServerAttestationService),
systemd.StartInstantiated(podman.HubXmlrpcService),
systemd.StartInstantiated(podman.SalineService),
systemd.StartService(podman.ServerService),
)
if systemd.HasService(podman.SalineService + "@") {
errs = utils.JoinErrors(errs, systemd.StartInstantiated(podman.SalineService))
}
return errs
}
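// StopServices stops the instantiated services and the server service, then the database service when present.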
func StopServices() error {
errs := utils.JoinErrors(
systemd.StopInstantiated(podman.ServerAttestationService),
systemd.StopInstantiated(podman.HubXmlrpcService),
systemd.StopInstantiated(podman.SalineService),
systemd.StopService(podman.ServerService),
)
if systemd.HasService(podman.DBService) {
errs = utils.JoinErrors(errs, systemd.StopService(podman.DBService))
}
if systemd.HasService(podman.SalineService + "@") {
errs = utils.JoinErrors(errs, systemd.StopInstantiated(podman.SalineService))
}
return errs
}
070701000000ad000081a400000000000000000000000168ed21dd000019b8000000000000000000000000000000000000002700000000mgradm/shared/podman/startstop_test.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package podman
import (
"errors"
"fmt"
"testing"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/testutils"
)
var allServices = []string{
podman.ServerService,
podman.DBService,
podman.SalineService + "@",
podman.ServerAttestationService + "@",
podman.HubXmlrpcService + "@",
}
func TestStartServices(t *testing.T) {
cases := []struct {
installed []string
enabled []string
expectedStarted []string
expectedNotStarted []string
startErrors map[string]error
err error
}{
// Regular case with only server and DB containers
{
installed: allServices,
enabled: []string{podman.ServerService, podman.DBService},
expectedStarted: []string{podman.ServerService, podman.DBService},
expectedNotStarted: []string{
podman.HubXmlrpcService + "@", podman.ServerAttestationService + "@",
podman.SalineService + "@",
},
},
// Regular case with an instance of all services.
{
installed: allServices,
enabled: []string{
podman.ServerService, podman.DBService,
podman.HubXmlrpcService + "@0", podman.ServerAttestationService + "@0", podman.SalineService + "@0",
},
expectedStarted: []string{
podman.ServerService, podman.DBService,
podman.HubXmlrpcService + "@0", podman.ServerAttestationService + "@0", podman.SalineService + "@0",
},
expectedNotStarted: []string{},
},
// In a migration from non-split DB to split DB we have no DB container yet
{
installed: []string{
podman.ServerService, podman.HubXmlrpcService + "@", podman.ServerAttestationService + "@",
},
enabled: []string{podman.ServerService},
expectedStarted: []string{podman.ServerService},
expectedNotStarted: []string{
podman.HubXmlrpcService + "@", podman.ServerAttestationService + "@", podman.DBService,
},
},
// Error case where both the server and the DB service fail to start
{
installed: allServices,
enabled: []string{podman.ServerService, podman.DBService},
expectedNotStarted: []string{
podman.ServerService, podman.DBService, podman.HubXmlrpcService + "@",
podman.ServerAttestationService + "@", podman.SalineService + "@",
},
startErrors: map[string]error{
podman.ServerService: errors.New("failed to start server"),
podman.DBService: errors.New("failed to start DB"),
},
err: errors.New("failed to start DB; failed to start server"),
},
}
for i, testCase := range cases {
driver := testutils.FakeSystemdDriver{
Installed: testCase.installed,
Enabled: testCase.enabled,
StartServiceErrors: testCase.startErrors,
}
systemd = podman.NewSystemdWithDriver(&driver)
err := StartServices()
prefix := fmt.Sprintf("case %d - ", i+1)
for _, service := range testCase.expectedStarted {
testutils.AssertContains(t, fmt.Sprintf("%s%s not started", prefix, service), driver.Running, service)
}
for _, service := range testCase.expectedNotStarted {
testutils.AssertNotContains(t, fmt.Sprintf("%s%s has been started", prefix, service), driver.Running, service)
}
testutils.AssertEquals(t, prefix+"unexpected error returned", testCase.err, err)
}
}
func TestStopServices(t *testing.T) {
cases := []struct {
installed []string
enabled []string
started []string
expectedStarted []string
expectedNotStarted []string
stopErrors map[string]error
err error
}{
// Regular case with only server and DB containers
{
installed: allServices,
enabled: []string{podman.ServerService, podman.DBService},
started: []string{podman.ServerService, podman.DBService},
expectedNotStarted: []string{
podman.ServerService, podman.DBService, podman.HubXmlrpcService + "@",
podman.ServerAttestationService + "@", podman.SalineService + "@",
},
},
// Regular case with an instance of all services.
{
installed: allServices,
enabled: []string{
podman.ServerService, podman.DBService,
podman.HubXmlrpcService + "@0", podman.ServerAttestationService + "@0", podman.SalineService + "@0",
},
started: []string{
podman.ServerService, podman.DBService,
podman.HubXmlrpcService + "@0", podman.ServerAttestationService + "@0", podman.SalineService + "@0",
},
expectedNotStarted: []string{
podman.ServerService, podman.DBService,
podman.HubXmlrpcService + "@0", podman.ServerAttestationService + "@0", podman.SalineService + "@0",
},
},
// In a migration from non-split DB to split DB we have no DB container yet
{
installed: []string{podman.ServerService, podman.HubXmlrpcService + "@", podman.ServerAttestationService + "@"},
enabled: []string{podman.ServerService},
started: []string{podman.ServerService},
expectedNotStarted: []string{
podman.ServerService, podman.HubXmlrpcService + "@", podman.ServerAttestationService + "@", podman.DBService,
},
},
// Error case where both the server and the DB service fail to start
{
installed: allServices,
enabled: []string{podman.ServerService, podman.DBService},
started: []string{podman.ServerService, podman.DBService},
expectedStarted: []string{podman.ServerService, podman.DBService},
expectedNotStarted: []string{
podman.HubXmlrpcService + "@",
podman.ServerAttestationService + "@", podman.SalineService + "@",
},
stopErrors: map[string]error{
podman.ServerService: errors.New("failed to stop server"),
podman.DBService: errors.New("failed to stop DB"),
},
err: errors.New("failed to stop server; failed to stop DB"),
},
}
for i, testCase := range cases {
driver := testutils.FakeSystemdDriver{
Installed: testCase.installed,
Enabled: testCase.enabled,
Running: testCase.started,
StopServiceErrors: testCase.stopErrors,
}
systemd = podman.NewSystemdWithDriver(&driver)
err := StopServices()
prefix := fmt.Sprintf("case %d - ", i+1)
for _, service := range testCase.expectedStarted {
testutils.AssertContains(t, fmt.Sprintf("%s%s has been stopped", prefix, service), driver.Running, service)
}
for _, service := range testCase.expectedNotStarted {
testutils.AssertNotContains(t, fmt.Sprintf("%s%s has not been stopped", prefix, service), driver.Running, service)
}
testutils.AssertEquals(t, prefix+"unexpected error returned", testCase.err, err)
}
}
070701000000ae000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001500000000mgradm/shared/podman070701000000af000081a400000000000000000000000168ed21dd00000fe9000000000000000000000000000000000000001f00000000mgradm/shared/saline/saline.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package saline
import (
"fmt"
"strings"
"github.com/rs/zerolog/log"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/templates"
adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
// Upgrade Saline.
func Upgrade(
systemd podman.Systemd,
authFile string,
registry string,
salineFlags adm_utils.SalineFlags,
baseImage types.ImageFlags,
tz string,
) error {
if err := writeSalineServiceFiles(
systemd, authFile, registry, salineFlags, baseImage, tz,
); err != nil {
return err
}
return systemd.ScaleService(salineFlags.Replicas, podman.SalineService)
}
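// writeSalineServiceFiles computes the Saline image, writes the systemd service template and its configuration
// drop-ins, and reloads the systemd daemon. It is a no-op when no Saline image is configured.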
func writeSalineServiceFiles(
systemd podman.Systemd,
authFile string,
registry string,
salineFlags adm_utils.SalineFlags,
baseImage types.ImageFlags,
tz string,
) error {
image := salineFlags.Image
if image.Name == "" {
// Don't touch the Saline service in a PTF if it is not already present.
return nil
}
if image.Tag == "" {
if baseImage.Tag != "" {
image.Tag = baseImage.Tag
} else {
image.Tag = "latest"
}
}
if !salineFlags.IsChanged {
log.Debug().Msg("Saline settings are not changed.")
} else if salineFlags.Replicas == 0 {
log.Debug().Msg("No Saline requested.")
} else if salineFlags.Replicas > 1 {
log.Warn().Msg(L("Multiple Saline container replicas are not currently supported, setting up only one."))
salineFlags.Replicas = 1
}
salineImage, err := utils.ComputeImage(registry, baseImage.Tag, image)
if err != nil {
return utils.Error(err, L("failed to compute image URL"))
}
pullEnabled := salineFlags.Replicas > 0 && salineFlags.IsChanged
preparedImage, err := podman.PrepareImage(authFile, salineImage, baseImage.PullPolicy, pullEnabled)
if err != nil {
return err
}
salineData := templates.SalineServiceTemplateData{
NamePrefix: "uyuni",
Network: podman.UyuniNetwork,
Volumes: utils.SalineVolumeMounts,
Image: preparedImage,
}
log.Info().Msg(L("Setting up Saline service"))
if err := utils.WriteTemplateToFile(salineData,
podman.GetServicePath(podman.SalineService+"@"), 0555, true); err != nil {
return utils.Error(err, L("failed to generate systemd service unit file"))
}
environment := fmt.Sprintf(`Environment=UYUNI_SALINE_IMAGE=%s`, preparedImage)
if err := podman.GenerateSystemdConfFile(
podman.SalineService+"@", "generated.conf", environment, true,
); err != nil {
return utils.Error(err, L("cannot generate systemd conf file"))
}
config := fmt.Sprintf(`Environment=TZ=%s
`, strings.TrimSpace(tz))
if err := podman.GenerateSystemdConfFile(podman.SalineService+"@", podman.CustomConf,
config, false); err != nil {
return utils.Error(err, L("cannot generate systemd user configuration file"))
}
if err := systemd.ReloadDaemon(false); err != nil {
return err
}
return nil
}
// SetupSalineContainer sets up the Saline service.
func SetupSalineContainer(
systemd podman.Systemd,
authFile string,
registry string,
salineFlags adm_utils.SalineFlags,
baseImage types.ImageFlags,
tz string,
) error {
if err := writeSalineServiceFiles(systemd, authFile, registry, salineFlags, baseImage, tz); err != nil {
return err
}
return EnableSaline(systemd, salineFlags.Replicas)
}
// EnableSaline enables the Saline service when at least one replica is requested (more than one is capped to one).
// This function is meant for installation or migration; to enable or disable the service afterwards, use ScaleService.
func EnableSaline(systemd podman.Systemd, replicas int) error {
if replicas > 1 {
log.Warn().Msg(L("Multiple Saline container replicas are not currently supported, setting up only one."))
replicas = 1
}
if replicas > 0 {
if err := systemd.ScaleService(replicas, podman.SalineService); err != nil {
return utils.Errorf(err, L("cannot enable service"))
}
}
return nil
}
070701000000b0000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001500000000mgradm/shared/saline070701000000b1000081a400000000000000000000000168ed21dd0000079e000000000000000000000000000000000000003600000000mgradm/shared/templates/attestationServiceTemplate.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package templates
import (
"io"
"text/template"
)
const attestationServiceTemplate = `
# uyuni-server-attestation.service, generated by mgradm
# Use an uyuni-server-attestation.service.d/local.conf file to override
[Unit]
Description=Uyuni server attestation container service
Wants=network.target
After=network-online.target
[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=on-failure
ExecStartPre=/bin/rm -f %t/uyuni-server-attestation-%i.pid %t/%n.ctr-id
ExecStartPre=/usr/bin/podman rm --ignore --force -t 10 {{ .NamePrefix }}-server-attestation-%i
ExecStart=/bin/sh -c '/usr/bin/podman run \
--conmon-pidfile %t/uyuni-server-attestation-%i.pid \
--cidfile=%t/%n-%i.ctr-id \
--cgroups=no-conmon \
--sdnotify=conmon \
-d \
-e database_connection \
--secret={{ .DBUserSecret }},type=env,target=database_user \
--secret={{ .DBPassSecret }},type=env,target=database_password \
--replace \
--name {{ .NamePrefix }}-server-attestation-%i \
--hostname {{ .NamePrefix }}-server-attestation-%i.mgr.internal \
--network {{ .Network }} \
${UYUNI_SERVER_ATTESTATION_IMAGE}'
ExecStop=/usr/bin/podman stop --ignore -t 10 --cidfile=%t/%n-%i.ctr-id
ExecStopPost=/usr/bin/podman rm -f --ignore -t 10 --cidfile=%t/%n-%i.ctr-id
PIDFile=%t/uyuni-server-attestation-%i.pid
TimeoutStopSec=60
TimeoutStartSec=60
Type=forking
[Install]
WantedBy=multi-user.target default.target
`
// AttestationServiceTemplateData holds the information to create the systemd file for the coco (confidential computing) attestation container.
type AttestationServiceTemplateData struct {
NamePrefix string
Image string
Network string
DBUserSecret string
DBPassSecret string
}
// Render will create the systemd configuration file.
func (data AttestationServiceTemplateData) Render(wr io.Writer) error {
t := template.Must(template.New("service").Parse(attestationServiceTemplate))
return t.Execute(wr, data)
}
070701000000b2000081a400000000000000000000000168ed21dd00000695000000000000000000000000000000000000002700000000mgradm/shared/templates/certificate.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package templates
import (
"fmt"
"strings"
"text/template"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
)
const certificate = `apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: {{ .SecretName }}
namespace: {{ .Namespace }}
labels:
app: ` + kubernetes.ServerApp + `
spec:
secretName: {{ .SecretName }}
secretTemplate:
labels:
app: ` + kubernetes.ServerApp + `
isCA: false
usages:
- server auth
dnsNames:
{{- range .DNSNames }}
- {{ . }}
{{- end }}
issuerRef:
name: ` + kubernetes.CAIssuerName + `
kind: Issuer
group: cert-manager.io
---
`
// CertificateData is the template rendering a cert-manager Certificate object using the uyuni CA Issuer.
type CertificateData struct {
// Namespace is the Kubernetes namespace where to create the certificate object.
Namespace string
// SecretName is the name of the secret generated by the certificate object; it is also used as the name of the certificate object itself.
SecretName string
// DNSNames is a slice of DNS names to generate the SSL certificate for.
// At least one has to be provided.
DNSNames []string
}
// Render generates a string from the certificate data.
func (c CertificateData) Render() (out string, err error) {
if len(c.DNSNames) == 0 {
return "", fmt.Errorf(L("%s certificate needs at least one DNS entry"), c.SecretName)
}
builder := new(strings.Builder)
t := template.Must(template.New("certificate").Parse(certificate))
err = t.Execute(builder, c)
if err == nil {
out = builder.String()
}
return
}
070701000000b3000081a400000000000000000000000168ed21dd0000094f000000000000000000000000000000000000003300000000mgradm/shared/templates/generatedIssuerTemplate.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package templates
import (
"io"
"text/template"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
)
// Deploy self-signed issuer or CA Certificate and key.
const generatedCAIssuerTemplate = `apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: uyuni-issuer
namespace: {{ .IssuerTemplate.Namespace }}
labels:
app: ` + kubernetes.ServerApp + `
spec:
selfSigned: {}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: uyuni-ca
namespace: {{ .IssuerTemplate.Namespace }}
labels:
app: ` + kubernetes.ServerApp + `
spec:
isCA: true
{{- if or .Country .State .City .Org .OrgUnit }}
subject:
{{- if .Country }}
countries: ["{{ .Country }}"]
{{- end }}
{{- if .State }}
provinces: ["{{ .State }}"]
{{- end }}
{{- if .City }}
localities: ["{{ .City }}"]
{{- end }}
{{- if .Org }}
organizations: ["{{ .Org }}"]
{{- end }}
{{- if .OrgUnit }}
organizationalUnits: ["{{ .OrgUnit }}"]
{{- end }}
{{- end }}
{{- if .Email }}
emailAddresses:
- {{ .Email }}
{{- end }}
commonName: {{ .IssuerTemplate.FQDN }}
dnsNames:
- {{ .IssuerTemplate.FQDN }}
secretName: ` + kubernetes.CASecretName + `
privateKey:
algorithm: ECDSA
size: 256
issuerRef:
name: uyuni-issuer
kind: Issuer
group: cert-manager.io
---
`
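// NewGeneratedCAIssuerTemplate creates the data to render the issuers and certificate
// of a generated self-signed CA with the given subject fields.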
func NewGeneratedCAIssuerTemplate(
namespace string,
fqdn string,
country string,
state string,
city string,
org string,
orgUnit string,
email string,
) GeneratedCAIssuerTemplate {
template := GeneratedCAIssuerTemplate{
IssuerTemplate: IssuerTemplate{
Namespace: namespace,
FQDN: fqdn,
},
Country: country,
State: state,
City: city,
Org: org,
OrgUnit: orgUnit,
Email: email,
}
template.template = template
return template
}
// GeneratedCAIssuerTemplate is a template to render cert-manager issuers for a generated self-signed CA.
type GeneratedCAIssuerTemplate struct {
IssuerTemplate
Country string
State string
City string
Org string
OrgUnit string
Email string
}
// Render writes the issuer text to the wr writer.
func (data GeneratedCAIssuerTemplate) Render(wr io.Writer) error {
t := template.Must(template.New("issuer").Parse(generatedCAIssuerTemplate + uyuniCAIssuer))
return t.Execute(wr, data)
}
070701000000b4000081a400000000000000000000000168ed21dd00000869000000000000000000000000000000000000003400000000mgradm/shared/templates/hubXmlrpcServiceTemplate.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package templates
import (
"io"
"text/template"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
const hubXmlrpcServiceTemplate = `# uyuni-uyuni-hub-xmlrpc.service, generated by mgradm
# Use an uyuni-hub-xmlrpc.service.d/local.conf file to override
[Unit]
Description=Uyuni Hub XMLRPC API container service
Wants=network.target
After=network-online.target
[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Environment=HUB_API_URL=http://{{ .ServerHost }}:80/rpc/api
Environment=HUB_CONNECT_USING_SSL=true
Restart=on-failure
ExecStartPre=/bin/rm -f %t/uyuni-hub-xmlrpc-%i.pid %t/%n.ctr-id
ExecStartPre=/usr/bin/podman rm --ignore --force -t 10 {{ .NamePrefix }}-hub-xmlrpc-%i
ExecStart=/usr/bin/podman run \
--conmon-pidfile %t/uyuni-hub-xmlrpc-%i.pid \
--cidfile=%t/%n-%i.ctr-id \
--cgroups=no-conmon \
--sdnotify=conmon \
-d \
--replace \
{{- range .Ports }}
-p {{ .Exposed }}:{{ .Port }}{{if .Protocol}}/{{ .Protocol }}{{end}} \
{{- end }}
-e HUB_API_URL \
-e HUB_CONNECT_TIMEOUT \
-e HUB_REQUEST_TIMEOUT \
-e HUB_CONNECT_USING_SSL \
--secret {{ .CaSecret }},type=mount,target={{ .CaPath }} \
--name {{ .NamePrefix }}-hub-xmlrpc-%i \
--hostname {{ .NamePrefix }}-hub-xmlrpc-%i.mgr.internal \
--network {{ .Network }} \
${UYUNI_HUB_XMLRPC_IMAGE}
ExecStop=/usr/bin/podman stop --ignore -t 10 --cidfile=%t/%n-%i.ctr-id
ExecStopPost=/usr/bin/podman rm -f --ignore -t 10 --cidfile=%t/%n-%i.ctr-id
PIDFile=%t/uyuni-hub-xmlrpc-%i.pid
TimeoutStopSec=60
TimeoutStartSec=60
Type=forking
[Install]
WantedBy=multi-user.target default.target
`
// HubXmlrpcServiceTemplateData holds information to create the systemd file.
type HubXmlrpcServiceTemplateData struct {
CaSecret string
CaPath string
Ports []types.PortMap
NamePrefix string
Image string
Network string
ServerHost string
}
// Render will create the systemd configuration file.
func (data HubXmlrpcServiceTemplateData) Render(wr io.Writer) error {
t := template.Must(template.New("service").Parse(hubXmlrpcServiceTemplate))
return t.Execute(wr, data)
}
070701000000b5000081a400000000000000000000000168ed21dd000006ef000000000000000000000000000000000000002a00000000mgradm/shared/templates/issuerTemplate.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package templates
import (
"os"
"path/filepath"
"strings"
"github.com/rs/zerolog"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
// IssuerTemplate is the base structure for all issuer templates.
type IssuerTemplate struct {
Namespace string
FQDN string
template utils.Template
}
// Apply renders the issuer with the certificates and applies them all at once.
func (data IssuerTemplate) Apply() error {
// Create the server and database certificates
serverCert, err := CertificateData{
Namespace: data.Namespace,
SecretName: kubernetes.CertSecretName,
DNSNames: []string{data.FQDN},
}.Render()
if err != nil {
return err
}
dbCert, err := CertificateData{
Namespace: data.Namespace,
SecretName: kubernetes.DBCertSecretName,
DNSNames: []string{data.FQDN, "db", "reportdb"},
}.Render()
if err != nil {
return err
}
tempDir, cleaner, err := utils.TempDir()
if err != nil {
return err
}
defer cleaner()
path := filepath.Join(tempDir, "issuer.yaml")
builder := new(strings.Builder)
if err := data.template.Render(builder); err != nil {
return utils.Error(err, L("failed to render issuer template"))
}
builder.WriteString(serverCert)
builder.WriteString(dbCert)
if err := os.WriteFile(path, []byte(builder.String()), 0700); err != nil {
return utils.Errorf(err, L("failed to write issuer and certificates to %s file"), path)
}
_, err = utils.NewRunner("kubectl", "apply", "-f", path).Log(zerolog.DebugLevel).Exec()
if err != nil {
return utils.Error(err, L("failed to apply template"))
}
return nil
}
070701000000b6000081a400000000000000000000000168ed21dd00000a27000000000000000000000000000000000000003200000000mgradm/shared/templates/mgrSetupScriptTemplate.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package templates
import (
"io"
"text/template"
)
//nolint:lll
const mgrSetupScriptTemplate = `#!/bin/sh
if test -e /root/.MANAGER_SETUP_COMPLETE; then
echo "Server appears to be already configured. Installation options may be ignored."
exit 0
fi
{{- if .DebugJava }}
echo 'JAVA_OPTS=" $JAVA_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,address=*:8003,server=y,suspend=n" ' >> /etc/tomcat/conf.d/remote_debug.conf
echo 'JAVA_OPTS=" $JAVA_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,address=*:8001,server=y,suspend=n" ' >> /etc/rhn/taskomatic.conf
echo 'JAVA_OPTS=" $JAVA_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,address=*:8002,server=y,suspend=n" ' >> /usr/share/rhn/config-defaults/rhn_search_daemon.conf
{{- end }}
/usr/lib/susemanager/bin/mgr-setup
RESULT=$?
if test -n "{{ .AdminPassword }}"; then
echo "starting tomcat..."
(su -s /usr/bin/sh -g tomcat -G www -G susemanager tomcat /usr/lib/tomcat/server start)&
echo "starting apache2..."
/usr/sbin/start_apache2 -k start
echo "Creating first user..."
{{ if .NoSSL }}
CURL_SCHEME="http"
{{ else }}
CURL_SCHEME="-L -k https"
{{ end }}
curl -o /tmp/curl-retry -s --retry 7 $CURL_SCHEME://localhost/rhn/newlogin/CreateFirstUser.do
HTTP_CODE=$(curl -o /dev/null -s -w %{http_code} $CURL_SCHEME://localhost/rhn/newlogin/CreateFirstUser.do)
if test "$HTTP_CODE" == "200"; then
echo "Creating administration user"
curl -s -o /tmp/curl_out \
-d "orgName={{ .OrgName }}" \
-d "adminLogin={{ .AdminLogin }}" \
-d "adminPassword={{ .AdminPassword }}" \
-d "firstName={{ .AdminFirstName }}" \
-d "lastName={{ .AdminLastName }}" \
-d "email={{ .AdminEmail }}" \
$CURL_SCHEME://localhost/rhn/manager/api/org/createFirst
if ! grep -q '^{"success":true' /tmp/curl_out ; then
echo "Failed to create the administration user"
cat /tmp/curl_out
fi
rm -f /tmp/curl_out
elif test "$HTTP_CODE" == "403"; then
echo "Administration user already exists, reusing"
else
RESULT=1
fi
fi
exit $RESULT
`
// MgrSetupScriptTemplateData represents information used to create setup script.
type MgrSetupScriptTemplateData struct {
NoSSL bool
DebugJava bool
AdminPassword string
AdminLogin string
AdminFirstName string
AdminLastName string
AdminEmail string
OrgName string
}
// Render will create setup script.
func (data MgrSetupScriptTemplateData) Render(wr io.Writer) error {
t := template.Must(template.New("script").Parse(mgrSetupScriptTemplate))
return t.Execute(wr, data)
}
070701000000b7000081a400000000000000000000000168ed21dd00002b97000000000000000000000000000000000000003100000000mgradm/shared/templates/migrateScriptTemplate.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package templates
import (
"io"
"text/template"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
//nolint:lll
const migrationScriptTemplate = `
set -e
SSH_CONFIG=""
if test -e /tmp/ssh_config; then
SSH_CONFIG="-F /tmp/ssh_config"
fi
SSH="ssh -o User={{ .User }} -A $SSH_CONFIG "
if $SSH {{ .SourceFqdn }} "[[ ! -f /etc/susemanager-release && ! -f /etc/uyuni-release ]]"; then
echo "Cannot find either /etc/susemanager-release or /etc/uyuni-release. Is the source a non-containerized server?"
exit 1
fi
{{ if .Prepare }}
echo "Preparing migration..."
$SSH {{ .SourceFqdn }} "sudo systemctl start postgresql.service"
{{ else }}
echo "Stopping spacewalk service..."
$SSH {{ .SourceFqdn }} "sudo spacewalk-service stop ; sudo systemctl start postgresql.service"
{{ end }}
$SSH {{ .SourceFqdn }} \
"echo \"COPY (SELECT MIN(CONCAT(org_id, '-', label)) AS target, base_path FROM rhnKickstartableTree GROUP BY base_path) TO STDOUT WITH CSV;\" \
|sudo spacewalk-sql --select-mode - " > distros
{{ if not .Prepare }}
echo "Stopping postgresql service..."
$SSH {{ .SourceFqdn }} "sudo systemctl stop postgresql.service"
{{ end }}
while IFS="," read -r target path ; do
if $SSH -n {{ .SourceFqdn }} test -e "$path" ; then
echo "-/ $path"
# protect the targets that may already have been synced during the --prepare phase
echo "P/ /srv/www/distributions/$target"
fi
done < distros > exclude_list
# exclude all config files which already exist and are not marked noreplace
rpm -qa --qf '[%{fileflags},%{filenames}\n]' |grep ",/etc/" | while IFS="," read -r flags path ; do
# config(noreplace) is 1<<4 (from lib/rpmlib.h)
if [ $(( $flags & 16 )) -eq 0 -a -f "$path" ] ; then
echo "-/ $path" >> exclude_list
fi
done
# No need to migrate zypper's cache
echo "-/ /var/cache/zypp/**" >> exclude_list
# Migrating the reposync cache files doesn't bring value and contains dangling symlinks (bsc#1231769)
echo "-/ /var/cache/rhn/reposync/**" >> exclude_list
# exclude mgr-sync configuration file, in this way it would be re-generated (bsc#1228685)
echo "-/ /root/.mgr-sync" >> exclude_list
# exclude tomcat default configuration. All settings should be stored in /etc/tomcat/conf.d/
echo "-/ /etc/sysconfig/tomcat" >> exclude_list
echo "-/ /etc/tomcat/tomcat.conf" >> exclude_list
# exclude schema migration files
echo "-/ /etc/sysconfig/rhn/reportdb-schema-upgrade" >> exclude_list
echo "-/ /etc/sysconfig/rhn/schema-upgrade" >> exclude_list
# exclude lastlog - it is not needed and can be too large
echo "-/ /var/log/lastlog" >> exclude_list
# exclude systemd units as they will be recreated later
echo "-/ /etc/systemd/**" >> exclude_list
# Exclude py2*-compat-salt.conf as they can't work in the container
echo "-/ /etc/salt/master.d/py2*-compat-salt.conf" >> exclude_list
# uyuni issue #10055. Some old system might have this file
echo "-/ /etc/apache2/vhosts.d/cobbler.conf" >> exclude_list
for folder in {{ range .Volumes }}{{ .MountPath }} {{ end }};
do
RSYNC_ARGS=-l
# Those folders used to have symlinks in the cloud images.
if test "$folder" = "/var/cache" -o "$folder" = "/var/spacewalk" -o \
"$folder" = "/var/lib/pgsql"; then
RSYNC_ARGS=-L
fi
if $SSH {{ .SourceFqdn }} test -e $folder; then
echo "Copying $folder..."
rsync --delete -e "$SSH" --rsync-path='sudo rsync' -avz $RSYNC_ARGS --trust-sender -f 'merge exclude_list' {{ .SourceFqdn }}:$folder/ $folder;
else
echo "Skipping missing $folder..."
fi
done;
spacewalk-service enable
if $SSH {{ .SourceFqdn }} systemctl is-enabled tftp.socket; then
echo "Enabling tftp socket..."
systemctl enable tftp.socket
fi
sed -i -e 's|appBase="webapps"|appBase="/usr/share/susemanager/www/tomcat/webapps"|' /etc/tomcat/server.xml
sed -i -e 's|DocumentRoot\s*"/srv/www/htdocs"|DocumentRoot "/usr/share/susemanager/www/htdocs"|' /etc/apache2/vhosts.d/vhost-ssl.conf
echo "Migrating auto-installable distributions..."
while IFS="," read -r target path ; do
if $SSH -n {{ .SourceFqdn }} test -e $path ; then
echo "Copying distribution $target from $path"
mkdir -p "/srv/www/distributions/$target"
rsync --delete -e "$SSH" --rsync-path='sudo rsync' -avz "{{ .SourceFqdn }}:$path/" "/srv/www/distributions/$target"
# Adjust cobbler distro configuration
for config in $(grep "$path/" -r /var/lib/cobbler/collections/distros/ -l); do
sed "s|$path/|/srv/www/distributions/$target/|g" -i $config
done
else
echo "Skipping missing distribution $path..."
fi
done < distros
echo "Migrating auto-installation snippets..."
$SSH {{ .SourceFqdn }} "find /var/lib/cobbler/snippets/spacewalk/* -type d" > snippets_dirs
while read -r snippets_dir ; do
if $SSH -n {{ .SourceFqdn }} test -e $snippets_dir; then
echo "Copying autoinstallation snippets from $snippets_dir..."
mkdir -p "$snippets_dir"
rsync --delete -e "$SSH" --rsync-path='sudo rsync' -avz "{{ .SourceFqdn }}:$snippets_dir" "$snippets_dir";
else
echo "Skipping autoinstallation snippets from $snippets_dir.."
fi
done < snippets_dirs
if $SSH {{ .SourceFqdn }} test -e /etc/tomcat/conf.d; then
echo "Copying tomcat configuration.."
mkdir -p /etc/tomcat/conf.d
rsync --delete -e "$SSH" --rsync-path='sudo rsync' -avz {{ .SourceFqdn }}:/etc/tomcat/conf.d /etc/tomcat/;
else
echo "Skipping tomcat configuration.."
fi
echo "Migrating monitoring configuration..."
declare -A monitoring_conf
monitoring_conf+=([/usr/lib/systemd/system/tomcat.service.d/jmx.conf]="/etc/sysconfig/tomcat/systemd/")
monitoring_conf+=([/usr/lib/systemd/system/taskomatic.service.d/jmx.conf]="/etc/sysconfig/taskomatic/systemd/")
monitoring_conf+=([/etc/postgres_exporter/postgres_exporter_queries.yaml]="/etc/sysconfig/prometheus-postgres_exporter/")
monitoring_conf+=([/etc/systemd/system/prometheus-postgres_exporter.service.d/60-server.conf]="/etc/sysconfig/prometheus-postgres_exporter/systemd/")
monitoring_conf+=([/etc/postgres_exporter/pg_passwd]="/etc/sysconfig/prometheus-postgres_exporter/")
for config_file in ${!monitoring_conf[@]}
do
if $SSH {{ .SourceFqdn }} test -e ${config_file}; then
mkdir -p "${monitoring_conf[${config_file}]}"
rsync --delete -e "$SSH" --rsync-path='sudo rsync' -avz "{{ .SourceFqdn }}:${config_file}" "${monitoring_conf[${config_file}]}";
fi
done
if $SSH {{ .SourceFqdn }} systemctl is-enabled prometheus-postgres_exporter.service; then
echo "Enabling prometheus-postgres_exporter service..."
ln -s /usr/lib/systemd/system/prometheus-postgres_exporter.service /etc/systemd/system/multi-user.target.wants/prometheus-postgres_exporter.service
fi
rm -f /srv/www/htdocs/pub/RHN-ORG-TRUSTED-SSL-CERT;
ln -s /etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT /srv/www/htdocs/pub/RHN-ORG-TRUSTED-SSL-CERT;
echo "Extracting time zone..."
$SSH {{ .SourceFqdn }} timedatectl show -p Timezone >/var/lib/uyuni-tools/data
echo "Extracting postgresql versions..."
echo "current_pg_version=$(cat /var/lib/pgsql/data/PG_VERSION)" >> /var/lib/uyuni-tools/data
echo "current_libc_version=2.31" >> /var/lib/uyuni-tools/data
grep '^db_user' /etc/rhn/rhn.conf | sed 's/[ \t]//g' >>/var/lib/uyuni-tools/data
grep '^db_password' /etc/rhn/rhn.conf | sed 's/[ \t]//g' >>/var/lib/uyuni-tools/data
grep '^db_name' /etc/rhn/rhn.conf | sed 's/[ \t]//g' >>/var/lib/uyuni-tools/data
grep '^db_port' /etc/rhn/rhn.conf | sed 's/[ \t]//g' >>/var/lib/uyuni-tools/data
grep '^report_db_user' /etc/rhn/rhn.conf | sed 's/[ \t]//g' >>/var/lib/uyuni-tools/data
grep '^report_db_password' /etc/rhn/rhn.conf | sed 's/[ \t]//g' >>/var/lib/uyuni-tools/data
$SSH {{ .SourceFqdn }} sh -c "systemctl list-unit-files | grep hub-xmlrpc-api | grep -q active && echo has_hubxmlrpc=true || echo has_hubxmlrpc=false" >>/var/lib/uyuni-tools/data
(test $($SSH {{ .SourceFqdn }} grep jdwp -r /etc/tomcat/conf.d/ /etc/rhn/taskomatic.conf | wc -l) -gt 0 && echo debug=true || echo debug=false) >>/var/lib/uyuni-tools/data
echo "Altering configuration for domain resolution..."
sed 's/^db_host = .*/db_host = {{ .DBHost }}/' -i /etc/rhn/rhn.conf;
sed 's/^report_db_host = .*/report_db_host = {{ .ReportDBHost }}/' -i /etc/rhn/rhn.conf;
if ! grep -q '^java.hostname *=' /etc/rhn/rhn.conf; then
sed 's/server\.jabber_server/java\.hostname/' -i /etc/rhn/rhn.conf;
fi
echo 'client_use_localhost: true' >> /etc/cobbler/settings.d/zz-uyuni.settings;
echo "Altering configuration for container environment..."
sed 's/address=[^:]*:/address=*:/' -i /etc/rhn/taskomatic.conf;
echo "Altering tomcat configuration..."
sed 's/--add-modules java.annotation,com.sun.xml.bind://' -i /etc/tomcat/conf.d/*
sed 's/-XX:-UseConcMarkSweepGC//' -i /etc/tomcat/conf.d/*
test -f /etc/tomcat/conf.d/remote_debug.conf && sed 's/address=[^:]*:/address=*:/' -i /etc/tomcat/conf.d/remote_debug.conf
# Alter rhn.conf to ensure mirror is set to /mirror if set at all
sed 's/server.susemanager.fromdir =.*/server.susemanager.fromdir = \/mirror/' -i /etc/rhn/rhn.conf
{{ if .Kubernetes }}
echo 'server.no_ssl = 1' >> /etc/rhn/rhn.conf;
echo "Extracting SSL certificate and authority"
extractedSSL=
if test -d /root/ssl-build; then
# We may have an old unused ssl-build folder, check if the CA matches the deployed one
buildCaFingerprint=
if test -e /root/ssl-build/RHN-ORG-TRUSTED-SSL-CERT; then
buildCaFingerprint=$(openssl x509 -in /root/ssl-build/RHN-ORG-TRUSTED-SSL-CERT -noout -fingerprint)
fi
caFingerprint=$(openssl x509 -in /etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT -noout -fingerprint)
if test "$buildCaFingerprint" == "$caFingerprint"; then
echo "Extracting SSL Root CA key..."
# Extract the SSL CA certificate and key.
# The server certificate will be auto-generated by cert-manager using it, so no need to copy it.
cp /root/ssl-build/RHN-ORG-PRIVATE-SSL-KEY /var/lib/uyuni-tools/
extractedSSL="1"
fi
fi
# This Root CA file is common to both cases
cp /etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT /var/lib/uyuni-tools/RHN-ORG-TRUSTED-SSL-CERT
if test "$extractedSSL" != "1"; then
# For third party certificates, the CA chain is in the certificate file.
rsync --delete -e "$SSH" --rsync-path='sudo rsync' -avz {{ .SourceFqdn }}:/etc/pki/tls/private/spacewalk.key /var/lib/uyuni-tools/
rsync --delete -e "$SSH" --rsync-path='sudo rsync' -avz {{ .SourceFqdn }}:/etc/pki/tls/certs/spacewalk.crt /var/lib/uyuni-tools/
fi
echo "Removing useless ssl-build folder..."
rm -rf /root/ssl-build
# The content of this folder will be a RO mount from a configmap
rm /etc/pki/trust/anchors/*
{{ end }}
echo "DONE"`
// MigrateScriptTemplateData represents migration information used to create migration script.
type MigrateScriptTemplateData struct {
Volumes []types.VolumeMount
SourceFqdn string
User string
Kubernetes bool
Prepare bool
DBHost string
ReportDBHost string
}
// Render will create migration script.
func (data MigrateScriptTemplateData) Render(wr io.Writer) error {
t := template.Must(template.New("script").Parse(migrationScriptTemplate))
return t.Execute(wr, data)
}
070701000000b8000081a400000000000000000000000168ed21dd0000098b000000000000000000000000000000000000003700000000mgradm/shared/templates/pgsqlFinalizeScriptTemplate.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package templates
import (
"io"
"text/template"
)
//nolint:lll
const postgresFinalizeScriptTemplate = `
{{ if .RunReindex }}
# Reindexing may not be needed for every collation change, but better to be on the safe side.
echo "Reindexing database. This may take a while, please do not cancel it!"
database=$(sed -n "s/^\s*db_name\s*=\s*\([^ ]*\)\s*$/\1/p" /etc/rhn/rhn.conf)
spacewalk-sql --select-mode - <<<"REINDEX DATABASE \"${database}\";"
# After reindexing, alter the collation version
# Some databases like template0 may not accept changes and that's fine
set +e
for dbname in $(echo -e "\\pset tuples_only\n\\l\n" | spacewalk-sql --select-mode - | grep -v "is on" | cut -d '|' -f 1); do
if test -n "$dbname";
then
echo "Refreshing collation of database $dbname"
spacewalk-sql --select-mode - <<<"ALTER DATABASE $dbname REFRESH COLLATION VERSION;"
fi
done
set -e
{{ end }}
{{ if .RunSchemaUpdate }}
echo "Schema update..."
/usr/sbin/spacewalk-startup-helper check-database
{{ end }}
{{ if .Migration }}
echo "Updating auto-installable distributions..."
spacewalk-sql --select-mode - <<EOT
SELECT MIN(CONCAT(org_id, '-', label)) AS target, base_path INTO TEMP TABLE dist_map FROM rhnKickstartableTree GROUP BY base_path;
UPDATE rhnKickstartableTree SET base_path = CONCAT('/srv/www/distributions/', target)
from dist_map WHERE dist_map.base_path = rhnKickstartableTree.base_path;
DROP TABLE dist_map;
EOT
echo "Schedule a system list update task..."
spacewalk-sql --select-mode - <<EOT
insert into rhnTaskQueue (id, org_id, task_name, task_data)
SELECT nextval('rhn_task_queue_id_seq'), 1, 'update_system_overview', s.id
from rhnserver s
where not exists (select 1 from rhntaskorun r join rhntaskotemplate t on r.template_id = t.id
join rhntaskobunch b on t.bunch_id = b.id where b.name='update-system-overview-bunch' limit 1);
EOT
{{ end }}
`
// FinalizePostgresTemplateData represents information used to create the PostgreSQL finalization script.
type FinalizePostgresTemplateData struct {
RunReindex bool
RunSchemaUpdate bool
Migration bool
Kubernetes bool
}
// Render will create script for finalizing PostgreSQL upgrade.
func (data FinalizePostgresTemplateData) Render(wr io.Writer) error {
t := template.Must(template.New("script").Parse(postgresFinalizeScriptTemplate))
return t.Execute(wr, data)
}
070701000000b9000081a400000000000000000000000168ed21dd00000425000000000000000000000000000000000000003600000000mgradm/shared/templates/pgsqlMigrateScriptTemplate.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package templates
import (
"io"
"text/template"
)
//nolint:lll
const pgsqlMigrationScriptTemplate = `
set -e -x
if [ -d /var/lib/pgsql/data/data ] ; then
shopt -s dotglob
rsync -a --exclude=/var/lib/pgsql/data/data/pg_hba.conf /var/lib/pgsql/data/data/ /var/lib/pgsql/data/ 2>/dev/null
rm -rf /var/lib/pgsql/data/data
fi
{{ if .ReportDBHost }}
sed 's/^report_db_host = .*/report_db_host = {{ .ReportDBHost }}/' -i /etc/rhn/rhn.conf;
{{ end }}
{{ if .DBHost }}
sed 's/^db_host = .*/db_host = {{ .DBHost }}/' -i /etc/rhn/rhn.conf;
{{ end }}
echo "DONE"`
// PgsqlMigrateScriptTemplateData represents migration information used to create the PostgreSQL migration script.
type PgsqlMigrateScriptTemplateData struct {
DBHost string
ReportDBHost string
}
// Render will create migration script.
func (data PgsqlMigrateScriptTemplateData) Render(wr io.Writer) error {
t := template.Must(template.New("script").Parse(pgsqlMigrationScriptTemplate))
return t.Execute(wr, data)
}
070701000000ba000081a400000000000000000000000168ed21dd00000bc6000000000000000000000000000000000000003000000000mgradm/shared/templates/pgsqlServiceTemplate.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package templates
import (
"io"
"text/template"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
const pgsqlServiceTemplate = `# uyuni-db-server.service, generated by mgradm
# Use an uyuni-db-server.service.d/local.conf file to override
[Unit]
Description=Uyuni database container service
Wants=network.target
After=network-online.target
RequiresMountsFor=%t/containers
[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=on-failure
ExecStartPre=/bin/rm -f %t/%n.pid %t/%n.ctr-id
ExecStartPre=/usr/bin/podman rm --ignore --force -t 10 {{ .NamePrefix }}-db
ExecStart=/bin/sh -c '/usr/bin/podman run \
--conmon-pidfile %t/%n.pid \
--cidfile=%t/%n.ctr-id \
--cgroups=no-conmon \
--shm-size=0 \
--shm-size-systemd=0 \
--sdnotify=conmon \
-d \
--name {{ .NamePrefix }}-db \
--hostname {{ .NamePrefix }}-db.mgr.internal \
--network-alias db \
--network-alias reportdb \
--secret {{ .CaSecret }},type=mount,target={{ .CaPath }} \
--secret {{ .KeySecret }},type=mount,uid=999,mode=0400,target={{ .KeyPath }} \
--secret {{ .CertSecret }},type=mount,target={{ .CertPath }} \
--secret {{ .AdminUser }},type=env,target=POSTGRES_USER \
--secret {{ .AdminPassword }},type=env,target=POSTGRES_PASSWORD \
--secret {{ .ManagerUser }},type=env,target=MANAGER_USER \
--secret {{ .ManagerPassword }},type=env,target=MANAGER_PASS \
--secret {{ .ReportUser }},type=env,target=REPORT_DB_USER \
--secret {{ .ReportPassword }},type=env,target=REPORT_DB_PASS \
{{- range .Ports }}
-p {{ .Exposed }}:{{ .Port }}{{if .Protocol}}/{{ .Protocol }}{{end}} \
{{- if $.IPV6Enabled }}
-p [::]:{{ .Exposed }}:{{ .Port }}{{if .Protocol}}/{{ .Protocol }}{{end}} \
{{- end }}
{{- end }}
{{- range .Volumes }}
-v {{ .Name }}:{{ .MountPath }} \
{{- end }}
--network {{ .Network }} \
${PODMAN_EXTRA_ARGS} ${UYUNI_IMAGE}'
ExecStop=/usr/bin/podman stop \
--ignore -t 10 \
--cidfile=%t/%n.ctr-id
ExecStopPost=/usr/bin/podman rm \
-f \
--ignore -t 10 \
--cidfile=%t/%n.ctr-id
PIDFile=%t/%n.pid
TimeoutStopSec=180
TimeoutStartSec=900
Type=forking
[Install]
WantedBy=multi-user.target default.target
`
// PgsqlServiceTemplateData holds the information to create the database container systemd file.
type PgsqlServiceTemplateData struct {
Volumes []types.VolumeMount
NamePrefix string
Ports []types.PortMap
Image string
Network string
IPV6Enabled bool
CaSecret string
CaPath string
CertSecret string
CertPath string
KeySecret string
KeyPath string
AdminUser string
AdminPassword string
ManagerUser string
ManagerPassword string
ReportUser string
ReportPassword string
}
// Render will create the systemd configuration file.
func (data PgsqlServiceTemplateData) Render(wr io.Writer) error {
t := template.Must(template.New("service").Parse(pgsqlServiceTemplate))
return t.Execute(wr, data)
}
070701000000bb000081a400000000000000000000000168ed21dd00000a5d000000000000000000000000000000000000003d00000000mgradm/shared/templates/pgsqlVersionUpgradeScriptTemplate.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package templates
import (
"io"
"text/template"
)
//nolint:lll
const postgreSQLVersionUpgradeScriptTemplate = `#!/bin/bash
set -e
echo "PostgreSQL version upgrade"
OLD_VERSION={{ .OldVersion }}
NEW_VERSION={{ .NewVersion }}
echo "Testing presence of postgresql$NEW_VERSION..."
test -d /usr/lib/postgresql$NEW_VERSION/bin
echo "Testing presence of postgresql$OLD_VERSION..."
test -d /usr/lib/postgresql$OLD_VERSION/bin
BACKUP_DIR={{ .BackupDir }}/backup
echo "Create a database backup at $BACKUP_DIR"
test -d "$BACKUP_DIR" && mv "$BACKUP_DIR" "${BACKUP_DIR}$(date '+%Y%m%d_%H%M%S')"
mkdir -p "$BACKUP_DIR"
chown postgres:postgres "$BACKUP_DIR"
chmod 700 "$BACKUP_DIR"
shopt -s dotglob
mv /var/lib/pgsql/data/* "$BACKUP_DIR"
echo "Create new database directory..."
chown -R postgres:postgres /var/lib/pgsql
if [ -e /etc/pki/tls/private/pg-spacewalk.key ]; then
echo "Enforce key permission"
chown -R postgres:postgres /etc/pki/tls/private/pg-spacewalk.key
chown -R postgres:postgres /etc/pki/tls/certs/spacewalk.crt
fi
echo "Initialize new postgresql $NEW_VERSION database..."
. /etc/sysconfig/postgresql 2>/dev/null # Load locale for SUSE
PGHOME=$(getent passwd postgres | cut -d ":" -f6)
if [ -z "$POSTGRES_LANG" ]; then
POSTGRES_LANG="en_US.UTF-8"
[ -n "$LC_CTYPE" ] && POSTGRES_LANG=$LC_CTYPE
fi
echo "Running initdb using postgres user"
echo "Any suggested command from the console should be run using postgres user"
su -s /bin/bash - postgres -c "initdb -D /var/lib/pgsql/data --locale=$POSTGRES_LANG"
echo "Successfully initialized new postgresql $NEW_VERSION database."
echo "Temporarily disable SSL in the old posgresql configuration"
cp "${BACKUP_DIR}/postgresql.conf" "${BACKUP_DIR}/postgresql.conf.bak"
sed 's/^ssl/#ssl/' -i "${BACKUP_DIR}/postgresql.conf"
su -s /bin/bash - postgres -c "pg_upgrade --old-bindir=/usr/lib/postgresql$OLD_VERSION/bin --new-bindir=/usr/lib/postgresql$NEW_VERSION/bin --old-datadir=\"$BACKUP_DIR\" --new-datadir=/var/lib/pgsql/data"
echo "Enable SSL again"
cp "${BACKUP_DIR}/postgresql.conf.bak" "${BACKUP_DIR}/postgresql.conf"
echo "DONE"`
// PostgreSQLVersionUpgradeTemplateData represents the information used to create the PostgreSQL version upgrade script.
type PostgreSQLVersionUpgradeTemplateData struct {
OldVersion string
NewVersion string
BackupDir string
}
// Render creates the PostgreSQL version upgrade script.
func (data PostgreSQLVersionUpgradeTemplateData) Render(wr io.Writer) error {
t := template.Must(template.New("script").Parse(postgreSQLVersionUpgradeScriptTemplate))
return t.Execute(wr, data)
}
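// Illustrative sketch (not part of the upstream sources): rendering the version
// upgrade script for a hypothetical 16 to 17 upgrade rooted at /var/lib/pgsql.
//
//	data := PostgreSQLVersionUpgradeTemplateData{
//		OldVersion: "16",
//		NewVersion: "17",
//		BackupDir:  "/var/lib/pgsql",
//	}
//	var script strings.Builder
//	if err := data.Render(&script); err != nil {
//		// handle the rendering error
//	}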
070701000000bc000081a400000000000000000000000168ed21dd00000597000000000000000000000000000000000000003500000000mgradm/shared/templates/postUpgradeScriptTemplate.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package templates
import (
"io"
"text/template"
)
const postUpgradeScriptTemplate = `
sed 's/cobbler\.host.*/cobbler\.host = localhost/' -i /etc/rhn/rhn.conf;
if grep -q uyuni_authentication_endpoint /etc/cobbler/settings.d/zz-uyuni.settings; then
sed 's/uyuni_authentication_endpoint.*/uyuni_authentication_endpoint: http:\/\/localhost/' \
-i /etc/cobbler/settings.d/zz-uyuni.settings;
else
echo 'uyuni_authentication_endpoint: "http://localhost"' >> /etc/cobbler/settings.d/zz-uyuni.settings
fi
if grep -q pam_auth_service /etc/rhn/rhn.conf; then
sed 's/pam_auth_service.*/pam_auth_service = susemanager/' -i /etc/rhn/rhn.conf;
else
echo 'pam_auth_service = susemanager' >> /etc/rhn/rhn.conf
fi
if test -e /etc/sysconfig/prometheus-postgres_exporter/systemd/60-server.conf; then
sed 's/\/etc\/postgres_exporter\//\/etc\/sysconfig\/prometheus-postgres_exporter\//' \
-i /etc/sysconfig/prometheus-postgres_exporter/systemd/60-server.conf;
fi
echo "DONE"`
// PostUpgradeTemplateData represents the information used to create the post-upgrade script.
type PostUpgradeTemplateData struct {
}
// Render creates the script to be run after the upgrade.
func (data PostUpgradeTemplateData) Render(wr io.Writer) error {
t := template.Must(template.New("script").Parse(postUpgradeScriptTemplate))
return t.Execute(wr, data)
}
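// Illustrative sketch (not part of the upstream sources): the post-upgrade script
// takes no parameters, so an empty template data value is enough to render it.
//
//	var script strings.Builder
//	if err := (PostUpgradeTemplateData{}).Render(&script); err != nil {
//		// handle the rendering error
//	}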
070701000000bd000081a400000000000000000000000168ed21dd00000684000000000000000000000000000000000000003200000000mgradm/shared/templates/reusedCaIssuerTemplate.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package templates
import (
"io"
"text/template"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
)
const uyuniCAIssuer = `apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: ` + kubernetes.CAIssuerName + `
namespace: {{ .IssuerTemplate.Namespace }}
labels:
app: ` + kubernetes.ServerApp + `
spec:
ca:
secretName: ` + kubernetes.CASecretName + `
---
`
const reusedCAIssuerTemplate = `apiVersion: v1
kind: Secret
type: kubernetes.io/tls
metadata:
name: ` + kubernetes.CASecretName + `
namespace: {{ .IssuerTemplate.Namespace }}
labels:
app: ` + kubernetes.ServerApp + `
data:
ca.crt: {{ .Certificate }}
tls.crt: {{ .Certificate }}
tls.key: {{ .Key }}
---
`
// NewReusedCAIssuerTemplate creates a new ReusedCAIssuerTemplate object.
func NewReusedCAIssuerTemplate(
namespace string,
fqdn string,
certificate string,
key string,
) ReusedCAIssuerTemplate {
template := ReusedCAIssuerTemplate{
IssuerTemplate: IssuerTemplate{
Namespace: namespace,
FQDN: fqdn,
},
Certificate: certificate,
Key: key,
}
return template
}
// ReusedCAIssuerTemplate is a template to render a cert-manager issuer from an existing root CA.
type ReusedCAIssuerTemplate struct {
IssuerTemplate
Certificate string
Key string
Template string
}
// Render writes the issuer text to the wr parameter.
func (data ReusedCAIssuerTemplate) Render(wr io.Writer) error {
t := template.Must(template.New("issuer").Parse(reusedCAIssuerTemplate + uyuniCAIssuer))
return t.Execute(wr, data)
}
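// Illustrative sketch (not part of the upstream sources): building the issuer and
// secret manifests from an existing CA. The namespace, FQDN and the base64-encoded
// certificate material below are hypothetical.
//
//	issuer := NewReusedCAIssuerTemplate("uyuni", "server.example.com", encodedCert, encodedKey)
//	var manifest strings.Builder
//	if err := issuer.Render(&manifest); err != nil {
//		// handle the rendering error
//	}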
070701000000be000081a400000000000000000000000168ed21dd000007ab000000000000000000000000000000000000003100000000mgradm/shared/templates/salineServiceTemplate.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package templates
import (
"io"
"text/template"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
const salineServiceTemplate = `
# uyuni-saline.service, generated by mgradm
# Use an uyuni-saline.service.d/custom.conf file to override
[Unit]
Description=Uyuni Saline image container service
Wants=network.target uyuni-server.service
Requires=uyuni-server.service
After=network-online.target uyuni-server.service
RequiresMountsFor=%t/containers
[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=on-failure
ExecStartPre=/bin/rm -f %t/uyuni-saline-%i.pid %t/%n-%i.ctr-id
ExecStartPre=/usr/bin/podman rm --ignore --force -t 10 {{ .NamePrefix }}-saline-%i
ExecStart=/bin/sh -c '/usr/bin/podman run \
--conmon-pidfile %t/uyuni-saline-%i.pid \
--cidfile=%t/%n-%i.ctr-id \
--cgroups=no-conmon \
--sdnotify=conmon \
--security-opt label=type:container_init_t \
-d \
--name {{ .NamePrefix }}-saline-%i \
--hostname {{ .NamePrefix }}-saline-%i.mgr.internal \
--network-alias saline \
--network {{ .Network }} \
{{- range .Volumes }}
-v {{ .Name }}:{{ .MountPath }} \
{{- end }}
-e TZ=${TZ} \
-e NOSSL=YES \
${UYUNI_SALINE_IMAGE}'
ExecStop=/usr/bin/podman stop --ignore -t 10 --cidfile=%t/%n-%i.ctr-id
ExecStopPost=/usr/bin/podman rm -f --ignore -t 10 --cidfile=%t/%n-%i.ctr-id
PIDFile=%t/uyuni-saline-%i.pid
TimeoutStopSec=20
TimeoutStartSec=10
Type=forking
[Install]
WantedBy=multi-user.target default.target
`
// SalineServiceTemplateData holds information to create systemd file for saline container.
type SalineServiceTemplateData struct {
NamePrefix string
Image string
Network string
Volumes []types.VolumeMount
}
// Render will create the systemd configuration file.
func (data SalineServiceTemplateData) Render(wr io.Writer) error {
t := template.Must(template.New("service").Parse(salineServiceTemplate))
return t.Execute(wr, data)
}
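// Illustrative sketch (not part of the upstream sources): rendering the Saline
// service unit. The volume name and mount path below are hypothetical.
//
//	data := SalineServiceTemplateData{
//		NamePrefix: "uyuni",
//		Network:    "uyuni",
//		Volumes:    []types.VolumeMount{{Name: "etc-salt", MountPath: "/etc/salt"}},
//	}
//	var unit strings.Builder
//	if err := data.Render(&unit); err != nil {
//		// handle the rendering error
//	}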
070701000000bf000081a400000000000000000000000168ed21dd00000c01000000000000000000000000000000000000002b00000000mgradm/shared/templates/serviceTemplate.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package templates
import (
"io"
"text/template"
"github.com/uyuni-project/uyuni-tools/shared/types"
)
const serviceTemplate = `# uyuni-server.service, generated by mgradm
# Use an uyuni-server.service.d/local.conf file to override
[Unit]
Description=Uyuni server image container service
Wants=network.target
After=network-online.target
RequiresMountsFor=%t/containers
[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=on-failure
ExecStartPre=/bin/rm -f %t/uyuni-server.pid %t/%n.ctr-id
ExecStartPre=/usr/bin/podman rm --ignore --force -t 10 {{ .NamePrefix }}-server
ExecStart=/bin/sh -c '/usr/bin/podman run \
--conmon-pidfile %t/uyuni-server.pid \
--cidfile=%t/%n.ctr-id \
--cgroups=no-conmon \
--shm-size=0 \
--shm-size-systemd=0 \
--sdnotify=conmon \
-d \
--name {{ .NamePrefix }}-server \
--hostname {{ .NamePrefix }}-server.mgr.internal \
{{ .Args }} \
{{- range .Ports }}
-p {{ .Exposed }}:{{ .Port }}{{if .Protocol}}/{{ .Protocol }}{{end}} \
{{- if $.IPV6Enabled }}
-p [::]:{{ .Exposed }}:{{ .Port }}{{if .Protocol}}/{{ .Protocol }}{{end}} \
{{- end }}
{{- end }}
{{- range .Volumes }}
-v {{ .Name }}:{{ .MountPath }} \
{{- end }}
-e TZ=${TZ} \
-e UYUNI_HOSTNAME=${UYUNI_HOSTNAME} \
--network {{ .Network }} \
--secret {{ .CaSecret }},type=mount,target={{ .CaPath }} \
--secret {{ .CaSecret }},type=mount,target=/usr/share/susemanager/salt/certs/RHN-ORG-TRUSTED-SSL-CERT \
--secret {{ .CaSecret }},type=mount,target=/srv/www/htdocs/pub/RHN-ORG-TRUSTED-SSL-CERT \
--secret {{ .CertSecret }},type=mount,target={{ .CertPath }} \
--secret {{ .KeySecret }},type=mount,target={{ .KeyPath }} \
--secret {{ .DBCaSecret }},type=mount,target={{ .DBCaPath }} \
${PODMAN_EXTRA_ARGS} ${UYUNI_IMAGE}'
ExecStop=/usr/bin/podman exec \
{{ .NamePrefix }}-server \
/bin/bash -c 'spacewalk-service stop'
ExecStop=/usr/bin/podman stop \
--ignore -t 10 \
--cidfile=%t/%n.ctr-id
ExecStopPost=/usr/bin/podman rm \
-f \
--ignore -t 10 \
--cidfile=%t/%n.ctr-id
PIDFile=%t/uyuni-server.pid
TimeoutStopSec=180
TimeoutStartSec=900
Type=forking
[Install]
WantedBy=multi-user.target default.target
`
// PodmanServiceTemplateData holds the information needed to create the systemd unit file for the server container.
type PodmanServiceTemplateData struct {
Volumes []types.VolumeMount
NamePrefix string
Args string
Ports []types.PortMap
Image string
Network string
IPV6Enabled bool
CaSecret string
CaPath string
DBCaSecret string
DBCaPath string
CertSecret string
CertPath string
KeySecret string
KeyPath string
AdminUser string
AdminPassword string
ManagerUser string
ManagerPassword string
ReportUser string
ReportPassword string
}
// Render will create the systemd configuration file.
func (data PodmanServiceTemplateData) Render(wr io.Writer) error {
t := template.Must(template.New("service").Parse(serviceTemplate))
return t.Execute(wr, data)
}
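// Illustrative sketch (not part of the upstream sources): rendering the server
// service unit. Only a few fields are set here; the secret and port fields follow
// the same pattern and are omitted for brevity. All values are hypothetical.
//
//	data := PodmanServiceTemplateData{
//		NamePrefix: "uyuni",
//		Network:    "uyuni",
//		Args:       "--memory=16g",
//		Volumes:    []types.VolumeMount{{Name: "srv-www", MountPath: "/srv/www"}},
//	}
//	var unit strings.Builder
//	if err := data.Render(&unit); err != nil {
//		// handle the rendering error
//	}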
070701000000c0000081a400000000000000000000000168ed21dd000003ba000000000000000000000000000000000000002500000000mgradm/shared/templates/tlsSecret.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package templates
import (
"io"
"text/template"
"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
)
// tlsSecretTemplate renders a kubernetes.io/tls secret holding a self-signed or provided CA certificate and key.
const tlsSecretTemplate = `apiVersion: v1
kind: Secret
type: kubernetes.io/tls
metadata:
name: {{ .Name }}
namespace: {{ .Namespace }}
labels:
app: ` + kubernetes.ServerApp + `
data:
ca.crt: {{ .RootCa }}
tls.crt: {{ .Certificate }}
tls.key: {{ .Key }}
`
// TLSSecretTemplateData contains information to create secret configuration file.
type TLSSecretTemplateData struct {
Name string
Namespace string
RootCa string
Certificate string
Key string
}
// Render creates secret configuration file.
func (data TLSSecretTemplateData) Render(wr io.Writer) error {
t := template.Must(template.New("secret").Parse(tlsSecretTemplate))
return t.Execute(wr, data)
}
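// Illustrative sketch (not part of the upstream sources): rendering the TLS secret
// manifest. The values are hypothetical and, as for any kubernetes.io/tls secret,
// are expected to be base64 encoded.
//
//	data := TLSSecretTemplateData{
//		Name:        "uyuni-cert",
//		Namespace:   "uyuni",
//		RootCa:      encodedRootCa,
//		Certificate: encodedCertificate,
//		Key:         encodedKey,
//	}
//	var manifest strings.Builder
//	if err := data.Render(&manifest); err != nil {
//		// handle the rendering error
//	}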
070701000000c1000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001800000000mgradm/shared/templates070701000000c2000081a400000000000000000000000168ed21dd0000377b000000000000000000000000000000000000002100000000mgradm/shared/utils/cmd_utils.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package utils
import (
"fmt"
"path"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/ssl"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
var defaultImage = path.Join(utils.DefaultRegistry, "server")
// UseProvided returns true if the server can use an SSL certificate provided by flags.
func (f *InstallSSLFlags) UseProvided() bool {
return f.Server.IsDefined() && f.Ca.IsThirdParty()
}
// UseProvidedDB returns true if the DB can use an SSL certificate provided by flags.
func (f *InstallSSLFlags) UseProvidedDB() bool {
return f.DB.IsDefined() && f.DB.CA.IsThirdParty()
}
// UseMigratedCa returns true if a migrated CA and key can be used.
func (f *InstallSSLFlags) UseMigratedCa() bool {
return f.Ca.Root != "" && f.Ca.Key != ""
}
// CheckParameters checks that all the required flags are passed if using 3rd party certificates.
//
// localDB indicates whether the SSL certificates for the database need to be checked.
// Those are not needed for external databases.
func (f *InstallSSLFlags) CheckParameters(localDB bool) {
if !f.UseProvided() && (f.Server.Cert != "" || f.Server.Key != "" || f.Ca.IsDefined()) {
log.Fatal().Msg(L("Server certificate, key and root CA need to be all provided"))
}
if f.UseProvided() && localDB && !f.DB.IsDefined() {
log.Fatal().Msg(L("Database certificate and key need to be provided"))
}
}
// CheckUpgradeParameters checks that all the required flags are passed if using 3rd party certificates.
//
// localDB indicates whether the SSL certificates for the database need to be checked.
// Those are not needed for external databases.
func (f *InstallSSLFlags) CheckUpgradeParameters(localDB bool) {
if !f.UseProvidedDB() && (f.DB.Cert != "" || f.DB.Key != "" || f.DB.CA.IsDefined()) {
log.Fatal().Msg(L("DB certificate, key and root CA need to be all provided"))
}
if f.UseProvided() && localDB && !f.DB.IsDefined() {
log.Fatal().Msg(L("Database certificate and key need to be provided"))
}
}
// AddHelmInstallFlag adds the Helm install flags to a command.
func AddHelmInstallFlag(cmd *cobra.Command) {
cmd.Flags().String("kubernetes-uyuni-namespace", "default", L("Kubernetes namespace where to install uyuni"))
cmd.Flags().String("kubernetes-certmanager-namespace", "cert-manager",
L("Kubernetes namespace where to install cert-manager"),
)
cmd.Flags().String("kubernetes-certmanager-chart", "",
L("URL to the cert-manager helm chart. To be used for offline installations"),
)
cmd.Flags().String("kubernetes-certmanager-version", "", L("Version of the cert-manager helm chart"))
cmd.Flags().String("kubernetes-certmanager-values", "",
L("Path to a values YAML file to use for cert-manager helm install"),
)
_ = utils.AddFlagHelpGroup(cmd, &utils.Group{ID: "helm", Title: L("Helm Chart Flags")})
_ = utils.AddFlagToHelpGroupID(cmd, "kubernetes-uyuni-namespace", "helm")
_ = utils.AddFlagToHelpGroupID(cmd, "kubernetes-certmanager-namespace", "helm")
_ = utils.AddFlagToHelpGroupID(cmd, "kubernetes-certmanager-chart", "helm")
_ = utils.AddFlagToHelpGroupID(cmd, "kubernetes-certmanager-version", "helm")
_ = utils.AddFlagToHelpGroupID(cmd, "kubernetes-certmanager-values", "helm")
}
const volumesFlagsGroupID = "volumes"
// AddVolumesFlags adds the Kubernetes volumes configuration parameters to the command.
func AddVolumesFlags(cmd *cobra.Command) {
cmd.Flags().String("volumes-class", "", L("Default storage class for all the volumes"))
cmd.Flags().String("volumes-mirror", "",
L("PersistentVolume name to use as a mirror. Empty means no mirror is used"),
)
_ = utils.AddFlagHelpGroup(cmd, &utils.Group{ID: volumesFlagsGroupID, Title: L("Volumes Configuration Flags")})
_ = utils.AddFlagToHelpGroupID(cmd, "volumes-class", volumesFlagsGroupID)
_ = utils.AddFlagToHelpGroupID(cmd, "volumes-mirror", volumesFlagsGroupID)
addVolumeFlags(cmd, "database", "var-pgsql", "50Gi")
addVolumeFlags(cmd, "packages", "var-spacewalk", "100Gi")
addVolumeFlags(cmd, "www", "srv-www", "100Gi")
addVolumeFlags(cmd, "cache", "var-cache", "10Gi")
}
func addVolumeFlags(cmd *cobra.Command, name string, volumeName string, size string) {
sizeName := fmt.Sprintf("volumes-%s-size", name)
cmd.Flags().String(
sizeName, size, fmt.Sprintf(L("Requested size for the %s volume"), volumeName),
)
_ = utils.AddFlagToHelpGroupID(cmd, sizeName, volumesFlagsGroupID)
className := fmt.Sprintf("volumes-%s-class", name)
cmd.Flags().String(
className, "", fmt.Sprintf(L("Requested storage class for the %s volume"), volumeName),
)
_ = utils.AddFlagToHelpGroupID(cmd, className, volumesFlagsGroupID)
}
// AddContainerImageFlags adds the container image flags to a command.
func AddContainerImageFlags(
cmd *cobra.Command,
container string,
displayName string,
groupName string,
imageName string,
) {
defaultImage := path.Join(utils.DefaultRegistry, imageName)
cmd.Flags().String(container+"-image", defaultImage,
fmt.Sprintf(L("Image for %s container"), displayName))
cmd.Flags().String(container+"-tag", "",
fmt.Sprintf(L("Tag for %s container, overrides the global value if set"), displayName))
if groupName != "" {
_ = utils.AddFlagToHelpGroupID(cmd, container+"-image", groupName)
_ = utils.AddFlagToHelpGroupID(cmd, container+"-tag", groupName)
}
}
// AddReportDBFlags adds the report database flags to a command.
func AddReportDBFlags(cmd *cobra.Command) {
cmd.Flags().String("reportdb-name", "reportdb", L("Report database name"))
cmd.Flags().String("reportdb-host", "reportdb", L("Report database host"))
cmd.Flags().Int("reportdb-port", 5432, L("Report database port"))
cmd.Flags().String("reportdb-user", "pythia_susemanager", L("Report Database username"))
cmd.Flags().String("reportdb-password", "", L("Report database password. Randomly generated by default"))
_ = utils.AddFlagHelpGroup(cmd, &utils.Group{ID: "reportdb", Title: L("Report DB Flags")})
_ = utils.AddFlagToHelpGroupID(cmd, "reportdb-name", "reportdb")
_ = utils.AddFlagToHelpGroupID(cmd, "reportdb-host", "reportdb")
_ = utils.AddFlagToHelpGroupID(cmd, "reportdb-port", "reportdb")
_ = utils.AddFlagToHelpGroupID(cmd, "reportdb-user", "reportdb")
_ = utils.AddFlagToHelpGroupID(cmd, "reportdb-password", "reportdb")
}
// AddDBFlags adds the database flags to a command.
func AddDBFlags(cmd *cobra.Command) {
cmd.Flags().String("db-user", "spacewalk", L("Database user"))
cmd.Flags().String("db-password", "", L("Database password. Randomly generated by default"))
cmd.Flags().String("db-name", "susemanager", L("Database name"))
cmd.Flags().String("db-host", "db", L("Database host"))
cmd.Flags().Int("db-port", 5432, L("Database port"))
cmd.Flags().String("db-admin-user", "postgres", L("Database admin user name"))
cmd.Flags().String("db-admin-password", "", L("Database admin password"))
cmd.Flags().String("db-provider", "", L("External database provider. Possible values 'aws'"))
_ = utils.AddFlagHelpGroup(cmd, &utils.Group{ID: "db", Title: L("Database Flags")})
_ = utils.AddFlagToHelpGroupID(cmd, "db-user", "db")
_ = utils.AddFlagToHelpGroupID(cmd, "db-password", "db")
_ = utils.AddFlagToHelpGroupID(cmd, "db-name", "db")
_ = utils.AddFlagToHelpGroupID(cmd, "db-host", "db")
_ = utils.AddFlagToHelpGroupID(cmd, "db-port", "db")
_ = utils.AddFlagToHelpGroupID(cmd, "db-admin-user", "db")
_ = utils.AddFlagToHelpGroupID(cmd, "db-admin-password", "db")
_ = utils.AddFlagToHelpGroupID(cmd, "db-provider", "db")
}
// AddSCCFlag adds the SCC flags to a command.
func AddSCCFlag(cmd *cobra.Command) {
cmd.Flags().String("scc-user", "", L(`SUSE Customer Center username.
It will be used as SCC credentials for products synchronization and to pull images from registry.suse.com`))
cmd.Flags().String("scc-password", "", L(`SUSE Customer Center password.
It will be used as SCC credentials for products synchronization and to pull images from registry.suse.com`))
_ = utils.AddFlagHelpGroup(cmd, &utils.Group{ID: "scc", Title: L("SUSE Customer Center Flags")})
_ = utils.AddFlagToHelpGroupID(cmd, "scc-user", "scc")
_ = utils.AddFlagToHelpGroupID(cmd, "scc-password", "scc")
}
// AddImageFlag adds the image flags to a command.
func AddImageFlag(cmd *cobra.Command) {
cmd.Flags().String("image", defaultImage, L("Image"))
cmd.Flags().String("registry", utils.DefaultRegistry, L("Specify a registry where to pull the images from"))
cmd.Flags().String("tag", utils.DefaultTag, L("Tag Image"))
utils.AddPullPolicyFlag(cmd)
_ = utils.AddFlagHelpGroup(cmd, &utils.Group{ID: "image", Title: L("Image Flags")})
_ = utils.AddFlagToHelpGroupID(cmd, "image", "image")
_ = utils.AddFlagToHelpGroupID(cmd, "registry", "") // without group, since this flag is applied to all the images
_ = utils.AddFlagToHelpGroupID(cmd, "tag", "image")
_ = utils.AddFlagToHelpGroupID(cmd, "pullPolicy", "") // without group, since this flag is applied to all the images
}
// AddDBUpgradeImageFlag adds the database upgrade image flags to a command.
func AddDBUpgradeImageFlag(cmd *cobra.Command) {
cmd.Flags().String("dbupgrade-image", "", L("Database upgrade image"))
cmd.Flags().String("dbupgrade-tag", "latest", L("Database upgrade image tag"))
_ = utils.AddFlagHelpGroup(cmd, &utils.Group{ID: "dbupgrade-image", Title: L("Database Upgrade Image Flags")})
_ = utils.AddFlagToHelpGroupID(cmd, "dbupgrade-image", "dbupgrade-image")
_ = utils.AddFlagToHelpGroupID(cmd, "dbupgrade-tag", "dbupgrade-image")
}
// AddMirrorFlag adds the flag for the mirror.
func AddMirrorFlag(cmd *cobra.Command) {
cmd.Flags().String("mirror", "", L("Path to mirrored packages mounted on the host"))
}
// AddCocoFlag adds the confidential computing related parameters to cmd.
func AddCocoFlag(cmd *cobra.Command) {
_ = utils.AddFlagHelpGroup(cmd, &utils.Group{ID: "coco-container", Title: L("Confidential Computing Flags")})
AddContainerImageFlags(cmd, "coco", L("Confidential computing attestation"), "coco-container", "server-attestation")
cmd.Flags().Int("coco-replicas", 0, L("How many replicas of the confidential computing container should be started"))
_ = utils.AddFlagToHelpGroupID(cmd, "coco-replicas", "coco-container")
}
// AddUpgradeCocoFlag adds the confidential computing related parameters to cmd upgrade.
func AddUpgradeCocoFlag(cmd *cobra.Command) {
_ = utils.AddFlagHelpGroup(cmd, &utils.Group{ID: "coco-container", Title: L("Confidential Computing Flags")})
AddContainerImageFlags(cmd, "coco", L("Confidential computing attestation"), "coco-container", "server-attestation")
cmd.Flags().Int("coco-replicas", 0, L(`How many replicas of the confidential computing container should be started.
Leave it unset if you want to keep the previous number of replicas.`))
_ = utils.AddFlagToHelpGroupID(cmd, "coco-replicas", "coco-container")
}
// AddHubXmlrpcFlags adds hub XML-RPC related parameters to cmd.
func AddHubXmlrpcFlags(cmd *cobra.Command) {
_ = utils.AddFlagHelpGroup(cmd, &utils.Group{ID: "hubxmlrpc-container", Title: L("Hub XML-RPC API")})
AddContainerImageFlags(cmd, "hubxmlrpc", L("Hub XML-RPC API"), "hubxmlrpc-container", "server-hub-xmlrpc-api")
cmd.Flags().Int("hubxmlrpc-replicas", 0,
L("How many replicas of the Hub XML-RPC API service container should be started."),
)
_ = utils.AddFlagToHelpGroupID(cmd, "hubxmlrpc-replicas", "hubxmlrpc-container")
}
// AddUpgradeHubXmlrpcFlags adds hub XML-RPC related parameters to cmd upgrade.
func AddUpgradeHubXmlrpcFlags(cmd *cobra.Command) {
_ = utils.AddFlagHelpGroup(cmd, &utils.Group{ID: "hubxmlrpc-container", Title: L("Hub XML-RPC API")})
AddContainerImageFlags(cmd, "hubxmlrpc", L("Hub XML-RPC API"), "hubxmlrpc-container", "server-hub-xmlrpc-api")
cmd.Flags().Int("hubxmlrpc-replicas", 0,
L(`How many replicas of the Hub XML-RPC API service container should be started.
Leave it unset if you want to keep the previous number of replicas.`))
_ = utils.AddFlagToHelpGroupID(cmd, "hubxmlrpc-replicas", "hubxmlrpc-container")
}
// AddSalineFlag adds the Saline related parameters to cmd.
func AddSalineFlag(cmd *cobra.Command) {
_ = utils.AddFlagHelpGroup(cmd, &utils.Group{ID: "saline-container", Title: L("Saline Flags")})
AddContainerImageFlags(cmd, "saline", L("Saline"), "saline-container", "server-saline")
cmd.Flags().Int("saline-replicas", 0, L(`How many replicas of the Saline container should be started
(only 0 or 1 supported for now)`))
cmd.Flags().Int("saline-port", 8216, L("Saline port"))
_ = utils.AddFlagToHelpGroupID(cmd, "saline-replicas", "saline-container")
_ = utils.AddFlagToHelpGroupID(cmd, "saline-port", "saline-container")
}
// AddUpgradeSalineFlag adds the Saline related parameters to cmd upgrade.
func AddUpgradeSalineFlag(cmd *cobra.Command) {
_ = utils.AddFlagHelpGroup(cmd, &utils.Group{ID: "saline-container", Title: L("Saline Flags")})
AddContainerImageFlags(cmd, "saline", L("Saline"), "saline-container", "server-saline")
cmd.Flags().Int("saline-replicas", 0, L(`How many replicas of the Saline container should be started.
Leave it unset if you want to keep the previous number of replicas.
(only 0 or 1 supported for now)`))
cmd.Flags().Int("saline-port", 8216, L("Saline port"))
_ = utils.AddFlagToHelpGroupID(cmd, "saline-replicas", "saline-container")
_ = utils.AddFlagToHelpGroupID(cmd, "saline-port", "saline-container")
}
// AddPgsqlFlags adds PostgreSQL related parameters to cmd.
func AddPgsqlFlags(cmd *cobra.Command) {
_ = utils.AddFlagHelpGroup(cmd, &utils.Group{ID: "pgsql-container", Title: L("PostgreSQL Database Container Flags")})
AddContainerImageFlags(cmd, "pgsql", L("PostgreSQL Database"), "pgsql-container", "server-postgresql")
}
// AddServerFlags adds the flags common to the install, upgrade and migration commands.
func AddServerFlags(cmd *cobra.Command) {
AddImageFlag(cmd)
AddSCCFlag(cmd)
AddPgsqlFlags(cmd)
AddDBFlags(cmd)
AddReportDBFlags(cmd)
ssl.AddSSLGenerationFlags(cmd)
ssl.AddSSLThirdPartyFlags(cmd)
ssl.AddSSLDBThirdPartyFlags(cmd)
cmd.Flags().String("ssl-password", "", L("Password for the CA key to generate"))
_ = utils.AddFlagToHelpGroupID(cmd, "ssl-password", ssl.GeneratedFlagsGroup)
}
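// Illustrative sketch (not part of the upstream sources): wiring the shared server
// flags onto a hypothetical cobra command.
//
//	cmd := &cobra.Command{Use: "example"}
//	AddServerFlags(cmd)
//	AddMirrorFlag(cmd)
//	AddCocoFlag(cmd)
//	AddHubXmlrpcFlags(cmd)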
070701000000c3000081a400000000000000000000000168ed21dd000017b8000000000000000000000000000000000000001c00000000mgradm/shared/utils/exec.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package utils
import (
"errors"
"fmt"
"os/exec"
"strings"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/templates"
"github.com/uyuni-project/uyuni-tools/shared"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
// ExecCommand executes the given command inside the server container and logs its output at the given level.
func ExecCommand(logLevel zerolog.Level, cnx *shared.Connection, args ...string) error {
podName, err := cnx.GetPodName()
if err != nil {
return utils.Error(err, L("exec command failed"))
}
commandArgs := []string{"exec", podName}
command, err := cnx.GetCommand()
if err != nil {
log.Fatal().Err(err).Send()
}
if command == "kubectl" {
namespace, err := cnx.GetNamespace("")
if namespace == "" {
return utils.Error(err, L("failed retrieving namespace"))
}
commandArgs = append(commandArgs, "-n", namespace, "-c", "uyuni", "--")
}
commandArgs = append(commandArgs, "sh", "-c", strings.Join(args, " "))
runCmd := exec.Command(command, commandArgs...)
logger := log.Logger.Level(logLevel)
runCmd.Stdout = logger
runCmd.Stderr = logger
return runCmd.Run()
}
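// Illustrative sketch (not part of the upstream sources): running a command inside
// the server container and logging its output at debug level. This assumes a
// connection built with the shared.NewConnection helper; the container name and
// command are hypothetical.
//
//	cnx := shared.NewConnection("podman", "uyuni-server", "")
//	if err := ExecCommand(zerolog.DebugLevel, cnx, "ls", "/etc/rhn"); err != nil {
//		// handle the execution error
//	}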
// GeneratePgsqlVersionUpgradeScript generates the PostgreSQL version upgrade script.
func GeneratePgsqlVersionUpgradeScript(
oldPgVersion string,
newPgVersion string,
backupDir string,
) (string, error) {
data := templates.PostgreSQLVersionUpgradeTemplateData{
OldVersion: oldPgVersion,
NewVersion: newPgVersion,
BackupDir: backupDir,
}
scriptBuilder := new(strings.Builder)
if err := data.Render(scriptBuilder); err != nil {
return "", utils.Error(err, L("failed to render database upgrade script"))
}
return scriptBuilder.String(), nil
}
// GenerateFinalizePostgresScript generates the script to finalize PostgreSQL upgrade.
func GenerateFinalizePostgresScript(
runReindex bool, runSchemaUpdate bool, migration bool, kubernetes bool,
) (string, error) {
data := templates.FinalizePostgresTemplateData{
RunReindex: runReindex,
RunSchemaUpdate: runSchemaUpdate,
Migration: migration,
Kubernetes: kubernetes,
}
scriptBuilder := new(strings.Builder)
if err := data.Render(scriptBuilder); err != nil {
return "", utils.Error(err, L("failed to render database finalization script"))
}
return scriptBuilder.String(), nil
}
// GeneratePostUpgradeScript generates the script to be run after upgrade.
func GeneratePostUpgradeScript() (string, error) {
data := templates.PostUpgradeTemplateData{}
scriptBuilder := new(strings.Builder)
if err := data.Render(scriptBuilder); err != nil {
return "", utils.Error(err, L("failed to render database post upgrade script"))
}
return scriptBuilder.String(), nil
}
// RunMigration executes the migration script.
func RunMigration(cnx *shared.Connection, scriptName string) error {
log.Info().Msg(L("Migrating server"))
err := ExecCommand(zerolog.InfoLevel, cnx, "/var/lib/uyuni-tools/"+scriptName)
if err != nil {
return utils.Error(err, L("error running the migration script"))
}
return nil
}
// GenerateMigrationScript generates the script that performs the migration.
func GenerateMigrationScript(
sourceFqdn string,
user string,
kubernetes bool,
prepare bool,
dbHost string,
reportDBHost string,
) (string, error) {
// For podman we want to backup tls certificates to the temporary volume we
// later use when creating secrets.
volumes := append(utils.ServerVolumeMounts, utils.VarPgsqlDataVolumeMount)
if !kubernetes {
volumes = append(volumes, utils.EtcTLSTmpVolumeMount)
}
data := templates.MigrateScriptTemplateData{
Volumes: volumes,
SourceFqdn: sourceFqdn,
User: user,
Kubernetes: kubernetes,
Prepare: prepare,
DBHost: dbHost,
ReportDBHost: reportDBHost,
}
scriptBuilder := new(strings.Builder)
if err := data.Render(scriptBuilder); err != nil {
return "", utils.Error(err, L("failed to generate migration script"))
}
return scriptBuilder.String(), nil
}
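// Illustrative sketch (not part of the upstream sources): generating a podman
// migration script in preparation mode. The source FQDN and user are hypothetical.
//
//	script, err := GenerateMigrationScript("old-server.example.com", "root", false, true, "db", "reportdb")
//	if err != nil {
//		// handle the generation error
//	}
//	_ = script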
// SanityCheck verifies if an upgrade can be run.
func SanityCheck(runningValues *utils.ServerInspectData, inspectedValues *utils.ServerInspectData) error {
// Skip the uyuni / SUSE Manager release checks if the runningValues is nil.
if runningValues == nil {
return nil
}
isUyuni := runningValues.UyuniRelease != ""
isUyuniImage := inspectedValues.UyuniRelease != ""
isSumaImage := inspectedValues.SuseManagerRelease != ""
if isUyuni && isSumaImage {
return fmt.Errorf(
L("currently SUSE Manager %s is installed, instead the image is Uyuni. Upgrade is not supported"),
inspectedValues.SuseManagerRelease,
)
}
if !isUyuni && isUyuniImage {
return fmt.Errorf(
L("currently Uyuni %s is installed, instead the image is SUSE Manager. Upgrade is not supported"),
inspectedValues.UyuniRelease,
)
}
if isUyuni {
currentUyuniRelease := runningValues.UyuniRelease
log.Debug().Msgf("Current release is %s", string(currentUyuniRelease))
if !isUyuniImage {
return errors.New(L("cannot fetch release from server image"))
}
log.Debug().Msgf("Server image release is %s", inspectedValues.UyuniRelease)
if utils.CompareVersion(inspectedValues.UyuniRelease, string(currentUyuniRelease)) < 0 {
return fmt.Errorf(
L("cannot downgrade from version %[1]s to %[2]s"),
string(currentUyuniRelease), inspectedValues.UyuniRelease,
)
}
} else {
currentSuseManagerRelease := runningValues.SuseManagerRelease
log.Debug().Msgf("Current release is %s", currentSuseManagerRelease)
if !isSumaImage {
return errors.New(L("cannot fetch release from server image"))
}
log.Debug().Msgf("Server image release is %s", inspectedValues.SuseManagerRelease)
if utils.CompareVersion(inspectedValues.SuseManagerRelease, currentSuseManagerRelease) < 0 {
return fmt.Errorf(
L("cannot downgrade from version %[1]s to %[2]s"),
currentSuseManagerRelease, inspectedValues.SuseManagerRelease,
)
}
}
return nil
}
070701000000c4000081a400000000000000000000000168ed21dd00000733000000000000000000000000000000000000002100000000mgradm/shared/utils/exec_test.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package utils
import (
"fmt"
"strings"
"testing"
"github.com/uyuni-project/uyuni-tools/shared/testutils"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
func TestSanityCheck(t *testing.T) {
type dataType struct {
oldUyuniRelease string
oldSumaRelease string
oldPgsqlVersion string
newUyuniRelease string
newSumaRelease string
newPgsqlVersion string
errorPart string
}
data := []dataType{
{"2024.07", "", "16", "2024.13", "", "17", ""},
{"", "5.0.1", "16", "", "5.1.0", "17", ""},
{
"2024.13", "", "17", "2024.07", "", "16",
"cannot downgrade",
},
{
"", "5.1.0", "17", "", "5.0.1", "16",
"cannot downgrade",
},
{
"2024.07", "", "16", "", "5.1.0", "17",
"Upgrade is not supported",
},
{
"", "5.1.0", "17", "2024.07", "", "16",
"Upgrade is not supported",
},
}
for i, test := range data {
runningValues := utils.ServerInspectData{
UyuniRelease: test.oldUyuniRelease,
SuseManagerRelease: test.oldSumaRelease,
}
newValues := utils.ServerInspectData{
CommonInspectData: utils.CommonInspectData{
CurrentPgVersion: test.oldPgsqlVersion,
},
DBInspectData: utils.DBInspectData{
ImagePgVersion: test.newPgsqlVersion,
},
UyuniRelease: test.newUyuniRelease,
SuseManagerRelease: test.newSumaRelease,
}
err := SanityCheck(&runningValues, &newValues)
if test.errorPart != "" {
if err != nil {
testutils.AssertTrue(
t, fmt.Sprintf("test %d: Unexpected error message: %s", i+1, err),
strings.Contains(err.Error(), test.errorPart),
)
} else {
t.Errorf("test %d: expected an error, got none", i+1)
}
} else {
testutils.AssertEquals(t, fmt.Sprintf("test %d: unexpected error", i+1), nil, err)
}
}
}
070701000000c5000081a400000000000000000000000168ed21dd000015f3000000000000000000000000000000000000001d00000000mgradm/shared/utils/flags.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package utils
import (
"fmt"
"net/mail"
"regexp"
"strings"
"github.com/spf13/cobra"
apiTypes "github.com/uyuni-project/uyuni-tools/shared/api/types"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/podman"
"github.com/uyuni-project/uyuni-tools/shared/types"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
// ServerFlags is a structure hosting the parameters for installation, migration and upgrade.
type ServerFlags struct {
Image types.ImageFlags `mapstructure:",squash"`
Coco CocoFlags
Mirror string
HubXmlrpc HubXmlrpcFlags
Migration MigrationFlags `mapstructure:",squash"`
Installation InstallationFlags `mapstructure:",squash"`
// DBUpgradeImage is the image to use to perform the database upgrade.
DBUpgradeImage types.ImageFlags `mapstructure:"dbupgrade"`
Saline SalineFlags
Pgsql types.PgsqlFlags
}
// MigrationFlags contains the parameters that are used only for migration.
type MigrationFlags struct {
// Prepare defines whether to run the full migration or just the data synchronization.
Prepare bool
// User is the username to use to connect to the source server in a migration.
User string
}
// InstallationFlags contains the parameters that are used only for the installation of a new server.
type InstallationFlags struct {
TZ string
Email string
EmailFrom string
IssParent string
Tftp bool
DB DBFlags
ReportDB DBFlags
SSL InstallSSLFlags
SCC types.SCCCredentials
Debug DebugFlags
Admin apiTypes.User
Organization string
}
var systemd podman.Systemd = podman.NewSystemd()
// CheckUpgradeParameters verifies the consistency of the parameters for upgrade and migrate commands.
func (flags *InstallationFlags) CheckUpgradeParameters(cmd *cobra.Command, command string) {
flags.setPasswordIfMissing()
flags.checkUpgradeSSLParameters(cmd, command)
}
func (flags *InstallationFlags) setPasswordIfMissing() {
if flags.DB.Password == "" {
flags.DB.Password = utils.GetRandomBase64(30)
}
if flags.ReportDB.Password == "" {
flags.ReportDB.Password = utils.GetRandomBase64(30)
}
// The admin password is only needed for local database
if flags.DB.IsLocal() && flags.DB.Admin.Password == "" {
flags.DB.Admin.Password = utils.GetRandomBase64(30)
}
}
func (flags *InstallationFlags) checkSSLParameters(cmd *cobra.Command, command string) {
// Make sure we have all the required 3rd party flags or none
flags.SSL.CheckParameters(flags.DB.IsLocal())
// Since we use cert-manager for self-signed certificates on kubernetes we don't need password for it
if !flags.SSL.UseProvided() && command == "podman" {
utils.AskPasswordIfMissing(&flags.SSL.Password, cmd.Flag("ssl-password").Usage, 0, 0)
}
}
func (flags *InstallationFlags) checkUpgradeSSLParameters(cmd *cobra.Command, command string) {
// Make sure we have all the required 3rd party flags or none
flags.SSL.CheckUpgradeParameters(flags.DB.IsLocal())
// Since we use cert-manager for self-signed certificates on kubernetes we don't need password for it
if !flags.SSL.UseProvidedDB() && command == "podman" && !systemd.HasService(podman.DBService) {
utils.AskPasswordIfMissing(&flags.SSL.Password, cmd.Flag("ssl-password").Usage, 0, 0)
}
}
// CheckParameters checks parameters for install command.
func (flags *InstallationFlags) CheckParameters(cmd *cobra.Command, command string) {
flags.setPasswordIfMissing()
flags.checkSSLParameters(cmd, command)
// Use the host timezone if the user didn't define one
if flags.TZ == "" {
flags.TZ = utils.GetLocalTimezone()
}
utils.AskIfMissing(&flags.Email, cmd.Flag("email").Usage, 1, 128, emailChecker)
utils.AskIfMissing(&flags.EmailFrom, cmd.Flag("emailfrom").Usage, 0, 0, emailChecker)
utils.AskIfMissing(&flags.Admin.Login, cmd.Flag("admin-login").Usage, 1, 64, idChecker)
utils.AskPasswordIfMissing(&flags.Admin.Password, cmd.Flag("admin-password").Usage, 5, 48)
utils.AskIfMissing(&flags.Organization, cmd.Flag("organization").Usage, 3, 128, nil)
flags.SSL.Email = flags.Email
flags.Admin.Email = flags.Email
}
// DBFlags can store all values required to connect to a database.
type DBFlags struct {
Host string
Name string
Port int
User string
Password string
Provider string
Admin struct {
User string
Password string
}
}
// IsLocal indicates if the database is a local or a third party one.
func (flags *DBFlags) IsLocal() bool {
return flags.Host == "" || flags.Host == "db" || flags.Host == "reportdb"
}
// DebugFlags contains information about enabled/disabled debug.
type DebugFlags struct {
Java bool
}
// idChecker verifies that the value is a valid identifier.
func idChecker(value string) bool {
r := regexp.MustCompile(`^([[:alnum:]]|[._-])+$`)
if r.MatchString(value) {
return true
}
fmt.Println(L("Can only contain letters, digits . _ and -"))
return false
}
// emailChecker verifies that the value is a valid email address.
func emailChecker(value string) bool {
address, err := mail.ParseAddress(value)
if err != nil || address.Name != "" || strings.ContainsAny(value, "<>") {
fmt.Println(L("Not a valid email address"))
return false
}
return true
}
// SSHFlags is the structure holding the SSH configuration to use to connect to the source server to migrate.
type SSHFlags struct {
Key struct {
Public string
Private string
}
Knownhosts string
Config string
}
070701000000c6000081a400000000000000000000000168ed21dd000003c6000000000000000000000000000000000000002200000000mgradm/shared/utils/flags_test.go// SPDX-FileCopyrightText: 2024 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package utils
import "testing"
func TestIdChecker(t *testing.T) {
data := map[string]bool{
"foo": true,
"foo bar": false,
"\u798f": false,
"foo123._-": true,
"foo+": false,
"foo&": false,
"foo'": false,
"foo\"": false,
"foo`": false,
"foo=": false,
"foo#": false,
}
for value, expected := range data {
actual := idChecker(value)
if actual != expected {
t.Errorf("%s: expected %v got %v", value, expected, actual)
}
}
}
func TestEmailChecker(t *testing.T) {
data := map[string]bool{
"root@localhost": true,
"joe.hacker@foo.bar.com": true,
"<joe.hacker@foo.bar.com>": false,
"fooo": false,
}
for value, expected := range data {
actual := emailChecker(value)
if actual != expected {
t.Errorf("%s: expected %v got %v", value, expected, actual)
}
}
}
070701000000c7000081a400000000000000000000000168ed21dd00000a23000000000000000000000000000000000000001d00000000mgradm/shared/utils/setup.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package utils
import (
"strconv"
"strings"
"github.com/uyuni-project/uyuni-tools/mgradm/shared/templates"
. "github.com/uyuni-project/uyuni-tools/shared/l10n"
"github.com/uyuni-project/uyuni-tools/shared/utils"
)
// GetSetupEnv computes the environment variables required by the setup script from the flags.
// As the requirements are slightly different for kubernetes there is a toggle parameter for it.
func GetSetupEnv(mirror string, flags *InstallationFlags, fqdn string, kubernetes bool) map[string]string {
dbPort := "5432"
if flags.DB.Port != 0 {
dbPort = strconv.Itoa(flags.DB.Port)
}
reportdbPort := "5432"
if flags.ReportDB.Port != 0 {
reportdbPort = strconv.Itoa(flags.ReportDB.Port)
}
env := map[string]string{
"UYUNI_FQDN": fqdn,
"MANAGER_ADMIN_EMAIL": flags.Email,
"MANAGER_MAIL_FROM": flags.EmailFrom,
"MANAGER_ENABLE_TFTP": boolToString(flags.Tftp),
"MANAGER_DB_NAME": flags.DB.Name,
"MANAGER_DB_HOST": flags.DB.Host,
"MANAGER_DB_PORT": dbPort,
"REPORT_DB_NAME": flags.ReportDB.Name,
"REPORT_DB_HOST": flags.ReportDB.Host,
"REPORT_DB_PORT": reportdbPort,
"EXTERNALDB_PROVIDER": flags.DB.Provider,
"ISS_PARENT": flags.IssParent,
}
if kubernetes {
env["NO_SSL"] = "Y"
} else {
// Only add the credentials for podman as we have secret for Kubernetes.
env["ADMIN_USER"] = flags.Admin.Login
env["ADMIN_PASS"] = flags.Admin.Password
env["SCC_USER"] = flags.SCC.User
env["SCC_PASS"] = flags.SCC.Password
}
if mirror != "" {
env["MIRROR_PATH"] = "/mirror"
}
return env
}
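// Illustrative sketch (not part of the upstream sources): computing the setup
// environment for a podman installation. The serverFlags variable and the FQDN
// are hypothetical.
//
//	env := GetSetupEnv("", &serverFlags.Installation, "server.example.com", false)
//	for key, value := range env {
//		fmt.Printf("%s=%s\n", key, value)
//	}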
func boolToString(value bool) string {
if value {
return "Y"
}
return "N"
}
// GenerateSetupScript renders the setup script to execute in the container.
// The script exports all the needed environment variables and calls uyuni's mgr-setup.
func GenerateSetupScript(flags *InstallationFlags, nossl bool) (string, error) {
template := templates.MgrSetupScriptTemplateData{
DebugJava: flags.Debug.Java,
OrgName: flags.Organization,
AdminLogin: "$ADMIN_USER",
AdminPassword: "$ADMIN_PASS",
AdminFirstName: flags.Admin.FirstName,
AdminLastName: flags.Admin.LastName,
AdminEmail: flags.Admin.Email,
NoSSL: nossl,
}
// Prepare the script
scriptBuilder := new(strings.Builder)
if err := template.Render(scriptBuilder); err != nil {
return "", utils.Error(err, L("failed to render setup script"))
}
return scriptBuilder.String(), nil
}
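// Illustrative sketch (not part of the upstream sources): rendering the setup
// script for a podman installation with SSL enabled. The serverFlags variable
// is hypothetical.
//
//	script, err := GenerateSetupScript(&serverFlags.Installation, false)
//	if err != nil {
//		// handle the rendering error
//	}
//	_ = script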
070701000000c8000081a400000000000000000000000168ed21dd000009c4000000000000000000000000000000000000001d00000000mgradm/shared/utils/types.go// SPDX-FileCopyrightText: 2025 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0
package utils
import (
"github.com/uyuni-project/uyuni-tools/shared/types"
)
// InstallSSLFlags holds all the flags values related to SSL for installation.
type InstallSSLFlags struct {
types.SSLCertGenerationFlags `mapstructure:",squash"`
// Ca is the certification authority chain used for the server certificate.
Ca types.CaChain
// Server is the SSL certificate and key pair for the apache server.
Server types.SSLPair
// DB is the SSL key pair and the corresponding CA chain for local database.
// If the CA chain is not provided, the main one will be assumed.
DB SSLFlags
}
// SSLFlags represents an SSL certificate and key with the CA chain.
type SSLFlags struct {
types.SSLPair `mapstructure:",squash"`
CA types.CaChain
}
// KubernetesFlags stores Uyuni and Cert Manager kubernetes specific parameters.
type KubernetesFlags struct {
Uyuni types.ChartFlags
CertManager types.ChartFlags
}
// HubXmlrpcFlags contains settings for Hub XMLRPC container.
type HubXmlrpcFlags struct {
Replicas int
Image types.ImageFlags `mapstructure:",squash"`
IsChanged bool
}
// CocoFlags contains settings for coco attestation container.
type CocoFlags struct {
Replicas int
Image types.ImageFlags `mapstructure:",squash"`
IsChanged bool
}
// SalineFlags contains settings for Saline container.
type SalineFlags struct {
Port int
Replicas int
Image types.ImageFlags `mapstructure:",squash"`
IsChanged bool
}
// VolumesFlags stores the persistent volume claims configuration.
type VolumesFlags struct {
// Class is the default storage class for all the persistent volume claims.
Class string
// Database is the configuration of the var-pgsql volume.
Database VolumeFlags
// Packages is the configuration of the var-spacewalk volume containing the synchronized repositories.
Packages VolumeFlags
// Www is the configuration of the srv-www volume containing the images and distributions.
Www VolumeFlags
// Cache is the configuration of the var-cache volume.
Cache VolumeFlags
// Mirror is the PersistentVolume name to use in case of a mirror setup.
// An empty value means no mirror will be used.
Mirror string
}
// VolumeFlags is the configuration of one volume.
type VolumeFlags struct {
// Size is the requested size of the volume using kubernetes values like '100Gi'.
Size string
// Class is the storage class of the volume.
Class string
}
070701000000c9000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000001400000000mgradm/shared/utils070701000000ca000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000000e00000000mgradm/shared070701000000cb000041ed00000000000000000000000168ed21dd00000000000000000000000000000000000000000000000700000000mgradm07070100000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000b00000000TRAILER!!!