File k0sctl-0.25.1.obscpio of Package k0sctl

07070100000000000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000001600000000k0sctl-0.25.1/.github07070100000001000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000001E00000000k0sctl-0.25.1/.github/actions07070100000002000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000002F00000000k0sctl-0.25.1/.github/actions/smoke-test-cache07070100000003000081A40000000000000000000000016842976900000363000000000000000000000000000000000000003B00000000k0sctl-0.25.1/.github/actions/smoke-test-cache/action.yamlname: Smoke test cache steps
description: Cache smoke test binaries
runs:
  using: composite
  steps:
    - name: Set up Go
      uses: actions/setup-go@v5
      with:
        go-version-file: go.mod
        check-latest: true

    - name: Download compiled binary artifact
      uses: actions/download-artifact@v4
      with:
        name: k0sctl
        path: .

    - name: k0sctl cache
      uses: actions/cache@v3
      with:
        key: k0sctl-cache
        path: |
          /var/cache/k0sctl/k0s
          ~/.cache/k0sctl/k0s
    
    - name: kubectl cache
      uses: actions/cache@v3
      with:
        path: |
          smoke-test/kubectl
        key: "kubectl-${{ hashFiles('smoke-test/smoke.common.sh') }}"

    - name: Make binaries executable
      shell: bash
      run: |
        chmod +x k0sctl || true
        chmod +x smoke-test/kubectl || true
07070100000004000081A400000000000000000000000168429769000000EE000000000000000000000000000000000000002500000000k0sctl-0.25.1/.github/dependabot.ymlversion: 2
updates:
  - package-ecosystem: "gomod"
    directory: "/"
    schedule:
      interval: "daily"
    open-pull-requests-limit: 5
  - package-ecosystem: github-actions
    directory: /
    schedule:
      interval: daily
      
07070100000005000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000002000000000k0sctl-0.25.1/.github/workflows07070100000006000081A40000000000000000000000016842976900000145000000000000000000000000000000000000002F00000000k0sctl-0.25.1/.github/workflows/actionlint.ymlname: Lint (actionlint)
on:
  pull_request:
    paths:
      - .github/**

jobs:
  actionlint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: reviewdog/action-actionlint@v1
        with:
          fail_on_error: true
          level: warning
          github_token: ${{ github.token }}
07070100000007000081A40000000000000000000000016842976900000BB3000000000000000000000000000000000000003400000000k0sctl-0.25.1/.github/workflows/codeql-analysis.yml# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"

on:
  push:
    branches: [ main ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ main ]
  schedule:
    - cron: '40 16 * * 2'

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest
    permissions:
      actions: read
      contents: read
      security-events: write

    strategy:
      fail-fast: false
      matrix:
        language: [ 'go' ]
        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
        # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support

    steps:
    - name: Checkout repository
      uses: actions/checkout@v4

    # Added as suggested in https://github.com/github/codeql-action/issues/1842 to bring in a newer go than exists
    # preinstalled on the runners
    - name: Set up Go
      uses: actions/setup-go@v5
      with:
        go-version-file: go.mod
        check-latest: true

    # Initializes the CodeQL tools for scanning.
    - name: Initialize CodeQL
      uses: github/codeql-action/init@v3
      with:
        languages: ${{ matrix.language }}
        # If you wish to specify custom queries, you can do so here or in a config file.
        # By default, queries listed here will override any specified in a config file.
        # Prefix the list here with "+" to use these queries and those in the config file.
        
        # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
        # queries: security-extended,security-and-quality

        
    # Autobuild attempts to build any compiled languages  (C/C++, C#, or Java).
    # If this step fails, then you should remove it and run the build manually (see below)
    - name: Autobuild
      uses: github/codeql-action/autobuild@v3

    # â„šī¸ Command-line programs to run using the OS shell.
    # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun

    #   If the Autobuild fails above, remove it and uncomment the following three lines. 
    #   modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance.

    # - run: |
    #   echo "Run, Build Application using script"
    #   ./location_of_script_within_repo/buildscript.sh

    - name: Perform CodeQL Analysis
      uses: github/codeql-action/analyze@v3
07070100000008000081A4000000000000000000000001684297690000045A000000000000000000000000000000000000002900000000k0sctl-0.25.1/.github/workflows/dco.yamlname: DCO

on:
  pull_request:
    branches:
      - main
      - release-*

permissions:
  contents: read

env:
  PYTHON_VERSION: 3.13.1

jobs:
  check:
    name: DCO check
    runs-on: ubuntu-24.04
    steps:
      - name: Checkout k0s
        uses: actions/checkout@v4

      - name: Set up Python ${{ env.PYTHON_VERSION }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Download DCO check script
        env:
          # https://github.com/christophebedard/dco-check/releases/tag/0.4.0
          DCO_CHECK_VERSION: 30353d8deedf393cf55ba33355e71da7fdd095c7
        run: |
          curl --proto '=https' --tlsv1.2 --retry 5 --retry-all-errors -sSLfo dco_check.py \
            'https://raw.githubusercontent.com/christophebedard/dco-check/${{ env.DCO_CHECK_VERSION }}/dco_check/dco_check.py'

      - name: Run DCO check
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          DCO_CHECK_VERBOSE: "true"
          DCO_CHECK_EXCLUDE_PATTERN: dependabot\[bot\]@users\.noreply\.github\.com
        run: python3 dco_check.py
07070100000009000081A40000000000000000000000016842976900000227000000000000000000000000000000000000003C00000000k0sctl-0.25.1/.github/workflows/dependabot-auto-approve.ymlname: Dependabot auto-approve
on: pull_request

permissions:
  pull-requests: write

jobs:
  dependabot:
    runs-on: ubuntu-latest
    if: ${{ github.actor == 'dependabot[bot]' }}
    steps:
      - name: Dependabot metadata
        id: metadata
        uses: dependabot/fetch-metadata@v1
        with:
          github-token: "${{ secrets.GITHUB_TOKEN }}"

      - name: Approve PR
        run: gh pr review --approve "$PR_URL"
        env:
          PR_URL: ${{github.event.pull_request.html_url}}
          GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
0707010000000A000081A400000000000000000000000168429769000002EA000000000000000000000000000000000000002700000000k0sctl-0.25.1/.github/workflows/go.ymlname: Go unit tests

on: 
  pull_request:
    paths:
      - '**.go'
      - go.mod
      - go.sum
      - Makefile

jobs:
  unit-test:
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest, windows-latest]
    runs-on: ${{ matrix.os }}

    steps:
    - uses: actions/checkout@v4

    - name: Set up Go
      uses: actions/setup-go@v5
      with:
        go-version-file: go.mod
        check-latest: true

    - name: Test
      run: go test -v ./...

  build:
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v4

    - name: Set up Go
      uses: actions/setup-go@v5
      with:
        go-version-file: go.mod
        check-latest: true

    - name: Verify all binaries can be built
      run: make build-all
0707010000000B000081A40000000000000000000000016842976900000264000000000000000000000000000000000000003200000000k0sctl-0.25.1/.github/workflows/golangci-lint.ymlname: Lint (golangci-lint)
on:
  pull_request:
    paths:
      - '**.go'

jobs:
  golangci-lint:
    name: Run golangci-lint
    runs-on: ubuntu-latest
    steps:
      - name: Check out code into the Go module directory
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: go.mod
          check-latest: true

      - name: Run golangci-lint
        uses: golangci/golangci-lint-action@v4
        with:
          version: latest
          skip-cache: true
          only-new-issues: false
          args: --verbose --timeout=10m
0707010000000C000081A40000000000000000000000016842976900000211000000000000000000000000000000000000002F00000000k0sctl-0.25.1/.github/workflows/gomod-lint.ymlname: Lint (go.mod/go.sum)
on:
  pull_request:
    paths:
      - 'go.mod'
      - 'go.sum'

jobs:
  gomod-lint:
    name: Validate go module file consistency
    runs-on: ubuntu-latest
    steps:
      - name: Check out code into the Go module directory
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: go.mod
          check-latest: true

      - name: Check go.mod/go.sum to be consistent
        run: go mod tidy -v && git diff --exit-code
0707010000000D000081A400000000000000000000000168429769000004B6000000000000000000000000000000000000002C00000000k0sctl-0.25.1/.github/workflows/release.ymlname: Release

on:
  push:
    tags:
      - 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10

jobs:
  release:
    name: release
    runs-on: ubuntu-latest
    steps:
      - name: Check out code into the Go module directory
        uses: actions/checkout@v4
    
      - name: Tag name
        id: tag-name
        run: echo "tag=${GITHUB_REF#refs/tags/}" >> "$GITHUB_OUTPUT"

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: go.mod
          check-latest: true

      - name: Build binaries
        id: build_bins
        env:
          TAG_NAME: ${{ steps.tag-name.outputs.tag }}
        run: make build-all

      - name: Create release and upload binaries
        uses: softprops/action-gh-release@v2
        if: startsWith(github.ref, 'refs/tags/')
        with:
          files: |
            bin/k0sctl-*
            bin/checksums.txt
          body_path: bin/checksums.md
          tag_name: ${{ steps.tag-name.outputs.tag }}
          name: ${{ steps.tag-name.outputs.tag }}
          draft: true # So we can manually edit before publishing
          prerelease: ${{ contains(steps.tag-name.outputs.tag, '-') }} # v0.1.2-beta1, 1.2.3-rc1
0707010000000E000081A40000000000000000000000016842976900001F05000000000000000000000000000000000000002A00000000k0sctl-0.25.1/.github/workflows/smoke.ymlname: Smoke tests

on: 
  pull_request:
    paths:
      - '**.go'
      - go.mod
      - go.sum
      - Makefile
      - .github/workflows/smoke.yml
      - smoke-test/**

jobs:

  build:
    runs-on: ubuntu-24.04

    steps:
    - uses: actions/checkout@v4

    - name: Set up Go
      uses: actions/setup-go@v5
      with:
        go-version-file: go.mod
        check-latest: true

    - name: Build
      run: make k0sctl

    - name: Stash the compiled binary for further testing
      uses: actions/upload-artifact@v4
      with:
        name: k0sctl
        path: k0sctl
        retention-days: 2

  smoke-basic:
    strategy:
      matrix:
        image:
          - quay.io/k0sproject/bootloose-alpine3.18
          - quay.io/k0sproject/bootloose-amazonlinux2023
          - quay.io/k0sproject/bootloose-debian12
          - quay.io/k0sproject/bootloose-fedora38
          - quay.io/k0sproject/bootloose-rockylinux9
          - quay.io/k0sproject/bootloose-ubuntu22.04
    name: Basic 1+1 smoke
    needs: build
    runs-on: ubuntu-24.04

    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/actions/smoke-test-cache
      - name: Run smoke tests
        env:
          LINUX_IMAGE: ${{ matrix.image }}
        run: make smoke-basic
  
  smoke-basic-rootless:
    strategy:
      matrix:
        image:
          - quay.io/k0sproject/bootloose-debian12
          - quay.io/k0sproject/bootloose-ubuntu22.04
    name: Basic 1+1 smoke (regular user login)
    needs: build
    runs-on: ubuntu-24.04

    steps:

      - uses: actions/checkout@v4
      - uses: ./.github/actions/smoke-test-cache
      - name: Run smoke tests
        env:
          LINUX_IMAGE: ${{ matrix.image }}
        run: make smoke-basic-rootless
  
  smoke-basic-idlike:
    name: Basic 1+1 smoke (ID_LIKE fallback)
    needs: build
    runs-on: ubuntu-24.04

    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/actions/smoke-test-cache

      - name: Build image
        run: |
          make -C smoke-test kalilinux.iid
          echo "LINUX_IMAGE=$(cat smoke-test/kalilinux.iid)" >> "$GITHUB_ENV"

      - name: Run smoke tests
        run: make smoke-basic
  
  smoke-basic-openssh:
    strategy:
      matrix:
        image:
          - quay.io/k0sproject/bootloose-alpine3.18
    name: Basic 1+1 smoke using openssh client
    needs: build
    runs-on: ubuntu-24.04

    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/actions/smoke-test-cache
      - name: Run smoke tests
        env:
          LINUX_IMAGE: ${{ matrix.image }}
        run: make smoke-basic-openssh
  
  smoke-multidoc:
    strategy:
      matrix:
        image:
          - quay.io/k0sproject/bootloose-alpine3.18
    name: Basic 1+1 smoke using multidoc yamls
    needs: build
    runs-on: ubuntu-24.04

    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/actions/smoke-test-cache
      - name: Run smoke tests
        env:
          LINUX_IMAGE: ${{ matrix.image }}
        run: make smoke-multidoc

  smoke-files:
    strategy:
      matrix:
        image:
          - quay.io/k0sproject/bootloose-ubuntu22.04
          - quay.io/k0sproject/bootloose-alpine3.18
    name: Basic file upload smoke
    needs: build
    runs-on: ubuntu-24.04

    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/actions/smoke-test-cache
      - name: Run smoke tests
        run: make smoke-files

  smoke-dynamic:
    strategy:
      matrix:
        image:
          - quay.io/k0sproject/bootloose-alpine3.18
    name: Basic dynamic config smoke
    needs: build
    runs-on: ubuntu-24.04

    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/actions/smoke-test-cache
      - name: Run smoke tests
        run: make smoke-dynamic

  smoke-os-override:
    name: OS override smoke test
    needs: build
    runs-on: ubuntu-24.04

    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/actions/smoke-test-cache
      - name: Run OS override smoke test
        run: make smoke-os-override

  smoke-downloadurl:
    name: k0sDownloadURL smoke test
    needs: build
    runs-on: ubuntu-24.04

    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/actions/smoke-test-cache
      - name: Run k0sDownloadURL smoke test
        run: make smoke-downloadurl

  smoke-upgrade:
    strategy:
      matrix:
        image:
          - quay.io/k0sproject/bootloose-alpine3.18
          - quay.io/k0sproject/bootloose-amazonlinux2023
          - quay.io/k0sproject/bootloose-rockylinux9
          - quay.io/k0sproject/bootloose-ubuntu22.04
        k0s_from:
          - v1.21.6+k0s.0
    name: Upgrade
    needs: build
    runs-on: ubuntu-24.04

    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/actions/smoke-test-cache
      - name: Run smoke tests
        env:
          LINUX_IMAGE: ${{ matrix.image }}
          K0S_FROM:  ${{ matrix.k0s_from }}
        run: make smoke-upgrade
  
  smoke-dryrun:
    strategy:
      matrix:
        image:
          - quay.io/k0sproject/bootloose-alpine3.18
          - quay.io/k0sproject/bootloose-ubuntu22.04
        k0s_from:
          - v1.21.6+k0s.0
    name: Dry run
    needs: build
    runs-on: ubuntu-24.04

    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/actions/smoke-test-cache
      - name: Run smoke tests
        env:
          LINUX_IMAGE: ${{ matrix.image }}
          K0S_FROM:  ${{ matrix.k0s_from }}
        run: make smoke-dryrun

  smoke-reset:
    strategy:
      matrix:
        image:
          - quay.io/k0sproject/bootloose-rockylinux9
          - quay.io/k0sproject/bootloose-ubuntu22.04

    name: Apply + reset
    needs: build
    runs-on: ubuntu-24.04

    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/actions/smoke-test-cache
      - name: Run smoke tests
        env:
          LINUX_IMAGE: ${{ matrix.image }}
        run: make smoke-reset

  smoke-backup-restore:
    strategy:
      matrix:
        image:
          - quay.io/k0sproject/bootloose-alpine3.18
          - quay.io/k0sproject/bootloose-rockylinux9
          - quay.io/k0sproject/bootloose-ubuntu22.04

    name: Apply + backup + reset + restore
    needs: build
    runs-on: ubuntu-24.04

    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/actions/smoke-test-cache
      - name: Run smoke tests
        env:
          LINUX_IMAGE: ${{ matrix.image }}
        run: make smoke-backup-restore
  
  smoke-backup-restore-out:
    name: Apply + backup + reset + restore (non-default output)
    needs: build
    runs-on: ubuntu-24.04

    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/actions/smoke-test-cache
      - name: Run smoke tests
        env:
          OUT: localfile
        run: make smoke-backup-restore
  
  smoke-controller-swap:
    strategy:
      matrix:
        image:
          - quay.io/k0sproject/bootloose-alpine3.18

    name: Controller swap
    needs: build
    runs-on: ubuntu-24.04

    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/actions/smoke-test-cache
      - name: Run smoke tests
        env:
          LINUX_IMAGE: ${{ matrix.image }}
        run: make smoke-controller-swap
  
  smoke-reinstall:
    strategy:
      matrix:
        image:
          - quay.io/k0sproject/bootloose-alpine3.18
          - quay.io/k0sproject/bootloose-ubuntu22.04

    name: Reinstall (modify install flags)
    needs: build
    runs-on: ubuntu-24.04

    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/actions/smoke-test-cache
      - name: Run smoke tests
        env:
          LINUX_IMAGE: ${{ matrix.image }}
        run: make smoke-reinstall

  smoke-init:
    name: Init sub-command smoke test
    needs: build
    runs-on: ubuntu-24.04

    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/actions/smoke-test-cache
      - name: Run init smoke test
        run: make smoke-init

0707010000000F000081A40000000000000000000000016842976900000BD4000000000000000000000000000000000000003A00000000k0sctl-0.25.1/.github/workflows/update-latest-release.ymlname: Update Latest Release

on:
  push:
    branches: [main]
    paths:
      - '**.go'
      - go.mod
      - go.sum
      - Makefile
      - .github/workflows/update-latest-release.yml
      - .github/workflows/release.yml

permissions:
  contents: write 

jobs:
  publish-latest:
    name: Publish Latest Release
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v4
        with:
          ref: ${{ github.sha }}
          fetch-depth: 0

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: go.mod
          check-latest: true

      - name: Build binaries and checksums
        id: build_bins
        env:
          TAG_NAME: v0.0.0-dev
        run: |
          make build-all

      - name: Create development release notes
        run: |
          test -f bin/checksums.md
          test -f bin/k0sctl-linux-amd64
          COMMIT_HASH=$(git rev-parse --short ${{ github.sha }})
          DATE=$(date -u +"%Y-%m-%d at %H:%M UTC")
          LATEST_STABLE_TAG=$(git tag --list 'v[0-9]*.[0-9]*.[0-9]*' --sort=version:refname | tail -n1)
          PREV_STABLE_COMMIT=$(git rev-list -n 1 "$LATEST_STABLE_TAG")
          REPO_URL="https://github.com/${GITHUB_REPOSITORY}"
          {
            echo "## Latest Development Build"
            echo
            echo "This release was generated automatically from commit [\`$COMMIT_HASH\`]($REPO_URL/commit/${{ github.sha }}) on $DATE."
            echo
            echo "**This is a development build and may include unfinished features, bugs, or other issues. Use with caution.**"
            echo
            echo "### Commits since last development release:"
            echo
      
            if [ -n "$PREV_STABLE_COMMIT" ]; then
              git log "${PREV_STABLE_COMMIT}..HEAD" --pretty=format:'- %h %s (%an)'
            else
              git log -n 5 --pretty=format:'- %h %s (%an)'
            fi
      
            echo
            echo "---"
            echo
            cat bin/checksums.md
          } > bin/dev.md
      
      - name: Delete existing latest release and tag
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          gh release delete dev --cleanup-tag -y || echo "No existing dev release found, proceeding."
          git tag -d dev || echo "No local dev tag found."
          git push origin --delete dev || echo "No remote dev tag found."
          sleep 5

      - name: Create and push new dev tag
        env:
          COMMIT_SHA: ${{ github.sha }}
        run: |
          git tag dev "${COMMIT_SHA}"
          git push origin dev

      - name: Create GitHub Release
        uses: softprops/action-gh-release@v2
        with:
          tag_name: dev
          name: Latest Development Build
          body_path: bin/dev.md 
          files: |
            bin/k0sctl-*
            bin/checksums.txt
          prerelease: true 
          fail_on_unmatched_files: true
          target_commitish: ${{ github.sha }}
07070100000010000081A4000000000000000000000001684297690000000C000000000000000000000000000000000000001900000000k0sctl-0.25.1/.gitignorebin/
k0sctl
07070100000011000081A40000000000000000000000016842976900002DD3000000000000000000000000000000000000001600000000k0sctl-0.25.1/LICENSEPortions of this software are licensed as follows:

* All content residing under the "docs/" directory of this repository is licensed under "Creative Commons Attribution Share Alike 4.0 International" (CC-BY-SA-4.0). See docs/LICENCE for details.
* Content outside of the above mentioned directories or restrictions above is available under the "Apache License 2.0" as defined below.

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!)  The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright 2023, k0sctl authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
07070100000012000081A40000000000000000000000016842976900000AEB000000000000000000000000000000000000001700000000k0sctl-0.25.1/MakefileGO_SRCS := $(shell find . -type f -name '*.go' -a ! \( -name 'zz_generated*' -o -name '*_test.go' \))
# Test source files; used as prerequisites so the test target re-runs when tests change.
GO_TESTS := $(shell find . -type f -name '*_test.go')
# Exact tag pointing at HEAD, if any (empty when building an untagged commit).
TAG_NAME = $(shell git describe --tags --abbrev=0 --exact-match 2>/dev/null)
# Short (7 character) commit hash, embedded into the binary via versioninfo.
GIT_COMMIT = $(shell git rev-parse --short=7 HEAD)
# Tagged builds are marked "production", everything else defaults to "development".
ifdef TAG_NAME
	ENVIRONMENT = production
endif
ENVIRONMENT ?= development
# Installation prefix for the "install" target.
PREFIX = /usr/local

# Linker flags: strip symbol table and DWARF (-s -w) and inject build metadata.
LD_FLAGS = -s -w -X github.com/k0sproject/k0sctl/version.Environment=$(ENVIRONMENT) -X github.com/carlmjohnson/versioninfo.Revision=$(GIT_COMMIT) -X github.com/carlmjohnson/versioninfo.Version=$(TAG_NAME)
# Build flags for a statically linked binary with reproducible paths (-trimpath).
BUILD_FLAGS = -trimpath -a -tags "netgo,osusergo,static_build" -installsuffix netgo -ldflags "$(LD_FLAGS) -extldflags '-static'"

# Build k0sctl for the local platform.
k0sctl: $(GO_SRCS)
	go build $(BUILD_FLAGS) -o k0sctl main.go

# Cross-compiled release binaries. CGO is disabled for the Linux builds to
# guarantee a fully static binary.
bin/k0sctl-linux-amd64: $(GO_SRCS)
	GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build $(BUILD_FLAGS) -o bin/k0sctl-linux-amd64 main.go

bin/k0sctl-linux-arm64: $(GO_SRCS)
	GOOS=linux GOARCH=arm64 CGO_ENABLED=0 go build $(BUILD_FLAGS) -o bin/k0sctl-linux-arm64 main.go

bin/k0sctl-linux-arm: $(GO_SRCS)
	GOOS=linux GOARCH=arm CGO_ENABLED=0 go build $(BUILD_FLAGS) -o bin/k0sctl-linux-arm main.go

bin/k0sctl-win-amd64.exe: $(GO_SRCS)
	GOOS=windows GOARCH=amd64 go build $(BUILD_FLAGS) -o bin/k0sctl-win-amd64.exe main.go

bin/k0sctl-darwin-amd64: $(GO_SRCS)
	GOOS=darwin GOARCH=amd64 go build $(BUILD_FLAGS) -o bin/k0sctl-darwin-amd64 main.go

bin/k0sctl-darwin-arm64: $(GO_SRCS)
	GOOS=darwin GOARCH=arm64 go build $(BUILD_FLAGS) -o bin/k0sctl-darwin-arm64 main.go

# Names of all release artifacts.
bins := k0sctl-linux-amd64 k0sctl-linux-arm64 k0sctl-linux-arm k0sctl-win-amd64.exe k0sctl-darwin-amd64 k0sctl-darwin-arm64

# SHA256 checksum file; the bin/ prefix is stripped so the sums verify against
# the bare downloaded filenames.
bin/checksums.txt: $(addprefix bin/,$(bins))
	sha256sum -b $(addprefix bin/,$(bins)) | sed 's/bin\///' > $@

# Markdown-formatted checksum listing, intended for release notes.
bin/checksums.md: bin/checksums.txt
	@echo "### SHA256 Checksums" > $@
	@echo >> $@
	@echo "\`\`\`" >> $@
	@cat $< >> $@
	@echo "\`\`\`" >> $@

# Build every release binary plus the checksum files.
.PHONY: build-all
build-all: $(addprefix bin/,$(bins)) bin/checksums.md

# Remove all build outputs.
.PHONY: clean
clean:
	rm -rf bin/ k0sctl

# Smoke test targets; each one delegates to the target of the same name in
# the smoke-test/ directory, after making sure a local k0sctl binary exists.
smoketests := smoke-basic smoke-basic-rootless smoke-files smoke-upgrade smoke-reset smoke-os-override smoke-init smoke-backup-restore smoke-dynamic smoke-basic-openssh smoke-dryrun smoke-downloadurl smoke-controller-swap smoke-reinstall smoke-multidoc
.PHONY: $(smoketests)
$(smoketests): k0sctl
	$(MAKE) -C smoke-test $@

# Locate golangci-lint on PATH, falling back to GOPATH/bin.
golint := $(shell which golangci-lint 2>/dev/null)
ifeq ($(golint),)
golint := $(shell go env GOPATH)/bin/golangci-lint
endif

# Install golangci-lint when it is missing.
# NOTE(review): this module path installs golangci-lint v1; v2 moved to
# github.com/golangci/golangci-lint/v2/cmd/golangci-lint — confirm which major
# version the project's lint configuration targets.
$(golint):
	go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest

# Run the linter over all packages.
.PHONY: lint
lint: $(golint)
	$(golint) run ./...

# Run the unit tests for all packages.
.PHONY: test
test: $(GO_SRCS) $(GO_TESTS)
	go test -v ./...

# Install the locally built binary under $(PREFIX)/bin; DESTDIR is honored
# for staged installs used by packaging.
.PHONY: install
install: k0sctl
	install -d $(DESTDIR)$(PREFIX)/bin/
	install -m 755 k0sctl $(DESTDIR)$(PREFIX)/bin/
07070100000013000081A4000000000000000000000001684297690000605D000000000000000000000000000000000000001800000000k0sctl-0.25.1/README.md# k0sctl

*A command-line bootstrapping and management tool for [k0s zero friction kubernetes](https://k0sproject.io/) clusters.*

- [Installation](#installation)
- [Development status](#development-status)
- [Usage](#usage)
- [Configuration](#configuration-file)

Example output of k0sctl deploying a k0s cluster:

```text
INFO ==> Running phase: Connect to hosts
INFO ==> Running phase: Detect host operating systems
INFO [ssh] 10.0.0.1:22: is running Ubuntu 20.10
INFO [ssh] 10.0.0.2:22: is running Ubuntu 20.10
INFO ==> Running phase: Prepare hosts
INFO ==> Running phase: Gather host facts
INFO [ssh] 10.0.0.1:22: discovered 10.12.18.133 as private address
INFO ==> Running phase: Validate hosts
INFO ==> Running phase: Gather k0s facts
INFO ==> Running phase: Download k0s binaries on hosts
INFO ==> Running phase: Configure k0s
INFO ==> Running phase: Initialize the k0s cluster
INFO [ssh] 10.0.0.1:22: installing k0s controller
INFO ==> Running phase: Install workers
INFO [ssh] 10.0.0.1:22: generating token
INFO [ssh] 10.0.0.2:22: installing k0s worker
INFO [ssh] 10.0.0.2:22: waiting for node to become ready
INFO ==> Running phase: Disconnect from hosts
INFO ==> Finished in 2m2s
INFO k0s cluster version 1.22.3+k0s.0 is now installed
INFO Tip: To access the cluster you can now fetch the admin kubeconfig using:
INFO      k0sctl kubeconfig
```

You can find example Terraform and [bootloose](https://github.com/k0sproject/bootloose) configurations in the [examples/](examples/) directory.

## Installation

### Install from the released binaries

Download the desired version for your operating system and processor architecture from the [k0sctl releases page](https://github.com/k0sproject/k0sctl/releases). Make the file executable and place it in a directory available in your `$PATH`.

As the released binaries aren't signed yet, on macOS and Windows, you must first run the executable via "Open" in the context menu and allow running it.

### Install from the sources

If you have a working Go toolchain, you can use `go install` to install k0sctl to your `$GOPATH/bin`.

```sh
go install github.com/k0sproject/k0sctl@latest
```

### Package managers

#### [Homebrew](https://brew.sh/) (macOS, Linux)

```sh
brew install k0sproject/tap/k0sctl
```

#### [Chocolatey](https://chocolatey.org/) (Windows)

Note: The [chocolatey package](https://community.chocolatey.org/packages/k0sctl) is community maintained, any issues should be reported to the maintainer of the package.

```sh
choco install k0sctl
```

#### Shell auto-completions

##### Bash

```sh
k0sctl completion > /etc/bash_completion.d/k0sctl
```

##### Zsh

```sh
k0sctl completion > /usr/local/share/zsh/site-functions/_k0sctl

# For oh my zsh
k0sctl completion > $ZSH_CACHE_DIR/completions/_k0sctl
```

##### Fish

```sh
k0sctl completion > ~/.config/fish/completions/k0sctl.fish
```

## Development status

K0sctl is ready for use and in continuous development.

## Usage

### `k0sctl apply`

The main function of k0sctl is the `k0sctl apply` subcommand. Provided a configuration file describing the desired cluster state, k0sctl will connect to the listed hosts, determine the current state of the hosts and configure them as needed to form a k0s cluster.

The default location for the configuration file is `k0sctl.yaml` in the current working directory. To load a configuration from a different location, use:

```sh
k0sctl apply --config path/to/k0sctl.yaml
```

If the configuration cluster version `spec.k0s.version` is greater than the version detected on the cluster, a cluster upgrade will be performed. If the configuration lists hosts that are not part of the cluster, they will be configured to run k0s and will be joined to the cluster.

### `k0sctl init`

Generate a configuration template. Use `--k0s` to include an example `spec.k0s.config` k0s configuration block. You can also supply a list of host addresses via arguments or stdin.

Output a minimal configuration template:

```sh
k0sctl init > k0sctl.yaml
```

Output an example configuration with a default k0s config:

```sh
k0sctl init --k0s > k0sctl.yaml
```

Create a configuration from a list of host addresses and pipe it to k0sctl apply:

```sh
k0sctl init 10.0.0.1 10.0.0.2 ubuntu@10.0.0.3:8022 | k0sctl apply --config -
```

### `k0sctl backup & restore`

Takes a [backup](https://docs.k0sproject.io/stable/backup/) of the cluster control plane state into the current working directory.

The files are currently named with a running (unix epoch) timestamp, e.g. `k0s_backup_1623220591.tar.gz`.

Restoring a backup can be done as part of the [k0sctl apply](#k0sctl-apply) command using `--restore-from k0s_backup_1623220591.tar.gz` flag.

Restoring the cluster state is a full restoration of the cluster control plane state, including:
- Etcd datastore content
- Certificates
- Keys

In general restore is intended to be used as a disaster recovery mechanism and thus it expects that no k0s components actually exist on the controllers.

Known limitations in the current restore process:
- The control plane address (`externalAddress`) needs to remain the same between backup and restore. This is caused by the fact that all worker node components connect to this address and cannot currently be re-configured.

### `k0sctl reset`

Uninstall k0s from the hosts listed in the configuration.

### `k0sctl kubeconfig`

Connects to the cluster and outputs a kubeconfig file that can be used with `kubectl` or `kubeadm` to manage the kubernetes cluster.

Example:

```sh
$ k0sctl kubeconfig --config path/to/k0sctl.yaml > k0s.config
$ kubectl get node --kubeconfig k0s.config
NAME      STATUS     ROLES    AGE   VERSION
worker0   NotReady   <none>   10s   v1.20.2-k0s1
```

## Configuration file

The configuration file is in YAML format and loosely resembles the syntax used in Kubernetes. YAML anchors and aliases can be used.

To generate a simple skeleton configuration file, you can use the `k0sctl init` subcommand.

Configuration example:

```yaml
apiVersion: k0sctl.k0sproject.io/v1beta1
kind: Cluster
metadata:
  name: my-k0s-cluster
  user: admin
spec:
  hosts:
  - role: controller
    installFlags:
    - --debug
    ssh:
      address: 10.0.0.1
      user: root
      port: 22
      keyPath: ~/.ssh/id_rsa
  - role: worker
    installFlags:
    - --debug
    ssh:
      address: 10.0.0.2
  k0s:
    version: 0.10.0
    config:
      apiVersion: k0s.k0sproject.io/v1beta1
      kind: ClusterConfig
      metadata:
        name: my-k0s-cluster
      spec:
        images:
          calico:
            cni:
              image: calico/cni
              version: v3.16.2
  options:
    wait:
      enabled: true
    drain:
      enabled: true
    evictTaint:
      enabled: false
      taint: k0sctl.k0sproject.io/evict=true
      effect: NoExecute
    concurrency:
      limit: 30
      uploads: 5
```

### Environment variable substitution

Simple bash-like expressions are supported in the configuration for environment variable substitution.

- `$VAR` or `${VAR}` value of `VAR` environment variable
- `${VAR:-DEFAULT_VALUE}` will use the value of `VAR` if non-empty, otherwise `DEFAULT_VALUE`
- `$$VAR` - escape, the result will be the literal `$VAR`.
- And [several other expressions](https://github.com/a8m/envsubst#docs)

### Configuration Header Fields

###### `apiVersion` &lt;string&gt; (required)

The configuration file syntax version. Currently the only supported version is `k0sctl.k0sproject.io/v1beta1`.

###### `kind` &lt;string&gt; (required)

In the future, some of the configuration APIs can support multiple types of objects. For now, the only supported kind is `Cluster`.

###### `spec` &lt;mapping&gt; (required)

The main object definition, see [below](#spec-fields)

###### `metadata` &lt;mapping&gt; (optional)

Information that can be used to uniquely identify the object.

Example:

```yaml
metadata:
  name: k0s-cluster-name
  user: kubernetes-admin
```

### Spec Fields

##### `spec.hosts` &lt;sequence&gt; (required)

A list of cluster hosts. Host requirements:

* Currently only linux targets are supported
* The user must either be root or have passwordless `sudo` access.
* The host must fulfill the k0s system requirements

See [host object documentation](#host-fields) below.

##### `spec.k0s` &lt;mapping&gt; (optional)

Settings related to the k0s cluster.

See [k0s object documentation](#k0s-fields) below.

### Host Fields

###### `spec.hosts[*].role` &lt;string&gt; (required)

One of:
- `controller` - a controller host
- `controller+worker` - a controller host that will also run workloads
- `single` - a [single-node cluster](https://docs.k0sproject.io/stable/k0s-single-node/) host, the configuration can only contain one host
- `worker` - a worker host

###### `spec.hosts[*].noTaints` &lt;boolean&gt; (optional) (default: `false`)

When `true` and used in conjunction with the `controller+worker` role, the default taints are disabled making regular workloads schedulable on the node. By default, k0s sets a node-role.kubernetes.io/master:NoSchedule taint on controller+worker nodes and only workloads with toleration for it will be scheduled.

###### `spec.hosts[*].uploadBinary` &lt;boolean&gt; (optional) (default: `false`)

When `true`, the k0s binaries for the target host will be downloaded and cached on the local host and uploaded to the target.
When `false`, the k0s binary downloading is performed on the target host itself.

###### `spec.hosts[*].k0sBinaryPath` &lt;string&gt; (optional)

A path to a file on the local host that contains a k0s binary to be uploaded to the host. Can be used to test drive a custom development build of k0s.

###### `spec.hosts[*].k0sDownloadURL` &lt;string&gt; (optional)

A URL to download the k0s binary from. The default is to download from the [k0s repository](https://github.com/k0sproject/k0s). The URL can contain '%'-prefixed tokens that will be replaced with the host's information, see [tokens](#tokens).

###### `spec.hosts[*].hostname` &lt;string&gt; (optional)

Override host's hostname. When not set, the hostname reported by the operating system is used.

###### `spec.hosts[*].dataDir` &lt;string&gt; (optional) (default: `/var/lib/k0s`)

Set host's k0s data-dir.

###### `spec.hosts[*].kubeletRootDir` &lt;string&gt; (optional) (default: `""`)

Set host's k0s kubelet-root-dir.

###### `spec.hosts[*].installFlags` &lt;sequence&gt; (optional)

Extra flags passed to the `k0s install` command on the target host. See `k0s install --help` for a list of options.

###### `spec.hosts[*].environment` &lt;mapping&gt; (optional)

List of key-value pairs to set to the target host's environment variables.

Example:

```yaml
environment:
  HTTP_PROXY: 10.0.0.1:443
```

###### `spec.hosts[*].files` &lt;sequence&gt; (optional)

List of files to be uploaded to the host.

Example:

```yaml
- name: image-bundle
  src: airgap-images.tgz
  dstDir: /var/lib/k0s/images/
  perm: 0600
```

* `name`: name of the file "bundle", used only for logging purposes (optional)
* `src`: File path, a URL or [Glob pattern](https://golang.org/pkg/path/filepath/#Match) to match files to be uploaded. URL sources will be directly downloaded using the target host. If the value is a URL, '%'-prefixed tokens can be used, see [tokens](#tokens). (required)
* `dstDir`: Destination directory for the file(s). `k0sctl` will create full directory structure if it does not already exist on the host (default: user home)
* `dst`: Destination filename for the file. Only usable for single file uploads (default: basename of file)
* `perm`: File permission mode for uploaded file(s) (default: same as local)
* `dirPerm`: Directory permission mode for created directories (default: 0755)
* `user`: User name of file/directory owner, must exist on the host (optional)
* `group`: Group name of file/directory owner, must exist on the host (optional)

###### `spec.hosts[*].hooks` &lt;mapping&gt; (optional)

Run a set of commands on the remote host during k0sctl operations.

Example:

```yaml
hooks:
  apply:
    before:
      - date >> k0sctl-apply.log
    after:
      - echo "apply success" >> k0sctl-apply.log
```

The currently available "hook points" are:

* `apply`: Runs during `k0sctl apply`
    - `before`: Runs after configuration and host validation, right before configuring k0s on the host
    - `after`: Runs before disconnecting from the host after a successful apply operation
* `backup`: Runs during `k0s backup`
    - `before`: Runs before k0sctl runs the `k0s backup` command
    - `after`: Runs before disconnecting from the host after successfully taking a backup
* `reset`: Runs during `k0sctl reset`
    - `before`: Runs after gathering information about the cluster, right before starting to remove the k0s installation.
    - `after`: Runs before disconnecting from the host after a successful reset operation

##### `spec.hosts[*].os` &lt;string&gt; (optional) (default: ``)

Override OS distribution auto-detection. By default `k0sctl` detects the OS by reading `/etc/os-release` or `/usr/lib/os-release` files. In case your system is based on e.g. Debian but the OS release info has something else configured you can override `k0sctl` to use Debian based functionality for the node with:

```yaml
  - role: worker
    os: debian
    ssh:
      address: 10.0.0.2
```

##### `spec.hosts[*].privateInterface` &lt;string&gt; (optional) (default: ``)

Override private network interface selected by host fact gathering.
Useful in case fact gathering picks the wrong private network interface.

```yaml
  - role: worker
    os: debian
    privateInterface: eth1
```

##### `spec.hosts[*].privateAddress` &lt;string&gt; (optional) (default: ``)

Override private IP address selected by host fact gathering.
Useful in case fact gathering picks the wrong IPAddress.

```yaml
  - role: worker
    os: debian
    privateAddress: 10.0.0.2
```

##### `spec.hosts[*].ssh` &lt;mapping&gt; (optional)

SSH connection options.

Example:

```yaml
spec:
  hosts:
    - role: controller
      ssh:
        address: 10.0.0.2
        user: ubuntu
        keyPath: ~/.ssh/id_rsa
```

It's also possible to tunnel connections through a bastion host. The bastion configuration has all the same fields as any SSH connection:

```yaml
spec:
  hosts:
    - role: controller
      ssh:
        address: 10.0.0.2
        user: ubuntu
        keyPath: ~/.ssh/id_rsa
        bastion:
          address: 10.0.0.1
          user: root
          keyPath: ~/.ssh/id_rsa2
```

SSH agent and auth forwarding are also supported, a host without a keyfile:

```yaml
spec:
  hosts:
    - role: controller
      ssh:
        address: 10.0.0.2
        user: ubuntu
```

```shell
$ ssh-add ~/.ssh/aws.pem
$ ssh -A user@jumphost
user@jumphost ~ $ k0sctl apply
```

Pageant or openssh-agent can be used on Windows.

###### `spec.hosts[*].ssh.address` &lt;string&gt; (required)

IP address of the host

###### `spec.hosts[*].ssh.user` &lt;string&gt; (optional) (default: `root`)

Username to log in as.

###### `spec.hosts[*].ssh.port` &lt;number&gt; (required)

TCP port of the SSH service on the host.

###### `spec.hosts[*].ssh.keyPath` &lt;string&gt; (optional) (default: `~/.ssh/identity ~/.ssh/id_rsa ~/.ssh/id_dsa`)

Path to an SSH key file. If a public key is used, ssh-agent is required. When left empty, the default value will first be looked for from the ssh configuration (default `~/.ssh/config`) `IdentityFile` parameter.

##### `spec.hosts[*].localhost` &lt;mapping&gt; (optional)

Localhost connection options. Can be used to use the local host running k0sctl as a node in the cluster.

###### `spec.hosts[*].localhost.enabled` &lt;boolean&gt; (optional) (default: `false`)

This must be set `true` to enable the localhost connection.

##### `spec.hosts[*].openSSH` &lt;mapping&gt; (optional)

An alternative SSH client protocol that uses the system's openssh client for connections.

Example:

```yaml
spec:
  hosts:
    - role: controller
      openSSH:
        address: 10.0.0.2
```

The only required field is the `address` and it can also be a hostname that is found in the ssh config. All other options such as user, port and keypath will use the same defaults as if running `ssh` from the command-line or will use values found from the ssh config.

An example SSH config:

```
Host controller1
  Hostname 10.0.0.1
  Port 2222
  IdentityFile ~/.ssh/id_cluster_esa
```

If this is in your `~/.ssh/config`, you can simply use the host alias as the address in your k0sctl config:

```yaml
spec:
  hosts:
    - role: controller
      openSSH:
        address: controller1
        # if the ssh configuration is in a different file, you can use:
        # configPath: /path/to/config
```

###### `spec.hosts[*].openSSH.address` &lt;string&gt; (required)

IP address, hostname or ssh config host alias of the host

###### `spec.hosts[*].openSSH.user` &lt;string&gt; (optional)

Username to connect as.

###### `spec.hosts[*].openSSH.port` &lt;number&gt; (optional)

Remote port.

###### `spec.hosts[*].openSSH.keyPath` &lt;string&gt; (optional)

Path to private key.

###### `spec.hosts[*].openSSH.configPath` &lt;string&gt; (optional)

Path to ssh config, defaults to ~/.ssh/config with fallback to /etc/ssh/ssh_config.

###### `spec.hosts[*].openSSH.disableMultiplexing` &lt;boolean&gt; (optional)

The default mode of operation is to use connection multiplexing where a ControlMaster connection is opened and the subsequent connections to the same host use the master connection over a socket to communicate to the host. 

If this is disabled by setting `disableMultiplexing: true`, running every remote command will require reconnecting and reauthenticating to the host.

###### `spec.hosts[*].openSSH.options` &lt;mapping&gt; (optional)

Additional options as key/value pairs to use when running the ssh client.

Example:

```yaml
openSSH:
  address: host
  options:
      ForwardAgent: true  # -o ForwardAgent=yes
      StrictHostkeyChecking: false # -o StrictHostkeyChecking: no
```

###### `spec.hosts[*].reset` &lt;boolean&gt; (optional) (default: `false`)

If set to `true` k0sctl will remove the node from kubernetes and reset k0s on the host.

### K0s Fields

##### `spec.k0s.version` &lt;string&gt; (optional) (default: auto-discovery)

The version of k0s to deploy. When left out, k0sctl will default to using the latest released version of k0s or the version already running on the cluster.

##### `spec.k0s.versionChannel` &lt;string&gt; (optional) (default: `stable`)

Possible values are `stable` and `latest`.

When `spec.k0s.version` is left undefined, this setting can be set to `latest` to allow k0sctl to include k0s pre-releases when looking for the latest version. The default is to only look for stable releases.

##### `spec.k0s.dynamicConfig` &lt;boolean&gt; (optional) (default: false)

Enable k0s dynamic config. The setting will be automatically set to true if:

* Any controller node has `--enable-dynamic-config` in `installFlags`
* Any existing controller node has `--enable-dynamic-config` in run arguments (`k0s status -o json`)

**Note:** When running k0s in dynamic config mode, k0sctl will ONLY configure the cluster-wide configuration during the first time initialization, after that the configuration has to be managed via `k0s config edit` or `k0sctl config edit`. The node specific configuration will be updated on each apply.

See also:

* [k0s Dynamic Configuration](https://docs.k0sproject.io/stable/dynamic-configuration/)

##### `spec.k0s.config` &lt;mapping&gt; (optional) (default: auto-generated)

Embedded k0s cluster configuration. See [k0s configuration documentation](https://docs.k0sproject.io/stable/configuration/) for details.

When left out, the output of `k0s config create` will be used.

You can also host the configuration in a separate file or as a separate YAML document in the same file in the standard k0s configuration format.

```yaml
apiVersion: k0sctl.k0sproject.io/v1beta1
kind: Cluster
spec:
  hosts:
    - role: single
      ssh:
        address: 10.0.0.1
---
apiVersion: k0s.k0sproject.io/v1beta1
kind: ClusterConfig
metadata:
  name: my-k0s-cluster
spec:
  api:
    externalAddress: 10.0.0.2
```

### Options Fields

The `spec.options` field contains options that can be used to modify the behavior of k0sctl.

Example:

```yaml
spec:
  options:
    wait:
      enabled: true
    drain:
      enabled: true
    evictTaint:
      enabled: false
      taint: k0sctl.k0sproject.io/evict=true
      effect: NoExecute
    concurrency:
      limit: 30
      workerDisruptionPercent: 10
      uploads: 5
```

##### `spec.options.wait.enabled` &lt;boolean&gt; (optional) (default: true)

If set to `false`, k0sctl will not wait for k0s to become ready after restarting the service. By default, k0sctl waits for nodes to become ready before continuing to the next operation. This is functionally the same as using `--no-wait` on the command line.

##### `spec.options.drain.enabled` &lt;boolean&gt; (optional) (default: true)

If set to `false`, k0sctl will skip draining nodes before performing disruptive operations like upgrade or reset. By default, nodes are drained to allow for graceful pod eviction. This is functionally the same as using `--no-drain` on the command line.

##### `spec.options.drain.gracePeriod` &lt;duration&gt; (optional) (default: 2m)

The duration to wait for pods to be evicted from the node before proceeding with the operation. 

##### `spec.options.drain.timeout` &lt;duration&gt; (optional) (default: 5m)

The duration to wait for the drain operation to complete before timing out. 

##### `spec.options.drain.force` &lt;boolean&gt; (optional) (default: true)

Use `--force` in kubectl when draining the node.

##### `spec.options.drain.ignoreDaemonSets` &lt;boolean&gt; (optional) (default: true)

Ignore DaemonSets when draining the node.

##### `spec.options.drain.deleteEmptyDirData` &lt;boolean&gt; (optional) (default: true)

Continue even if there are pods using emptyDir (local data that will be deleted when the node is drained).

##### `spec.options.drain.skipWaitForDeleteTimeout` &lt;duration&gt; (optional) (default: 0s)

If pod DeletionTimestamp older than N seconds, skip waiting for the pod. Seconds must be greater than 0 to skip.

##### `spec.options.drain.podSelector` &lt;string&gt; (optional) (default: ``)

Label selector to filter pods on the node

##### `spec.options.evictTaint.enabled` &lt;boolean&gt; (optional) (default: false)

When enabled, k0sctl will apply a taint to nodes before service-affecting operations such as upgrade or reset. This is used to signal workloads to be evicted in advance of node disruption. You can also use the `--evict-taint=k0sctl.k0sproject.io/evict=true:NoExecute` command line option to enable this feature.

##### `spec.options.evictTaint.taint` &lt;string&gt; (optional) (default: `k0sctl.k0sproject.io/evict=true`)

The taint to apply when `evictTaint.enabled` is `true`. Must be in the format `key=value`.

##### `spec.options.evictTaint.effect` &lt;string&gt; (optional) (default: `NoExecute`)

The taint effect to apply. Must be one of:

* `NoExecute`
* `NoSchedule`
* `PreferNoSchedule`

##### `spec.options.evictTaint.controllerWorkers` &lt;boolean&gt; (optional) (default: false)

Whether to also apply the taint to nodes with the controller+worker dual role. By default, taints are only applied to worker-only nodes.

##### `spec.options.concurrency.limit` &lt;integer&gt; (optional) (default: 30)

The maximum number of hosts to operate on concurrently during cluster operations. Same as the `--concurrency` command line option.

##### `spec.options.concurrency.workerDisruptionPercent` &lt;integer&gt; (optional) (default: 10)

The maximum percentage of worker nodes that can be disrupted at the same time during operations such as upgrade. This is used to ensure that a minimum number of worker nodes remain available during the operation. The value must be between 0 and 100.

##### `spec.options.concurrency.uploads` &lt;integer&gt; (optional) (default: 5)

The maximum number of concurrent file uploads to perform. Same as the `--concurrent-uploads` command line option.

### Tokens

The following tokens can be used in the `k0sDownloadURL` and `files.[*].src` fields:

- `%%` - literal `%`
- `%p` - host architecture (arm, arm64, amd64)
- `%v` - k0s version (v1.21.0+k0s.0)
- `%x` - k0s binary extension (currently always empty)

Any other tokens will be output as-is including the `%` character.

Example:

```yaml
  - role: controller
    k0sDownloadURL: https://files.example.com/k0s%20files/k0s-%v-%p%x
    # Expands to https://files.example.com/k0s%20files/k0s-v1.21.0+k0s.0-amd64
```

07070100000014000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000001500000000k0sctl-0.25.1/action07070100000015000081A400000000000000000000000168429769000013FF000000000000000000000000000000000000001E00000000k0sctl-0.25.1/action/apply.gopackage action

import (
	"context"
	"fmt"
	"io"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"time"

	"github.com/k0sproject/k0sctl/phase"

	log "github.com/sirupsen/logrus"
)

// ApplyOptions holds the configurable options for the Apply action.
type ApplyOptions struct {
	// Manager is the phase manager
	Manager *phase.Manager
	// DisableDowngradeCheck skips the downgrade check
	DisableDowngradeCheck bool
	// NoWait skips waiting for the cluster to be ready
	NoWait bool
	// NoDrain skips draining worker nodes
	NoDrain bool
	// RestoreFrom is the path to a cluster backup archive to restore the state from
	RestoreFrom string
	// KubeconfigOut is a writer to write the kubeconfig to
	KubeconfigOut io.Writer
	// KubeconfigAPIAddress is the API address to use in the kubeconfig
	KubeconfigAPIAddress string
	// KubeconfigUser is the username to use in the kubeconfig
	KubeconfigUser string
	// KubeconfigCluster is the cluster name to use in the kubeconfig
	KubeconfigCluster string
	// ConfigPaths is the list of paths to the configuration files (used for kubeconfig command tip on success)
	ConfigPaths []string
}

// Apply is the action that runs the apply phase pipeline against the hosts
// described in the configuration. The Phases list can be customized before
// running the action.
type Apply struct {
	ApplyOptions
	Phases phase.Phases
}

// NewApply creates a new Apply action. The list of phases can be modified via the Phases field, for example:
//
//	apply := NewApply(opts)
//	gatherK0sFacts := &phase.GatherK0sFacts{} // advisable to get the title from the phase itself instead of hardcoding the title.
//	apply.Phases.InsertBefore(gatherK0sFacts.Title(), &myCustomPhase{}) // insert a custom phase before the GatherK0sFacts phase
func NewApply(opts ApplyOptions) *Apply {
	lock := &phase.Lock{}
	unlock := lock.UnlockPhase()

	// The default apply pipeline, in execution order.
	phases := phase.Phases{
		&phase.DefaultK0sVersion{},
		&phase.Connect{},
		&phase.DetectOS{},
		lock,
		&phase.PrepareHosts{},
		&phase.GatherFacts{},
		&phase.ValidateHosts{},
		&phase.GatherK0sFacts{},
		&phase.ValidateFacts{SkipDowngradeCheck: opts.DisableDowngradeCheck},
		&phase.ValidateEtcdMembers{},

		// when UploadBinaries is true:
		&phase.DownloadBinaries{}, // downloads k0s binaries to local cache
		&phase.UploadK0s{},        // uploads k0s binaries to hosts from cache

		// when UploadBinaries is false:
		&phase.DownloadK0s{}, // downloads k0s binaries directly from hosts

		&phase.UploadFiles{},
		&phase.InstallBinaries{},
		&phase.PrepareArm{},
		&phase.ConfigureK0s{},
		&phase.Restore{RestoreFrom: opts.RestoreFrom},
		&phase.RunHooks{Stage: "before", Action: "apply"},
		&phase.InitializeK0s{},
		&phase.InstallControllers{},
		&phase.InstallWorkers{},
		&phase.UpgradeControllers{},
		&phase.UpgradeWorkers{NoDrain: opts.NoDrain},
		&phase.Reinstall{},
		&phase.ResetWorkers{NoDrain: opts.NoDrain},
		&phase.ResetControllers{NoDrain: opts.NoDrain},
		&phase.RunHooks{Stage: "after", Action: "apply"},
		&phase.ApplyManifests{},
		unlock,
		&phase.Disconnect{},
	}

	action := &Apply{ApplyOptions: opts, Phases: phases}

	// When a kubeconfig writer is supplied, fetch the kubeconfig right before unlocking.
	if opts.KubeconfigOut != nil {
		action.Phases.InsertBefore(unlock.Title(), &phase.GetKubeconfig{
			APIAddress: opts.KubeconfigAPIAddress,
			User:       opts.KubeconfigUser,
			Cluster:    opts.KubeconfigCluster,
		})
	}

	return action
}

// Run executes the Apply action: it runs the configured phases through the
// manager, writes the kubeconfig to KubeconfigOut when requested, and prints
// a summary with a "k0sctl kubeconfig" usage tip on success.
func (a Apply) Run(ctx context.Context) error {
	if len(a.Phases) == 0 {
		// for backwards compatibility with the old Apply struct without NewApply(..)
		tmpApply := NewApply(a.ApplyOptions)
		a.Phases = tmpApply.Phases
	}
	start := time.Now()

	phase.NoWait = a.NoWait

	a.Manager.SetPhases(a.Phases)

	var result error

	if result = a.Manager.Run(ctx); result != nil {
		log.Info(phase.Colorize.Red("==> Apply failed").String())
		return result
	}

	if a.KubeconfigOut != nil {
		if _, err := a.KubeconfigOut.Write([]byte(a.Manager.Config.Metadata.Kubeconfig)); err != nil {
			// the writer has no printable name, so only report the error itself
			// (formatting the io.Writer with %s produced unreadable output)
			log.Warnf("failed to write kubeconfig: %v", err)
		}
	}

	duration := time.Since(start).Truncate(time.Second)
	text := fmt.Sprintf("==> Finished in %s", duration)
	log.Info(phase.Colorize.Green(text).String())

	for _, host := range a.Manager.Config.Spec.Hosts {
		if host.Reset {
			log.Info("There were nodes that got uninstalled during the apply phase. Please remove them from your k0sctl config file")
			break
		}
	}

	if !a.Manager.DryRun {
		log.Infof("k0s cluster version %s is now installed", a.Manager.Config.Spec.K0s.Version)
	}

	if a.KubeconfigOut == nil {
		cmd := &strings.Builder{}
		executable, err := os.Executable()
		if err != nil {
			executable = "k0sctl"
		} else {
			// check if the basename of executable is in the PATH, if so, just use the basename
			if _, err := exec.LookPath(filepath.Base(executable)); err == nil {
				executable = filepath.Base(executable)
			}
		}

		cmd.WriteString(executable)
		cmd.WriteString(" kubeconfig")

		// Include --config flags in the tip unless the only config source is
		// stdin ("-") or the default k0sctl.yaml. The previous condition used
		// "&&" here, which wrongly omitted the flags for any single custom
		// config path.
		if len(a.ConfigPaths) > 0 && (len(a.ConfigPaths) != 1 || (a.ConfigPaths[0] != "-" && a.ConfigPaths[0] != "k0sctl.yaml")) {
			for _, path := range a.ConfigPaths {
				cmd.WriteString(" --config ")
				cmd.WriteString(path)
			}
		}

		log.Info("Tip: To access the cluster you can now fetch the admin kubeconfig using:")
		log.Info("     " + phase.Colorize.Cyan(cmd.String()).String())
	}

	return nil
}
07070100000016000081A400000000000000000000000168429769000003A7000000000000000000000000000000000000001F00000000k0sctl-0.25.1/action/backup.gopackage action

import (
	"context"
	"fmt"
	"io"
	"time"

	"github.com/k0sproject/k0sctl/phase"
	log "github.com/sirupsen/logrus"
)

// Backup is the action that takes a backup of the cluster state.
type Backup struct {
	// Manager is the phase manager
	Manager *phase.Manager
	// Out is the writer the backup archive is streamed to
	Out     io.Writer
}

// Run executes the backup: it connects to the hosts, gathers facts, runs the
// before/after backup hooks and streams the backup archive to b.Out.
func (b Backup) Run(ctx context.Context) error {
	start := time.Now()

	lockPhase := &phase.Lock{}

	b.Manager.AddPhase(
		&phase.Connect{},
		&phase.DetectOS{},
		lockPhase,
		&phase.PrepareHosts{},
		// machine IDs are not needed for a backup, skip gathering them
		&phase.GatherFacts{SkipMachineIDs: true},
		&phase.GatherK0sFacts{},
		&phase.RunHooks{Stage: "before", Action: "backup"},
		&phase.Backup{Out: b.Out},
		&phase.RunHooks{Stage: "after", Action: "backup"},
		// release the lock taken by lockPhase before disconnecting
		&phase.Unlock{Cancel: lockPhase.Cancel},
		&phase.Disconnect{},
	)

	if err := b.Manager.Run(ctx); err != nil {
		return err
	}

	duration := time.Since(start).Truncate(time.Second)
	text := fmt.Sprintf("==> Finished in %s", duration)
	log.Info(phase.Colorize.Green(text).String())
	return nil
}
07070100000017000081A40000000000000000000000016842976900000873000000000000000000000000000000000000002400000000k0sctl-0.25.1/action/config_edit.gopackage action

import (
	"context"
	"fmt"
	"io"
	"os"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/rig/exec"

	osexec "os/exec"

	"github.com/mattn/go-isatty"
)

// ConfigEdit is the action behind "k0sctl config edit": it opens the k0s
// dynamic cluster configuration in an interactive editor.
type ConfigEdit struct {
	Config *v1beta1.Cluster
	Stdout io.Writer
	Stderr io.Writer
	Stdin  io.Reader
}

// Run fetches the live k0s clusterconfig from the leader host, opens it in
// the user's editor and, if the content was changed, applies the edited
// configuration back via kubectl on the leader.
func (c ConfigEdit) Run(_ context.Context) error {
	// editing requires an interactive terminal on stdout
	stdoutFile, ok := c.Stdout.(*os.File)

	if !ok || !isatty.IsTerminal(stdoutFile.Fd()) {
		return fmt.Errorf("output is not a terminal")
	}

	editor, err := shellEditor()
	if err != nil {
		return err
	}

	h := c.Config.Spec.K0sLeader()

	if err := h.Connect(); err != nil {
		return fmt.Errorf("failed to connect: %w", err)
	}
	defer h.Disconnect()

	if err := h.ResolveConfigurer(); err != nil {
		return err
	}

	// fetch the current dynamic config as yaml from the leader
	oldCfg, err := h.ExecOutput(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "-n kube-system get clusterconfig k0s -o yaml"), exec.Sudo(h))
	if err != nil {
		return fmt.Errorf("%s: %w", h, err)
	}

	// stage the config in a local temp file for the editor
	tmpFile, err := os.CreateTemp("", "k0s-config.*.yaml")
	if err != nil {
		return err
	}
	defer func() { _ = os.Remove(tmpFile.Name()) }()

	if _, err := tmpFile.WriteString(oldCfg); err != nil {
		return err
	}

	// close before launching the editor so it sees the flushed content
	if err := tmpFile.Close(); err != nil {
		return err
	}

	cmd := osexec.Command(editor, tmpFile.Name())
	cmd.Stdin = c.Stdin
	cmd.Stdout = c.Stdout
	cmd.Stderr = c.Stderr
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("failed to start editor (%s): %w", cmd.String(), err)
	}

	newCfgBytes, err := os.ReadFile(tmpFile.Name())
	if err != nil {
		return err
	}
	newCfg := string(newCfgBytes)

	// an unchanged file is treated as an aborted edit
	if newCfg == oldCfg {
		return fmt.Errorf("configuration was not changed, aborting")
	}

	// apply the edited config on the leader via kubectl reading from stdin
	if err := h.Exec(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "apply -n kube-system -f -"), exec.Stdin(newCfg), exec.Sudo(h)); err != nil {
		return err
	}

	return nil
}

// shellEditor returns the command to use for interactive editing.
// Resolution order: $VISUAL, then $EDITOR, then a "vi" found on PATH.
// An error is returned when none of the three is available.
func shellEditor() (string, error) {
	for _, envVar := range []string{"VISUAL", "EDITOR"} {
		if cmd := os.Getenv(envVar); cmd != "" {
			return cmd, nil
		}
	}

	if viPath, err := osexec.LookPath("vi"); err == nil {
		return viPath, nil
	}

	return "", fmt.Errorf("could not detect shell editor ($VISUAL, $EDITOR)")
}
07070100000018000081A40000000000000000000000016842976900000371000000000000000000000000000000000000002600000000k0sctl-0.25.1/action/config_status.gopackage action

import (
	"context"
	"fmt"
	"io"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/rig/exec"
)

// ConfigStatus is the action behind "k0sctl config status": it shows the k0s
// dynamic config reconciliation events.
type ConfigStatus struct {
	Config      *v1beta1.Cluster
	Concurrency int // not referenced by Run in this file — presumably reserved; verify against callers
	Format      string
	Writer      io.Writer
}

// Run connects to the cluster leader and prints the kube-system events for
// the "k0s" clusterconfig object to c.Writer, optionally formatted with the
// kubectl output format given in c.Format.
func (c ConfigStatus) Run(_ context.Context) error {
	h := c.Config.Spec.K0sLeader()

	if err := h.Connect(); err != nil {
		return fmt.Errorf("failed to connect: %w", err)
	}
	defer h.Disconnect()

	if err := h.ResolveConfigurer(); err != nil {
		return err
	}
	// translate a non-empty format into a kubectl -o flag
	format := c.Format
	if format != "" {
		format = "-o " + format
	}

	output, err := h.ExecOutput(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "-n kube-system get event --field-selector involvedObject.name=k0s %s", format), exec.Sudo(h))
	if err != nil {
		return fmt.Errorf("%s: %w", h, err)
	}
	fmt.Fprintln(c.Writer, output)

	return nil
}
07070100000019000081A4000000000000000000000001684297690000034D000000000000000000000000000000000000002300000000k0sctl-0.25.1/action/kubeconfig.gopackage action

import (
	"context"

	"github.com/k0sproject/k0sctl/phase"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
)

// Kubeconfig is the action behind "k0sctl kubeconfig": it fetches the admin
// kubeconfig from the cluster.
type Kubeconfig struct {
	// Manager is the phase manager
	Manager              *phase.Manager
	KubeconfigAPIAddress string
	KubeconfigUser       string
	KubeconfigCluster    string

	// Kubeconfig receives the fetched kubeconfig content
	Kubeconfig string
}

// Run fetches the admin kubeconfig from the cluster's leader controller.
func (k *Kubeconfig) Run(ctx context.Context) error {
	// Only the leader needs to be contacted, so shrink the host list to just
	// that one host before connecting.
	leader := k.Manager.Config.Spec.K0sLeader()
	k.Manager.Config.Spec.Hosts = cluster.Hosts{leader}

	k.Manager.AddPhase(
		&phase.Connect{},
		&phase.DetectOS{},
		&phase.GetKubeconfig{APIAddress: k.KubeconfigAPIAddress, User: k.KubeconfigUser, Cluster: k.KubeconfigCluster},
		&phase.Disconnect{},
	)

	return k.Manager.Run(ctx)
}
0707010000001A000081A400000000000000000000000168429769000006AB000000000000000000000000000000000000001E00000000k0sctl-0.25.1/action/reset.gopackage action

import (
	"context"
	"fmt"
	"io"
	"os"
	"time"

	"github.com/k0sproject/k0sctl/phase"
	log "github.com/sirupsen/logrus"

	"github.com/AlecAivazis/survey/v2"
	"github.com/mattn/go-isatty"
)

// Reset is the action that uninstalls k0s from all hosts in the config.
type Reset struct {
	// Manager is the phase manager
	Manager *phase.Manager
	// Stdout is inspected to decide whether an interactive confirmation prompt can be shown
	Stdout  io.Writer
}

// Run resets (uninstalls k0s from) every host in the configuration. Unless
// --force is set, it requires an interactive terminal and an explicit
// confirmation from the user before proceeding.
func (r Reset) Run(ctx context.Context) error {
	if !phase.Force {
		// non-interactive output cannot prompt, so require --force instead
		if stdoutFile, ok := r.Stdout.(*os.File); ok && !isatty.IsTerminal(stdoutFile.Fd()) {
			return fmt.Errorf("reset requires --force")
		}
		confirmed := false
		prompt := &survey.Confirm{
			Message: "Going to reset all of the hosts, which will destroy all configuration and data, Are you sure?",
		}
		_ = survey.AskOne(prompt, &confirmed)
		if !confirmed {
			return fmt.Errorf("confirmation or --force required to proceed")
		}
	}

	start := time.Now()

	// mark every host for reset so the Reset* phases act on all of them
	for _, h := range r.Manager.Config.Spec.Hosts {
		h.Reset = true
	}

	lockPhase := &phase.Lock{}
	r.Manager.AddPhase(
		&phase.Connect{},
		&phase.DetectOS{},
		lockPhase,
		&phase.PrepareHosts{},
		&phase.GatherFacts{SkipMachineIDs: true},
		&phase.GatherK0sFacts{},
		&phase.RunHooks{Stage: "before", Action: "reset"},
		// everything is being torn down, so skip drain/delete/leave steps
		&phase.ResetWorkers{
			NoDrain:  true,
			NoDelete: true,
		},
		&phase.ResetControllers{
			NoDrain:  true,
			NoDelete: true,
			NoLeave:  true,
		},
		&phase.ResetLeader{},
		&phase.DaemonReload{},
		&phase.RunHooks{Stage: "after", Action: "reset"},
		&phase.Unlock{Cancel: lockPhase.Cancel},
		&phase.Disconnect{},
	)

	if err := r.Manager.Run(ctx); err != nil {
		return err
	}

	duration := time.Since(start).Truncate(time.Second)
	text := fmt.Sprintf("==> Finished in %s", duration)
	log.Info(phase.Colorize.Green(text).String())

	return nil
}
0707010000001B000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000001200000000k0sctl-0.25.1/cmd0707010000001C000081A40000000000000000000000016842976900000F01000000000000000000000000000000000000001B00000000k0sctl-0.25.1/cmd/apply.gopackage cmd

import (
	"fmt"
	"io"
	"os"
	"strings"

	"github.com/k0sproject/k0sctl/action"
	"github.com/k0sproject/k0sctl/phase"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"

	"github.com/urfave/cli/v2"
)

// applyCommand implements "k0sctl apply": it runs the full apply phase
// pipeline against the hosts in the configuration, optionally writing the
// resulting kubeconfig to a file.
var applyCommand = &cli.Command{
	Name:  "apply",
	Usage: "Apply a k0sctl configuration",
	Flags: []cli.Flag{
		configFlag,
		concurrencyFlag,
		concurrentUploadsFlag,
		dryRunFlag,
		&cli.BoolFlag{
			Name:  "no-wait",
			Usage: "Do not wait for worker nodes to join",
		},
		&cli.BoolFlag{
			Name:  "no-drain",
			Usage: "Do not drain worker nodes when upgrading",
		},
		&cli.StringFlag{
			Name:      "restore-from",
			Usage:     "Path to cluster backup archive to restore the state from",
			TakesFile: true,
		},
		&cli.StringFlag{
			Name:      "kubeconfig-out",
			Usage:     "Write kubeconfig to given path after a successful apply",
			TakesFile: true,
		},
		&cli.StringFlag{
			Name:  "kubeconfig-api-address",
			Usage: "Override the API address in the kubeconfig when kubeconfig-out is set",
		},
		&cli.StringFlag{
			Name:        "kubeconfig-user",
			Usage:       "Set kubernetes username",
			DefaultText: "admin",
		},
		&cli.StringFlag{
			Name:        "kubeconfig-cluster",
			Usage:       "Set kubernetes cluster name",
			DefaultText: "k0s-cluster",
		},
		&cli.BoolFlag{
			Name:   "disable-downgrade-check",
			Usage:  "Skip downgrade check",
			Hidden: true,
		},
		&cli.StringFlag{
			Name:  "evict-taint",
			Usage: "Taint to be applied to nodes before draining and removed after uncordoning in the format of <key=value>:<effect> (default: from spec.options.evictTaint)",
		},
		forceFlag,
		debugFlag,
		traceFlag,
		redactFlag,
		retryIntervalFlag,
		retryTimeoutFlag,
		timeoutFlag,
	},
	Before: actions(initLogging, initConfig, initManager, displayLogo, displayCopyright, warnOldCache),
	After:  actions(cancelTimeout),
	Action: func(ctx *cli.Context) error {
		var kubeconfigOut io.Writer

		// open the kubeconfig output file up front so a bad path fails fast
		if kc := ctx.String("kubeconfig-out"); kc != "" {
			out, err := os.OpenFile(kc, os.O_CREATE|os.O_WRONLY, 0o600)
			if err != nil {
				return fmt.Errorf("failed to open kubeconfig-out file: %w", err)
			}
			defer out.Close()
			kubeconfigOut = out
		}

		manager, ok := ctx.Context.Value(ctxManagerKey{}).(*phase.Manager)
		if !ok {
			return fmt.Errorf("failed to retrieve manager from context")
		}

		// the --evict-taint flag overrides spec.options.evictTaint
		if evictTaint := ctx.String("evict-taint"); evictTaint != "" {
			parts := strings.Split(evictTaint, ":")
			if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
				return fmt.Errorf("invalid evict-taint format, expected <key>:<effect>, got %s", evictTaint)
			}
			manager.Config.Spec.Options.EvictTaint = cluster.EvictTaintOption{
				Enabled: true,
				Taint:   parts[0],
				Effect:  parts[1],
			}
		}

		applyOpts := action.ApplyOptions{
			Manager:               manager,
			KubeconfigOut:         kubeconfigOut,
			KubeconfigAPIAddress:  ctx.String("kubeconfig-api-address"),
			KubeconfigUser:        ctx.String("kubeconfig-user"),
			KubeconfigCluster:     ctx.String("kubeconfig-cluster"),
			NoWait:                ctx.Bool("no-wait") || !manager.Config.Spec.Options.Wait.Enabled,
			NoDrain:               getNoDrainFlagOrConfig(ctx, manager.Config.Spec.Options.Drain),
			DisableDowngradeCheck: ctx.Bool("disable-downgrade-check"),
			RestoreFrom:           ctx.String("restore-from"),
			ConfigPaths:           ctx.StringSlice("config"),
		}

		applyAction := action.NewApply(applyOpts)

		if err := applyAction.Run(ctx.Context); err != nil {
			return fmt.Errorf("apply failed - log file saved to %s: %w", ctx.Context.Value(ctxLogFileKey{}).(string), err)
		}

		return nil
	},
}

// getNoDrainFlagOrConfig resolves whether worker draining should be skipped:
// an explicitly set --no-drain flag wins, otherwise the drain option from
// the configuration decides (drain disabled means no-drain).
func getNoDrainFlagOrConfig(ctx *cli.Context, drain cluster.DrainOption) bool {
	if !ctx.IsSet("no-drain") {
		return !drain.Enabled
	}
	return ctx.Bool("no-drain")
}
0707010000001D000081A400000000000000000000000168429769000003DD000000000000000000000000000000000000002000000000k0sctl-0.25.1/cmd/apply_test.gopackage cmd

import (
	"flag"
	"testing"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/urfave/cli/v2"
)

// TestGetNoDrainFlagOrConfig verifies the precedence rules of
// getNoDrainFlagOrConfig: the config's drain option decides when the flag is
// unset, and an explicitly set flag always wins.
func TestGetNoDrainFlagOrConfig(t *testing.T) {
	set := flag.NewFlagSet("test", 0)
	set.Bool("no-drain", false, "test flag")

	app := cli.NewApp()
	ctx := cli.NewContext(app, set, nil)

	// flag unset: config decides
	cfg := cluster.DrainOption{Enabled: true}
	if got := getNoDrainFlagOrConfig(ctx, cfg); got {
		t.Errorf("Expected false when config.Enabled is true and flag not set, got true")
	}

	cfg.Enabled = false
	if got := getNoDrainFlagOrConfig(ctx, cfg); !got {
		t.Errorf("Expected true when config.Enabled is false and flag not set, got false")
	}

	// flag explicitly set: flag wins over config
	_ = set.Set("no-drain", "true")
	if got := getNoDrainFlagOrConfig(ctx, cfg); !got {
		t.Errorf("Expected true when flag is set to true, got false")
	}

	_ = set.Set("no-drain", "false")
	if got := getNoDrainFlagOrConfig(ctx, cfg); got {
		t.Errorf("Expected false when flag is set to false, got true")
	}
}
0707010000001E000081A40000000000000000000000016842976900000799000000000000000000000000000000000000001C00000000k0sctl-0.25.1/cmd/backup.gopackage cmd

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
	"time"

	"github.com/k0sproject/k0sctl/action"
	"github.com/k0sproject/k0sctl/phase"
	log "github.com/sirupsen/logrus"
	"github.com/urfave/cli/v2"
)

// backupCommand implements "k0sctl backup": it streams a backup archive of
// the cluster state to the given output path (or a timestamped default).
var backupCommand = &cli.Command{
	Name:  "backup",
	Usage: "Take backup of existing clusters state",
	Flags: []cli.Flag{
		&cli.StringFlag{
			Name:    "output",
			Aliases: []string{"o"},
			Usage:   "Output path for the backup. Default is k0s_backup_<timestamp>.tar.gz in current directory",
		},
		configFlag,
		dryRunFlag,
		concurrencyFlag,
		forceFlag,
		debugFlag,
		traceFlag,
		redactFlag,
		timeoutFlag,
		retryIntervalFlag,
		retryTimeoutFlag,
	},
	Before: actions(initLogging, initConfig, initManager, displayLogo, displayCopyright),
	After:  actions(cancelTimeout),
	Action: func(ctx *cli.Context) error {
		// resultErr is tracked in a variable (instead of returned directly)
		// so the deferred cleanup below can see whether the backup failed
		var resultErr error
		var out io.Writer
		localFile := ctx.String("output")

		if localFile == "" {
			f, err := filepath.Abs(fmt.Sprintf("k0s_backup_%d.tar.gz", time.Now().Unix()))
			if err != nil {
				resultErr = fmt.Errorf("failed to generate local filename: %w", err)
				return resultErr
			}
			localFile = f
		}

		// remove a partially written backup file on failure
		defer func() {
			if out != nil && resultErr != nil && localFile != "" {
				if err := os.Remove(localFile); err != nil {
					log.Warnf("failed to clean up incomplete backup file %s: %s", localFile, err)
				}
			}
		}()

		if out == nil {
			f, err := os.OpenFile(localFile, os.O_RDWR|os.O_CREATE|os.O_SYNC, 0o600)
			if err != nil {
				resultErr = fmt.Errorf("open local file for writing: %w", err)
				return resultErr
			}
			defer f.Close()
			out = f
		}

		backupAction := action.Backup{
			Manager: ctx.Context.Value(ctxManagerKey{}).(*phase.Manager),
			Out:     out,
		}

		if err := backupAction.Run(ctx.Context); err != nil {
			resultErr = fmt.Errorf("backup failed - log file saved to %s: %w", ctx.Context.Value(ctxLogFileKey{}).(string), err)
		}

		return resultErr
	},
}
0707010000001F000081A40000000000000000000000016842976900000967000000000000000000000000000000000000002000000000k0sctl-0.25.1/cmd/completion.gopackage cmd

import (
	"fmt"
	"os"
	"path"
	"strings"

	"github.com/urfave/cli/v2"
)

// completionCommand implements "k0sctl completion": it prints a shell
// auto-completion script for bash, zsh or fish.
var completionCommand = &cli.Command{
	Name:   "completion",
	Hidden: false,
	Description: `Generates a shell auto-completion script.

   Typical locations for the generated output are:
    - Bash: /etc/bash_completion.d/k0sctl
    - Zsh: /usr/local/share/zsh/site-functions/_k0sctl
    - Fish: ~/.config/fish/completions/k0sctl.fish`,
	Flags: []cli.Flag{
		&cli.StringFlag{
			Name:    "shell",
			Usage:   "Shell to generate the script for",
			Value:   "bash",
			Aliases: []string{"s"},
			EnvVars: []string{"SHELL"},
		},
	},
	Action: func(ctx *cli.Context) error {
		// path.Base allows passing a full shell path (e.g. from $SHELL)
		switch path.Base(ctx.String("shell")) {
		case "bash":
			fmt.Fprint(ctx.App.Writer, bashTemplate())
		case "zsh":
			fmt.Fprint(ctx.App.Writer, zshTemplate())
		case "fish":
			t, err := ctx.App.ToFishCompletion()
			if err != nil {
				return err
			}
			fmt.Fprint(ctx.App.Writer, t)
		default:
			return fmt.Errorf("no completion script available for %s", ctx.String("shell"))
		}

		return nil
	},
}

// prog returns the program name to embed in the generated completion
// scripts: the basename of the current executable, falling back to "k0sctl"
// when the executable cannot be determined or its path ends in "main"
// (as happens with "go run").
func prog() string {
	exe, err := os.Executable()
	if err != nil || strings.HasSuffix(exe, "main") {
		return "k0sctl"
	}
	return path.Base(exe)
}

// bashTemplate returns a bash completion script that delegates word
// completion to the program's hidden --generate-bash-completion flag.
func bashTemplate() string {
	return fmt.Sprintf(`#! /bin/bash

_k0sctl_bash_autocomplete() {
  if [[ "${COMP_WORDS[0]}" != "source" ]]; then
    local cur opts base
    COMPREPLY=()
    cur="${COMP_WORDS[COMP_CWORD]}"
    if [[ "$cur" == "-"* ]]; then
      opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} ${cur} --generate-bash-completion )
    else
      opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-bash-completion )
    fi
    COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
    return 0
  fi
}

complete -o bashdefault -o default -o nospace -F _k0sctl_bash_autocomplete %s
`, prog())
}

// zshTemplate returns a completion script for zsh, wired to the program's
// hidden --generate-bash-completion flag like the bash variant.
func zshTemplate() string {
	p := prog()
	return fmt.Sprintf(`#compdef %s

_k0sctl_zsh_autocomplete() {
  local -a opts
  local cur
  cur=${words[-1]}
  if [[ "$cur" == "-"* ]]; then
    opts=("${(@f)$(_CLI_ZSH_AUTOCOMPLETE_HACK=1 ${words[@]:0:#words[@]-1} ${cur} --generate-bash-completion)}")
  else
    opts=("${(@f)$(_CLI_ZSH_AUTOCOMPLETE_HACK=1 ${words[@]:0:#words[@]-1} --generate-bash-completion)}")
  fi

  if [[ "${opts[1]}" != "" ]]; then
    _describe 'values' opts
  else
    _files
  fi

  return
}

compdef _k0sctl_zsh_autocomplete %s
`, p, p)
}
07070100000020000081A400000000000000000000000168429769000002E9000000000000000000000000000000000000002100000000k0sctl-0.25.1/cmd/config_edit.gopackage cmd

import (
	"github.com/k0sproject/k0sctl/action"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"

	"github.com/urfave/cli/v2"
)

// configEditCommand implements "k0sctl config edit": it opens the k0s
// dynamic config in the user's default editor.
var configEditCommand = &cli.Command{
	Name:  "edit",
	Usage: "Edit k0s dynamic config in SHELL's default editor",
	Flags: []cli.Flag{
		configFlag,
		debugFlag,
		forceFlag,
		traceFlag,
		redactFlag,
		timeoutFlag,
	},
	Before: actions(initLogging, initConfig),
	After:  actions(cancelTimeout),
	Action: func(ctx *cli.Context) error {
		configEditAction := action.ConfigEdit{
			Config: ctx.Context.Value(ctxConfigsKey{}).(*v1beta1.Cluster),
			Stdout: ctx.App.Writer,
			Stderr: ctx.App.ErrWriter,
			Stdin:  ctx.App.Reader,
		}

		return configEditAction.Run(ctx.Context)
	},
}
07070100000021000081A40000000000000000000000016842976900000314000000000000000000000000000000000000002300000000k0sctl-0.25.1/cmd/config_status.gopackage cmd

import (
	"github.com/k0sproject/k0sctl/action"

	"github.com/urfave/cli/v2"
)

// configStatusCommand implements "k0sctl config status": it shows the k0s
// dynamic config reconciliation events from the cluster.
var configStatusCommand = &cli.Command{
	Name:  "status",
	Usage: "Show k0s dynamic config reconciliation events",
	Flags: []cli.Flag{
		configFlag,
		forceFlag,
		debugFlag,
		traceFlag,
		redactFlag,
		timeoutFlag,
		&cli.StringFlag{
			Name:    "output",
			Usage:   "kubectl output formatting",
			Aliases: []string{"o"},
		},
	},
	Before: actions(initLogging, initConfig),
	After:  actions(cancelTimeout),
	Action: func(ctx *cli.Context) error {
		cfg, err := readConfig(ctx)
		if err != nil {
			return err
		}

		configStatusAction := action.ConfigStatus{
			Config: cfg,
			Format: ctx.String("output"),
			Writer: ctx.App.Writer,
		}

		return configStatusAction.Run(ctx.Context)
	},
}
07070100000022000081A40000000000000000000000016842976900003628000000000000000000000000000000000000001B00000000k0sctl-0.25.1/cmd/flags.gopackage cmd

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"strings"
	"time"

	"github.com/a8m/envsubst"
	"github.com/adrg/xdg"
	glob "github.com/bmatcuk/doublestar/v4"
	"github.com/k0sproject/dig"
	"github.com/k0sproject/k0sctl/phase"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/manifest"
	"github.com/k0sproject/k0sctl/pkg/retry"
	k0sctl "github.com/k0sproject/k0sctl/version"
	"github.com/k0sproject/rig"
	"github.com/k0sproject/rig/exec"
	"github.com/logrusorgru/aurora"
	"github.com/shiena/ansicolor"
	log "github.com/sirupsen/logrus"
	"github.com/urfave/cli/v2"
)

// Unexported context key types for values stashed in the cli context.
type (
	ctxConfigsKey struct{} // carries the parsed manifest reader
	ctxManagerKey struct{} // carries the *phase.Manager
	ctxLogFileKey struct{} // carries the log file path (string)
)

const veryLongTime = 100 * 365 * 24 * time.Hour // 100 years is infinite enough

var (
	// globalCancel releases the timeout context installed by timeoutFlag;
	// called from cancelTimeout in command After hooks
	globalCancel context.CancelFunc

	debugFlag = &cli.BoolFlag{
		Name:    "debug",
		Usage:   "Enable debug logging",
		Aliases: []string{"d"},
		EnvVars: []string{"DEBUG"},
	}

	dryRunFlag = &cli.BoolFlag{
		Name:    "dry-run",
		Usage:   "Do not alter cluster state, just print what would be done (EXPERIMENTAL)",
		EnvVars: []string{"DRY_RUN"},
	}

	traceFlag = &cli.BoolFlag{
		Name:    "trace",
		Usage:   "Enable trace logging",
		EnvVars: []string{"TRACE"},
		Hidden:  false,
	}

	redactFlag = &cli.BoolFlag{
		Name:  "no-redact",
		Usage: "Do not hide sensitive information in the output",
		Value: false,
	}

	// forceFlag propagates its value into the package-level phase.Force
	forceFlag = &cli.BoolFlag{
		Name:  "force",
		Usage: "Attempt a forced operation in case of certain failures",
		Action: func(c *cli.Context, force bool) error {
			phase.Force = force
			return nil
		},
	}

	configFlag = &cli.StringSliceFlag{
		Name:      "config",
		Usage:     "Path or glob to config yaml. Can be given multiple times. Use '-' to read from stdin.",
		Aliases:   []string{"c"},
		Value:     cli.NewStringSlice("k0sctl.yaml"),
		TakesFile: true,
	}

	concurrencyFlag = &cli.IntFlag{
		Name:  "concurrency",
		Usage: "Maximum number of hosts to configure in parallel, set to 0 for unlimited",
		Value: 30,
	}

	concurrentUploadsFlag = &cli.IntFlag{
		Name:  "concurrent-uploads",
		Usage: "Maximum number of files to upload in parallel, set to 0 for unlimited",
		Value: 5,
	}

	// timeoutFlag wraps the command context with a deadline; 0 means
	// effectively no deadline (veryLongTime)
	timeoutFlag = &cli.DurationFlag{
		Name:  "timeout",
		Usage: "Timeout for the whole operation. Set to 0 to wait forever. Can be used to allow more time for the operation to finish before giving up.",
		Value: 0,
		Action: func(ctx *cli.Context, d time.Duration) error {
			if d == 0 {
				d = veryLongTime
			}
			timeoutCtx, cancel := context.WithTimeout(ctx.Context, d)
			ctx.Context = timeoutCtx
			globalCancel = cancel
			return nil
		},
	}

	// retryTimeoutFlag is deprecated; it adjusts the package-level retry default
	retryTimeoutFlag = &cli.DurationFlag{
		Name:   "default-timeout",
		Hidden: true,
		Usage:  "Default timeout when waiting for node state changes",
		Value:  retry.DefaultTimeout,
		Action: func(_ *cli.Context, d time.Duration) error {
			log.Warnf("default-timeout flag is deprecated and will be removed, use --timeout instead")
			retry.DefaultTimeout = d
			return nil
		},
	}

	// retryIntervalFlag is deprecated; it adjusts the package-level retry interval
	retryIntervalFlag = &cli.DurationFlag{
		Name:   "retry-interval",
		Usage:  "Retry interval when waiting for node state changes",
		Hidden: true,
		Value:  retry.Interval,
		Action: func(_ *cli.Context, d time.Duration) error {
			log.Warnf("retry-interval flag is deprecated and will be removed")
			retry.Interval = d
			return nil
		},
	}

	// Colorize is the shared colorizer; starts disabled (no colors)
	Colorize = aurora.NewAurora(false)
)

// cancelTimeout releases the timeout context's cancel func installed by the
// --timeout flag handler, if one was set. Used in command After hooks.
func cancelTimeout(_ *cli.Context) error {
	if cancel := globalCancel; cancel != nil {
		cancel()
	}
	return nil
}

// actions chains multiple cli action functions into a single one, for use in
// urfave/cli's Before/After hooks. The returned function runs each given
// function in order and stops at the first error.
func actions(funcs ...func(*cli.Context) error) func(*cli.Context) error {
	return func(ctx *cli.Context) error {
		for _, fn := range funcs {
			if err := fn(ctx); err != nil {
				return err
			}
		}
		return nil
	}
}

// initConfig expands the --config flag values (globs, directories, "-" for
// stdin) into a list of files, parses each into a manifest reader and stores
// the reader in the command context under ctxConfigsKey.
func initConfig(ctx *cli.Context) error {
	f := ctx.StringSlice("config")
	if len(f) == 0 || f[0] == "" {
		return nil
	}

	var configs []string
	// detect globs and expand
	for _, p := range f {
		if p == "-" || p == "k0sctl.yaml" {
			// stdin and the default filename are used as-is
			configs = append(configs, p)
			continue
		}
		stat, err := os.Stat(p)
		if err == nil && stat.IsDir() {
			// a directory means "all yaml files under it, recursively"
			p = path.Join(p, "**/*.{yml,yaml}")
		}
		base, pattern := glob.SplitPattern(p)
		fsys := os.DirFS(base)
		matches, err := glob.Glob(fsys, pattern)
		if err != nil {
			return err
		}
		log.Debugf("glob %s expanded to %v", p, matches)
		for _, m := range matches {
			configs = append(configs, path.Join(base, m))
		}
	}

	if len(configs) == 0 {
		return fmt.Errorf("no configuration files found")
	}

	log.Debugf("%d potential configuration files found", len(configs))

	manifestReader := &manifest.Reader{}

	for _, f := range configs {
		// parse via a helper so each file is closed as soon as it has been
		// read, instead of accumulating deferred closes until this function
		// returns (the previous in-loop defer kept every file open)
		if err := parseConfigFile(ctx, f, manifestReader); err != nil {
			return err
		}
	}

	if manifestReader.Len() == 0 {
		return fmt.Errorf("no resource definition manifests found in configuration files")
	}

	ctx.Context = context.WithValue(ctx.Context, ctxConfigsKey{}, manifestReader)

	return nil
}

// parseConfigFile reads one config source (file or stdin via configReader),
// performs environment variable substitution and feeds the result to the
// manifest reader. The source is closed before returning.
func parseConfigFile(ctx *cli.Context, f string, manifestReader *manifest.Reader) error {
	file, err := configReader(ctx, f)
	if err != nil {
		return err
	}
	defer file.Close()

	content, err := io.ReadAll(file)
	if err != nil {
		return err
	}

	subst, err := envsubst.Bytes(content)
	if err != nil {
		return err
	}
	if bytes.Equal(subst, content) {
		log.Debugf("no variable substitutions made in %s", f)
	} else {
		log.Debugf("variable substitutions made in %s, before %d after %d bytes", f, len(content), len(subst))
	}

	log.Debugf("parsing configuration from %s", f)

	if err := manifestReader.ParseBytes(subst); err != nil {
		return fmt.Errorf("failed to parse config: %w", err)
	}

	log.Debugf("parsed %d resource definition manifests from %s", manifestReader.Len(), f)
	return nil
}

// displayCopyright prints the version and copyright banner to the app writer.
func displayCopyright(ctx *cli.Context) error {
	banner := fmt.Sprintf("k0sctl %s Copyright 2025, k0sctl authors.\n", k0sctl.Version)
	fmt.Fprint(ctx.App.Writer, banner)
	return nil
}

// warnOldCache emits a warning for each legacy cache directory that still
// exists on disk, pointing users at the current XDG cache location.
func warnOldCache(_ *cli.Context) error {
	var legacyDirs []string
	if home, err := os.UserHomeDir(); err == nil {
		legacyDirs = append(legacyDirs, path.Join(home, ".k0sctl", "cache"))
	}
	if runtime.GOOS == "linux" {
		legacyDirs = append(legacyDirs, "/var/cache/k0sctl")
	}
	for _, dir := range legacyDirs {
		if _, err := os.Stat(dir); err == nil {
			log.Warnf("An old cache directory still exists at %s, k0sctl now uses %s", dir, path.Join(xdg.CacheHome, "k0sctl"))
		}
	}
	return nil
}

// readConfig assembles a validated cluster configuration from the parsed
// manifests in the context: exactly one k0sctl Cluster resource is required;
// any k0s ClusterConfig resources are merged into spec.k0s.config and all
// remaining resources are stashed in metadata for later application.
func readConfig(ctx *cli.Context) (*v1beta1.Cluster, error) {
	mr, err := ManifestReader(ctx.Context)
	if err != nil {
		return nil, fmt.Errorf("failed to get manifest reader: %w", err)
	}
	ctlConfigs, err := mr.GetResources(v1beta1.APIVersion, "Cluster")
	if err != nil {
		return nil, fmt.Errorf("failed to get cluster resources: %w", err)
	}
	// multiple (or zero) cluster definitions are ambiguous, reject them
	if len(ctlConfigs) != 1 {
		return nil, fmt.Errorf("expected exactly one cluster config, got %d", len(ctlConfigs))
	}
	cfg := &v1beta1.Cluster{}
	if err := ctlConfigs[0].Unmarshal(cfg); err != nil {
		return nil, fmt.Errorf("failed to unmarshal cluster config: %w", err)
	}
	// merge any standalone k0s ClusterConfig documents into spec.k0s.config
	if k0sConfigs, err := mr.GetResources("k0s.k0sproject.io/v1beta1", "ClusterConfig"); err == nil && len(k0sConfigs) > 0 {
		for _, k0sConfig := range k0sConfigs {
			k0s := make(dig.Mapping)
			log.Debugf("unmarshalling %d bytes of config from %v", len(k0sConfig.Raw), k0sConfig.Filename())
			if err := k0sConfig.Unmarshal(&k0s); err != nil {
				return nil, fmt.Errorf("failed to unmarshal k0s config: %w", err)
			}
			log.Debugf("merging in k0s config from %v", k0sConfig.Filename())
			cfg.Spec.K0s.Config.Merge(k0s)
		}
	}
	// everything that is neither a Cluster nor a ClusterConfig is kept as an
	// extra manifest to be applied to the cluster
	otherConfigs := mr.FilterResources(func(rd *manifest.ResourceDefinition) bool {
		if strings.EqualFold(rd.APIVersion, v1beta1.APIVersion) && strings.EqualFold(rd.Kind, "cluster") {
			return false
		}
		if strings.EqualFold(rd.APIVersion, "k0s.k0sproject.io/v1beta1") && strings.EqualFold(rd.Kind, "clusterconfig") {
			return false
		}
		return true
	})
	if len(otherConfigs) > 0 {
		cfg.Metadata.Manifests = make(map[string][]byte)
		log.Debugf("found %d additional resources in the configuration", len(otherConfigs))
		for _, otherConfig := range otherConfigs {
			log.Debugf("found resource: %s (%d bytes)", otherConfig.Filename(), len(otherConfig.Raw))
			cfg.Metadata.Manifests[otherConfig.Filename()] = otherConfig.Raw
		}
	}

	if err := cfg.Validate(); err != nil {
		return nil, fmt.Errorf("cluster config validation failed: %w", err)
	}
	return cfg, nil
}

// initManager builds the phase manager from the parsed configuration,
// applies concurrency settings (flags take precedence over spec.options)
// and stores the manager in the command context under ctxManagerKey.
func initManager(ctx *cli.Context) error {
	cfg, err := readConfig(ctx)
	if err != nil {
		return err
	}

	manager, err := phase.NewManager(cfg)
	if err != nil {
		return fmt.Errorf("failed to initialize phase manager: %w", err)
	}

	// an explicitly set flag wins over the configuration value
	if ctx.IsSet("concurrency") {
		manager.Concurrency = ctx.Int("concurrency")
	} else {
		manager.Concurrency = cfg.Spec.Options.Concurrency.Limit
	}
	if ctx.IsSet("concurrent-uploads") {
		manager.ConcurrentUploads = ctx.Int("concurrent-uploads")
	} else {
		manager.ConcurrentUploads = cfg.Spec.Options.Concurrency.Uploads
	}
	manager.DryRun = ctx.Bool("dry-run")
	manager.Writer = ctx.App.Writer

	ctx.Context = context.WithValue(ctx.Context, ctxManagerKey{}, manager)

	return nil
}

// initLogging initializes the logger: the root logger captures everything at
// trace level with its own output discarded, and the screen/file hooks added
// below decide what is actually emitted and where.
func initLogging(ctx *cli.Context) error {
	log.SetLevel(log.TraceLevel)
	log.SetOutput(io.Discard)
	initScreenLogger(ctx, logLevelFromCtx(ctx, log.InfoLevel))
	exec.DisableRedact = ctx.Bool("no-redact")
	rig.SetLogger(log.StandardLogger())
	return initFileLogger(ctx)
}

// initSilentLogging initializes the logger in silent mode: the screen hook
// only shows fatal-level messages while the file hook still records
// everything, so commands that print machine-readable output stay clean.
// TODO too similar to initLogging
func initSilentLogging(ctx *cli.Context) error {
	log.SetLevel(log.TraceLevel)
	log.SetOutput(io.Discard)
	exec.DisableRedact = ctx.Bool("no-redact")
	initScreenLogger(ctx, logLevelFromCtx(ctx, log.FatalLevel))
	rig.SetLogger(log.StandardLogger())
	return initFileLogger(ctx)
}

// logLevelFromCtx resolves the desired log level from the --trace and
// --debug flags, falling back to defaultLevel when neither is set.
// --trace wins over --debug when both are given.
func logLevelFromCtx(ctx *cli.Context, defaultLevel log.Level) log.Level {
	switch {
	case ctx.Bool("trace"):
		return log.TraceLevel
	case ctx.Bool("debug"):
		return log.DebugLevel
	default:
		return defaultLevel
	}
}

// initScreenLogger attaches the on-screen logging hook at the given level.
func initScreenLogger(ctx *cli.Context, lvl log.Level) {
	log.AddHook(screenLoggerHook(ctx, lvl))
}

// initFileLogger attaches a debug-level file logging hook and stores the
// log file's path into the cli context under ctxLogFileKey so error
// messages can point the user at it.
func initFileLogger(ctx *cli.Context) error {
	lf, err := LogFile()
	if err != nil {
		return err
	}
	log.AddHook(fileLoggerHook(lf))
	ctx.Context = context.WithValue(ctx.Context, ctxLogFileKey{}, lf.Name())
	return nil
}

// logPath is the log file location relative to the XDG cache directory.
const logPath = "k0sctl/k0sctl.log"

// LogFile opens the k0sctl log file from the XDG cache directory for
// appending, creating it (and a cache path for it) when necessary, and
// writes a session marker line into it.
func LogFile() (*os.File, error) {
	fn, err := xdg.SearchCacheFile(logPath)
	if err != nil {
		// not found in an existing cache dir - resolve a path for a new file
		fn, err = xdg.CacheFile(logPath)
		if err != nil {
			return nil, err
		}
	}

	logFile, err := os.OpenFile(fn, os.O_RDWR|os.O_CREATE|os.O_APPEND|os.O_SYNC, 0o600)
	if err != nil {
		// wrap with %w (the original used %s and a capitalized message,
		// which loses the cause for errors.Is/As)
		return nil, fmt.Errorf("failed to open log %s: %w", fn, err)
	}

	// best-effort session separator; write errors are not fatal
	_, _ = fmt.Fprintf(logFile, "time=\"%s\" level=info msg=\"###### New session ######\"\n", time.Now().Format(time.RFC822))

	return logFile, nil
}

// configReader returns a ReadCloser for the configuration. "-" reads from
// the app's input (when it is piped or redirected), any other value is
// looked up on disk. The default filename k0sctl.yaml also gets a .yml
// fallback.
func configReader(ctx *cli.Context, f string) (io.ReadCloser, error) {
	if f == "-" {
		switch in := ctx.App.Reader.(type) {
		case *os.File:
			stat, err := in.Stat()
			if err != nil {
				return nil, fmt.Errorf("can't stat stdin: %s", err.Error())
			}
			// only accept piped/redirected input, not an interactive terminal
			if (stat.Mode() & os.ModeCharDevice) == 0 {
				return in, nil
			}
			return nil, fmt.Errorf("can't read stdin")
		case io.ReadCloser:
			return in, nil
		default:
			return io.NopCloser(ctx.App.Reader), nil
		}
	}

	candidates := []string{f}
	// add .yml to default value lookup
	if f == "k0sctl.yaml" {
		candidates = append(candidates, "k0sctl.yml")
	}

	for _, candidate := range candidates {
		if _, err := os.Stat(candidate); err != nil {
			continue
		}

		abs, err := filepath.Abs(candidate)
		if err != nil {
			return nil, err
		}

		return os.Open(abs)
	}

	return nil, fmt.Errorf("failed to locate configuration")
}

// loghook is a logrus hook that formats entries with Formatter and writes
// them to Writer for all levels enabled via SetLevel.
type loghook struct {
	Writer    io.Writer
	Formatter log.Formatter

	// levels holds the enabled log levels, populated by SetLevel
	levels []log.Level
}

// SetLevel enables all log levels at or below the given threshold
// (logrus levels grow more verbose as the numeric value increases).
func (h *loghook) SetLevel(level log.Level) {
	enabled := make([]log.Level, 0, len(log.AllLevels))
	for _, candidate := range log.AllLevels {
		if candidate <= level {
			enabled = append(enabled, candidate)
		}
	}
	h.levels = enabled
}

// Levels returns the log levels the hook fires for (logrus Hook interface).
func (h *loghook) Levels() []log.Level {
	return h.levels
}

// Fire formats the entry and writes it to the hook's writer (logrus Hook
// interface). Formatting failures are reported on stderr since the hook
// itself may be the only visible log output.
func (h *loghook) Fire(entry *log.Entry) error {
	line, err := h.Formatter.Format(entry)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Unable to format log entry: %v", err)
		return err
	}
	_, err = h.Writer.Write(line)
	return err
}

// screenLoggerHook creates the log hook used for on-screen output. On
// windows the writer is wrapped in an ANSI-converting writer and colors
// are always forced; elsewhere colors are enabled only when the output is
// a terminal.
func screenLoggerHook(ctx *cli.Context, lvl log.Level) *loghook {
	var forceColors bool
	writer := ctx.App.Writer
	if runtime.GOOS == "windows" {
		writer = ansicolor.NewAnsiColorWriter(ctx.App.Writer)
		forceColors = true
	} else if outF, ok := writer.(*os.File); ok {
		// the original ignored the Stat error, which would make fi nil and
		// panic on fi.Mode(); only enable colors when Stat succeeds and the
		// target is a character device (a terminal)
		if fi, err := outF.Stat(); err == nil && (fi.Mode()&os.ModeCharDevice) != 0 {
			forceColors = true
		}
	}

	if forceColors {
		Colorize = aurora.NewAurora(true)
		phase.Colorize = Colorize
	}

	l := &loghook{
		Writer: writer,
		// timestamps are only interesting at debug/trace verbosity
		Formatter: &log.TextFormatter{DisableTimestamp: lvl < log.DebugLevel, ForceColors: forceColors},
	}

	l.SetLevel(lvl)

	return l
}

// fileLoggerHook creates a log hook that writes fully timestamped entries
// at debug level and below into the given writer (the k0sctl log file).
func fileLoggerHook(logFile io.Writer) *loghook {
	formatter := &log.TextFormatter{
		FullTimestamp:          true,
		TimestampFormat:        time.RFC822,
		DisableLevelTruncation: true,
	}

	hook := &loghook{Writer: logFile, Formatter: formatter}
	hook.SetLevel(log.DebugLevel)

	return hook
}

// displayLogo prints the k0sctl ascii-art logo to the app writer.
func displayLogo(ctx *cli.Context) error {
	fmt.Fprint(ctx.App.Writer, logo)
	return nil
}

// ManifestReader returns the manifest reader previously stored in the
// context under ctxConfigsKey, or an error when it is missing or of an
// unexpected type.
func ManifestReader(ctx context.Context) (*manifest.Reader, error) {
	if ctx == nil {
		return nil, fmt.Errorf("context is nil")
	}
	switch r := ctx.Value(ctxConfigsKey{}).(type) {
	case nil:
		return nil, fmt.Errorf("config reader not found in context")
	case *manifest.Reader:
		return r, nil
	default:
		return nil, fmt.Errorf("config reader in context is not of the correct type")
	}
}
07070100000023000081A40000000000000000000000016842976900001337000000000000000000000000000000000000001A00000000k0sctl-0.25.1/cmd/init.gopackage cmd

import (
	"bufio"
	"os"
	"strconv"
	"strings"

	"github.com/creasty/defaults"
	"github.com/k0sproject/dig"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/rig"

	"github.com/urfave/cli/v2"
	"gopkg.in/yaml.v2"
)

// DefaultK0sYaml is pretty much what "k0s default-config" outputs
var DefaultK0sYaml = []byte(`apiVersion: k0s.k0sproject.io/v1beta1
kind: Cluster
metadata:
  name: k0s
spec:
  api:
    port: 6443
    k0sApiPort: 9443
  storage:
    type: etcd
  network:
    podCIDR: 10.244.0.0/16
    serviceCIDR: 10.96.0.0/12
    provider: kuberouter
    kuberouter:
      mtu: 0
      peerRouterIPs: ""
      peerRouterASNs: ""
      autoMTU: true
    kubeProxy:
      disabled: false
      mode: iptables
  podSecurityPolicy:
    defaultPolicy: 00-k0s-privileged
  telemetry:
    enabled: true
  installConfig:
    users:
      etcdUser: etcd
      kineUser: kube-apiserver
      konnectivityUser: konnectivity-server
      kubeAPIserverUser: kube-apiserver
      kubeSchedulerUser: kube-scheduler
  konnectivity:
    agentPort: 8132
    adminPort: 8133
`)

var defaultHosts = cluster.Hosts{
	&cluster.Host{
		Connection: rig.Connection{
			SSH: &rig.SSH{
				Address: "10.0.0.1",
			},
		},
		Role: "controller",
	},
	&cluster.Host{
		Connection: rig.Connection{
			SSH: &rig.SSH{
				Address: "10.0.0.2",
			},
		},
		Role: "worker",
	},
}

// hostFromAddress parses a "[user@]address[:port]" string and builds a
// cluster host with the given role, user and ssh key path applied. The
// user and port embedded in the address override the parameters; role
// defaults to "worker" and port to 22.
func hostFromAddress(addr, role, user, keypath string) *cluster.Host {
	// a user embedded in the address overrides the user parameter; a
	// leading "@" (empty user) is ignored, matching the original idx > 0 check
	if u, rest, found := strings.Cut(addr, "@"); found && u != "" {
		user = u
		addr = rest
	}

	port := 22
	if hostpart, portpart, found := strings.Cut(addr, ":"); found && hostpart != "" {
		// an unparseable port is silently ignored but still stripped
		if p, err := strconv.Atoi(portpart); err == nil {
			port = p
		}
		addr = hostpart
	}

	host := &cluster.Host{
		Connection: rig.Connection{
			SSH: &rig.SSH{
				Address: addr,
				Port:    port,
			},
		},
	}

	host.Role = "worker"
	if role != "" {
		host.Role = role
	}
	if user != "" {
		host.SSH.User = user
	}

	_ = defaults.Set(host)

	// apply the explicit key path (or clear the defaulted one) after
	// defaults so it isn't overwritten
	if keypath == "" {
		host.SSH.KeyPath = nil
	} else {
		host.SSH.KeyPath = &keypath
	}

	return host
}

// buildHosts turns a list of address lines into cluster hosts. The first
// ccount hosts become controllers and the rest workers. Blank lines and
// comment lines are skipped and trailing comments stripped. When no
// usable addresses remain, a default example host set is returned.
func buildHosts(addresses []string, ccount int, user, keypath string) cluster.Hosts {
	var hosts cluster.Hosts
	for _, line := range addresses {
		// strip trailing comments
		if hash := strings.Index(line, "#"); hash > 0 {
			line = line[:hash]
		}
		line = strings.TrimSpace(line)
		// skip empty and comment lines
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}

		role := "worker"
		if len(hosts) < ccount {
			role = "controller"
		}

		hosts = append(hosts, hostFromAddress(line, role, user, keypath))
	}

	if len(hosts) == 0 {
		return defaultHosts
	}

	return hosts
}

// initCommand implements "k0sctl init", printing a new configuration
// template to stdout. Host addresses can come from args or piped stdin.
var initCommand = &cli.Command{
	Name:        "init",
	Usage:       "Create a configuration template",
	Description: "Outputs a new k0sctl configuration. When a list of addresses are provided, hosts are generated into the configuration. The list of addresses can also be provided via stdin.",
	ArgsUsage:   "[[user@]address[:port] ...]",
	Before:      actions(initLogging),
	Flags: []cli.Flag{
		&cli.BoolFlag{
			Name:  "k0s",
			Usage: "Include a skeleton k0s config section",
		},
		&cli.StringFlag{
			Name:    "cluster-name",
			Usage:   "Cluster name",
			Aliases: []string{"n"},
			Value:   "k0s-cluster",
		},
		&cli.IntFlag{
			Name:    "controller-count",
			Usage:   "The number of controllers to create when addresses are given",
			Aliases: []string{"C"},
			Value:   1,
		},
		&cli.StringFlag{
			Name:    "user",
			Usage:   "Host user when addresses given",
			Aliases: []string{"u"},
		},
		&cli.StringFlag{
			Name:    "key-path",
			Usage:   "Host key path when addresses given",
			Aliases: []string{"i"},
		},
	},
	Action: func(ctx *cli.Context) error {
		var addresses []string

		// Read addresses from stdin when input is piped/redirected (not a terminal)
		if inF, ok := ctx.App.Reader.(*os.File); ok {
			stat, err := inF.Stat()
			if err == nil && (stat.Mode()&os.ModeCharDevice) == 0 {
				// read from the app reader that was stat'ed, not os.Stdin
				// directly, so the check and the read target agree
				rd := bufio.NewReader(inF)
				for {
					row, _, err := rd.ReadLine()
					if err != nil {
						// EOF (or any read error) ends the address list;
						// the original re-checked the outer, already-nil
						// err here, which was dead code
						break
					}
					addresses = append(addresses, string(row))
				}
			}
		}

		cfg := v1beta1.Cluster{}

		if err := defaults.Set(&cfg); err != nil {
			return err
		}

		cfg.Metadata.Name = ctx.String("cluster-name")

		// Read addresses from args
		addresses = append(addresses, ctx.Args().Slice()...)
		cfg.Spec.Hosts = buildHosts(addresses, ctx.Int("controller-count"), ctx.String("user"), ctx.String("key-path"))
		for _, h := range cfg.Spec.Hosts {
			_ = defaults.Set(h)
		}

		if ctx.Bool("k0s") {
			cfg.Spec.K0s.Config = dig.Mapping{}
			if err := yaml.Unmarshal(DefaultK0sYaml, &cfg.Spec.K0s.Config); err != nil {
				return err
			}
		}

		encoder := yaml.NewEncoder(os.Stdout)
		return encoder.Encode(&cfg)
	},
}
07070100000024000081A40000000000000000000000016842976900000488000000000000000000000000000000000000001F00000000k0sctl-0.25.1/cmd/init_test.gopackage cmd

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// TestBuildHosts verifies the controller/worker role split at different
// controller counts, that empty address lines are skipped, and that the
// user and key path arguments (or their defaults) are applied.
func TestBuildHosts(t *testing.T) {
	addresses := []string{
		"10.0.0.1",
		"",
		"10.0.0.2",
		"10.0.0.3",
	}
	hosts := buildHosts(addresses, 1, "test", "foo")
	require.Len(t, hosts, 3)
	require.Len(t, hosts.Controllers(), 1)
	require.Len(t, hosts.Workers(), 2)
	require.Equal(t, "test", hosts.First().SSH.User)
	require.Equal(t, "foo", *hosts.First().SSH.KeyPath)

	// with no explicit user/keypath the defaults apply and KeyPath is cleared
	hosts = buildHosts(addresses, 2, "", "")
	require.Len(t, hosts, 3)
	require.Len(t, hosts.Controllers(), 2)
	require.Len(t, hosts.Workers(), 1)
	require.Equal(t, "root", hosts.First().SSH.User)
	require.Nil(t, hosts.First().SSH.KeyPath)
}

// TestBuildHostsWithComments verifies that full-line comments are skipped
// and trailing comments (with or without a separating space) are stripped
// from addresses.
func TestBuildHostsWithComments(t *testing.T) {
	addresses := []string{
		"# controllers",
		"10.0.0.1",
		"# workers",
		"10.0.0.2# second worker",
		"10.0.0.3 # last worker",
	}
	hosts := buildHosts(addresses, 1, "", "")
	require.Len(t, hosts, 3)
	require.Len(t, hosts.Controllers(), 1)
	require.Len(t, hosts.Workers(), 2)
	require.Equal(t, "10.0.0.1", hosts[0].Address())
	require.Equal(t, "10.0.0.2", hosts[1].Address())
	require.Equal(t, "10.0.0.3", hosts[2].Address())
}
07070100000025000081A40000000000000000000000016842976900000606000000000000000000000000000000000000002000000000k0sctl-0.25.1/cmd/kubeconfig.gopackage cmd

import (
	"fmt"

	"github.com/k0sproject/k0sctl/action"
	"github.com/k0sproject/k0sctl/phase"
	"github.com/urfave/cli/v2"
)

// kubeconfigCommand implements "k0sctl kubeconfig", printing the cluster's
// admin kubeconfig. It runs with silent logging so the output can be
// piped directly into a file.
var kubeconfigCommand = &cli.Command{
	Name:  "kubeconfig",
	Usage: "Output the admin kubeconfig of the cluster",
	Flags: []cli.Flag{
		&cli.StringFlag{
			Name:        "address",
			Value:       "",
			DefaultText: "auto-detect",
		},
		&cli.StringFlag{
			Name:        "user",
			Usage:       "Set kubernetes cluster username",
			Aliases:     []string{"u"},
			DefaultText: "admin",
		},
		&cli.StringFlag{
			Name:        "cluster",
			Usage:       "Set kubernetes cluster name",
			Aliases:     []string{"n"},
			DefaultText: "k0s-cluster",
		},
		configFlag,
		dryRunFlag,
		forceFlag,
		debugFlag,
		traceFlag,
		redactFlag,
		timeoutFlag,
		retryIntervalFlag,
		retryTimeoutFlag,
	},
	// initManager stores a *phase.Manager in the context for the Action below
	Before: actions(initSilentLogging, initConfig, initManager),
	After:  actions(cancelTimeout),
	Action: func(ctx *cli.Context) error {
		kubeconfigAction := action.Kubeconfig{
			Manager:              ctx.Context.Value(ctxManagerKey{}).(*phase.Manager),
			KubeconfigAPIAddress: ctx.String("address"),
			KubeconfigUser:       ctx.String("user"),
			KubeconfigCluster:    ctx.String("cluster"),
		}

		if err := kubeconfigAction.Run(ctx.Context); err != nil {
			return fmt.Errorf("getting kubeconfig failed - log file saved to %s: %w", ctx.Context.Value(ctxLogFileKey{}).(string), err)
		}

		// the action stores the result on the shared config metadata
		_, err := fmt.Fprintf(ctx.App.Writer, "%s\n", kubeconfigAction.Manager.Config.Metadata.Kubeconfig)
		return err
	},
}
07070100000026000081A40000000000000000000000016842976900000354000000000000000000000000000000000000001A00000000k0sctl-0.25.1/cmd/logo.gopackage cmd

const logo = `
⠀âŖŋâŖŋ⡇⠀⠀âĸ€âŖ´âŖžâŖŋ⠟⠁âĸ¸âŖŋâŖŋâŖŋâŖŋâŖŋâŖŋâŖŋâĄŋ⠛⠁⠀âĸ¸âŖŋâŖŋâŖŋâŖŋâŖŋâŖŋâŖŋâŖŋâŖŋâŖŋâŖŋ⠀█████████ █████████ ███
⠀âŖŋâŖŋâĄ‡âŖ âŖļâŖŋâĄŋ⠋⠀⠀⠀âĸ¸âŖŋâĄ‡â €â €â €âŖ â €â €âĸ€âŖ âĄ†âĸ¸âŖŋâŖŋ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀███          ███    ███
⠀âŖŋâŖŋâŖŋâŖŋâŖŸâ ‹â €â €â €â €â €âĸ¸âŖŋ⡇⠀âĸ°âŖžâŖŋ⠀⠀âŖŋâŖŋ⡇âĸ¸âŖŋâŖŋâŖŋâŖŋâŖŋâŖŋâŖŋâŖŋâŖŋâŖŋâŖŋ⠀███          ███    ███
⠀âŖŋâŖŋ⡏â ģâŖŋâŖˇâŖ¤âĄ€â €â €â €â ¸â ›â â €â ¸â ‹â â €â €âŖŋâŖŋ⡇⠈⠉⠉⠉⠉⠉⠉⠉⠉âĸšâŖŋâŖŋ⠀███          ███    ███
⠀âŖŋâŖŋ⡇⠀⠀⠙âĸŋâŖŋâŖĻâŖ€â €â €â €âŖ âŖļâŖļâŖļâŖļâŖļâŖļâŖŋâŖŋ⡇âĸ°âŖļâŖļâŖļâŖļâŖļâŖļâŖļâŖļâŖžâŖŋâŖŋ⠀█████████    ███    ██████████
`
07070100000027000081A4000000000000000000000001684297690000037C000000000000000000000000000000000000001B00000000k0sctl-0.25.1/cmd/reset.gopackage cmd

import (
	"fmt"

	"github.com/k0sproject/k0sctl/action"
	"github.com/k0sproject/k0sctl/phase"

	"github.com/urfave/cli/v2"
)

// resetCommand implements "k0sctl reset", which removes k0s from all hosts
// in the configuration via the action.Reset phase runner.
var resetCommand = &cli.Command{
	Name:  "reset",
	Usage: "Remove traces of k0s from all of the hosts",
	Flags: []cli.Flag{
		configFlag,
		concurrencyFlag,
		dryRunFlag,
		forceFlag,
		debugFlag,
		traceFlag,
		redactFlag,
		timeoutFlag,
		retryIntervalFlag,
		retryTimeoutFlag,
	},
	// initManager stores a *phase.Manager in the context for the Action below
	Before: actions(initLogging, initConfig, initManager, displayCopyright),
	After:  actions(cancelTimeout),
	Action: func(ctx *cli.Context) error {
		resetAction := action.Reset{
			Manager: ctx.Context.Value(ctxManagerKey{}).(*phase.Manager),
			Stdout:  ctx.App.Writer,
		}

		if err := resetAction.Run(ctx.Context); err != nil {
			return fmt.Errorf("reset failed - log file saved to %s: %w", ctx.Context.Value(ctxLogFileKey{}).(string), err)
		}

		return nil
	},
}
07070100000028000081A4000000000000000000000001684297690000065D000000000000000000000000000000000000001A00000000k0sctl-0.25.1/cmd/root.gopackage cmd

import (
	"context"
	"io"
	"os"
	"os/signal"
	"syscall"

	log "github.com/sirupsen/logrus"
	"github.com/urfave/cli/v2"
)

// trapSignals starts a goroutine that handles SIGINT/SIGTERM: the first
// signal cancels the context for a graceful abort, a second one exits
// immediately with status 130. The goroutine stops when ctx is done.
func trapSignals(ctx context.Context, cancel context.CancelFunc) {
	ch := make(chan os.Signal, 2) // Buffer size 2 to catch double signals
	signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)

	go func() {
		sigCount := 0
		for {
			select {
			case <-ctx.Done():
				// context finished elsewhere - stop listening
				return
			case <-ch:
				sigCount++
				if sigCount == 1 {
					log.Warn("Aborting... Press Ctrl-C again to exit now.")
					cancel()
				} else {
					// second signal: give up on graceful shutdown
					log.Error("Forced exit")
					os.Exit(130)
				}
			}
		}
	}()
}

// NewK0sctl returns the urfave/cli.App for k0sctl, wired to the given
// input reader and output/error writers.
func NewK0sctl(in io.Reader, out, errOut io.Writer) *cli.App {
	return &cli.App{
		Name:  "k0sctl",
		Usage: "k0s cluster management tool",
		Flags: []cli.Flag{
			debugFlag,
			traceFlag,
			redactFlag,
		},
		Commands: []*cli.Command{
			versionCommand,
			applyCommand,
			kubeconfigCommand,
			initCommand,
			resetCommand,
			backupCommand,
			{
				Name:  "config",
				Usage: "Configuration related sub-commands",
				Subcommands: []*cli.Command{
					configEditCommand,
					configStatusCommand,
				},
			},
			completionCommand,
		},
		EnableBashCompletion: true,
		// install a cancellable context once per process and start the
		// ctrl-c trap so commands can abort gracefully
		Before: func(ctx *cli.Context) error {
			if globalCancel == nil {
				cancelCtx, cancel := context.WithCancel(ctx.Context)
				ctx.Context = cancelCtx
				globalCancel = cancel
			}
			go trapSignals(ctx.Context, globalCancel)
			return nil
		},
		After: func(ctx *cli.Context) error {
			return cancelTimeout(ctx)
		},
		Reader:    in,
		Writer:    out,
		ErrWriter: errOut,
	}
}
07070100000029000081A400000000000000000000000168429769000004A3000000000000000000000000000000000000001D00000000k0sctl-0.25.1/cmd/version.gopackage cmd

import (
	"fmt"
	"os"

	"github.com/k0sproject/k0sctl/integration/github"
	"github.com/k0sproject/k0sctl/version"
	"github.com/urfave/cli/v2"
)

// versionCommand implements "k0sctl version". With --k0s or --k0sctl it
// instead looks up the latest released version from github and exits.
var versionCommand = &cli.Command{
	Name:  "version",
	Usage: "Output k0sctl version",
	Flags: []cli.Flag{
		&cli.BoolFlag{
			Name:  "k0s",
			Usage: "Retrieve the latest k0s version number",
		},
		&cli.BoolFlag{
			Name:  "k0sctl",
			Usage: "Retrieve the latest k0sctl version number",
		},
		&cli.BoolFlag{
			Name:  "pre",
			Usage: "When used in conjunction with --k0s, a pre release is accepted as the latest version",
		},
	},
	// NOTE(review): the os.Exit calls below skip the app's After hooks and
	// any deferred cleanup - presumably acceptable because nothing has been
	// initialized at this point, but verify before changing
	Before: func(ctx *cli.Context) error {
		if ctx.Bool("k0s") {
			v, err := github.LatestK0sVersion(ctx.Bool("pre"))
			if err != nil {
				return err
			}
			fmt.Fprintln(ctx.App.Writer, v)
			os.Exit(0)
		}

		if ctx.Bool("k0sctl") {
			v, err := github.LatestRelease(ctx.Bool("pre"))
			if err != nil {
				return err
			}
			fmt.Fprintln(ctx.App.Writer, v.TagName)
			os.Exit(0)
		}

		return nil
	},
	Action: func(ctx *cli.Context) error {
		fmt.Fprintf(ctx.App.Writer, "version: %s\n", version.Version)
		fmt.Fprintf(ctx.App.Writer, "commit: %s\n", version.GitCommit)
		return nil
	},
}
0707010000002A000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000001900000000k0sctl-0.25.1/configurer0707010000002B000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000001F00000000k0sctl-0.25.1/configurer/linux0707010000002C000081A400000000000000000000000168429769000025EB000000000000000000000000000000000000002200000000k0sctl-0.25.1/configurer/linux.gopackage configurer

import (
	"fmt"
	"path"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"time"

	"al.essio.dev/pkg/shellescape"
	"github.com/k0sproject/rig/exec"
	"github.com/k0sproject/rig/os"
	"github.com/k0sproject/version"
)

// Linux is a base module for various linux OS support packages
type Linux struct {
	// paths maps logical path keys (e.g. "K0sBinaryPath") to concrete
	// filesystem locations; lazily initialized by initPaths
	paths map[string]string
	// pathMu guards all access to paths
	pathMu sync.Mutex
}

// NOTE The Linux struct does not embed rig/os.Linux because it will confuse
// go as the distro-configurers' parents embed it too. This means you can't
// add functions to base Linux package that call functions in the rig/os.Linux package,
// you can however write those functions in the distro-configurers.
// An example of this problem is the ReplaceK0sTokenPath function, which would like to
// call `l.ServiceScriptPath("kos")`, which was worked around here by getting the
// path as a parameter.

// initPaths populates the default path table on first use. Callers must
// hold pathMu.
func (l *Linux) initPaths() {
	if l.paths != nil {
		return
	}
	l.paths = map[string]string{
		"K0sBinaryPath":      "/usr/local/bin/k0s",
		"K0sConfigPath":      "/etc/k0s/k0s.yaml",
		"K0sJoinTokenPath":   "/etc/k0s/k0stoken",
		"DataDirDefaultPath": "/var/lib/k0s",
	}
}

// K0sBinaryPath returns the path to the k0s binary on the host
func (l *Linux) K0sBinaryPath() string {
	l.pathMu.Lock()
	defer l.pathMu.Unlock()

	l.initPaths()
	return l.paths["K0sBinaryPath"]
}

// K0sConfigPath returns the path to the k0s config file on the host
func (l *Linux) K0sConfigPath() string {
	l.pathMu.Lock()
	defer l.pathMu.Unlock()

	l.initPaths()
	return l.paths["K0sConfigPath"]
}

// K0sJoinTokenPath returns the path to the k0s join token file on the host
func (l *Linux) K0sJoinTokenPath() string {
	l.pathMu.Lock()
	defer l.pathMu.Unlock()

	l.initPaths()
	return l.paths["K0sJoinTokenPath"]
}

// DataDirDefaultPath returns the path to the k0s data dir on the host
func (l *Linux) DataDirDefaultPath() string {
	l.pathMu.Lock()
	defer l.pathMu.Unlock()

	l.initPaths()
	return l.paths["DataDirDefaultPath"]
}

// SetPath sets a path for a key
func (l *Linux) SetPath(key, value string) {
	l.pathMu.Lock()
	defer l.pathMu.Unlock()

	l.initPaths()
	l.paths[key] = value
}

// Arch returns the host processor architecture in the format k0s expects
// it, mapping the output of "uname -m" to go-style arch names. Unknown
// values are passed through unchanged.
func (l *Linux) Arch(h os.Host) (string, error) {
	uname, err := h.ExecOutput("uname -m")
	if err != nil {
		return "", err
	}

	switch uname {
	case "x86_64":
		return "amd64", nil
	case "aarch64":
		return "arm64", nil
	case "armv7l", "armv8l", "aarch32", "arm32", "armhfp", "arm-32":
		return "arm", nil
	}

	return uname, nil
}

// K0sCmdf can be used to construct k0s commands in sprintf style.
// The result is the k0s binary path followed by the formatted arguments.
func (l *Linux) K0sCmdf(template string, args ...interface{}) string {
	return fmt.Sprintf("%s %s", l.K0sBinaryPath(), fmt.Sprintf(template, args...))
}

// K0sBinaryVersion returns the version of the k0s binary installed on the
// host by running "k0s version" (with sudo) and parsing its output.
func (l *Linux) K0sBinaryVersion(h os.Host) (*version.Version, error) {
	output, err := h.ExecOutput(l.K0sCmdf("version"), exec.Sudo(h))
	if err != nil {
		return nil, err
	}

	// named "v" rather than "version" - the original local shadowed the
	// imported version package
	v, err := version.NewVersion(output)
	if err != nil {
		return nil, err
	}

	return v, nil
}

// K0sctlLockFilePath returns a path to a lock file
// The standard lock directory is preferred when it exists; otherwise /tmp
// is used as a fallback.
func (l *Linux) K0sctlLockFilePath(h os.Host) string {
	if h.Exec("test -d /run/lock", exec.Sudo(h)) == nil {
		return "/run/lock/k0sctl"
	}

	return "/tmp/k0sctl.lock"
}

// TempFile returns a temp file path
func (l *Linux) TempFile(h os.Host) (string, error) {
	return h.ExecOutput("mktemp")
}

// TempDir returns a temp dir path
func (l *Linux) TempDir(h os.Host) (string, error) {
	return h.ExecOutput("mktemp -d")
}

// trailingNumberRegex matches a run of digits at the very end of a string.
var trailingNumberRegex = regexp.MustCompile(`(\d+)$`)

// trailingNumber extracts a number from the end of a string, returning the
// parsed value and true on success, or 0 and false when the string does
// not end in digits (or the digits don't parse as an int).
func trailingNumber(s string) (int, bool) {
	m := trailingNumberRegex.FindStringSubmatch(s)
	if len(m) == 0 {
		return 0, false
	}
	n, err := strconv.Atoi(m[1])
	if err != nil {
		return 0, false
	}
	return n, true
}

// DownloadURL performs a download from a URL on the host
// Both the destination and the url are shell-quoted before being embedded
// in the curl command line.
func (l *Linux) DownloadURL(h os.Host, url, destination string, opts ...exec.Option) error {
	err := h.Exec(fmt.Sprintf(`curl -sSLf -o %s %s`, shellescape.Quote(destination), shellescape.Quote(url)), opts...)
	if err != nil {
		// curl's exit status is parsed from the tail of the error message;
		// with -f, exit status 22 indicates an HTTP error response
		if exitCode, ok := trailingNumber(err.Error()); ok && exitCode == 22 {
			return fmt.Errorf("download failed: http 404 - not found: %w", err)
		}
		return fmt.Errorf("download failed: %w", err)
	}
	return nil
}

// DownloadK0s performs k0s binary download from github on the host
// The "+" in build-metadata versions is url-encoded as %2B and the leading
// "v" stripped before building the release asset URL.
func (l *Linux) DownloadK0s(h os.Host, path string, version *version.Version, arch string, opts ...exec.Option) error {
	v := strings.ReplaceAll(strings.TrimPrefix(version.String(), "v"), "+", "%2B")
	url := fmt.Sprintf("https://github.com/k0sproject/k0s/releases/download/v%[1]s/k0s-v%[1]s-%[2]s", v, arch)
	if err := l.DownloadURL(h, url, path, opts...); err != nil {
		return fmt.Errorf("failed to download k0s - check connectivity and k0s version validity: %w", err)
	}

	return nil
}

// ReplaceK0sTokenPath replaces the config path in the service stub
func (l *Linux) ReplaceK0sTokenPath(h os.Host, spath string) error {
	return h.Exec(fmt.Sprintf("sed -i 's^REPLACEME^%s^g' %s", l.K0sJoinTokenPath(), spath))
}

// FileContains returns true if a file contains the substring
func (l *Linux) FileContains(h os.Host, path, s string) bool {
	return h.Execf(`grep -q "%s" "%s"`, s, path, exec.Sudo(h)) == nil
}

// MoveFile moves a file on the host
func (l *Linux) MoveFile(h os.Host, src, dst string) error {
	return h.Execf(`mv "%s" "%s"`, src, dst, exec.Sudo(h))
}

// KubeconfigPath returns the path to a kubeconfig on the host
func (l *Linux) KubeconfigPath(h os.Host, dataDir string) string {
	linux := &os.Linux{}

	// if admin.conf exists, use that
	adminConfPath := path.Join(dataDir, "pki/admin.conf")
	if linux.FileExist(h, adminConfPath) {
		return adminConfPath
	}
	return path.Join(dataDir, "kubelet.conf")
}

// KubectlCmdf returns a command line in sprintf manner for running kubectl on the host using the kubeconfig from KubeconfigPath
func (l *Linux) KubectlCmdf(h os.Host, dataDir, s string, args ...interface{}) string {
	return fmt.Sprintf(`env "KUBECONFIG=%s" %s`, l.KubeconfigPath(h, dataDir), l.K0sCmdf(`kubectl %s`, fmt.Sprintf(s, args...)))
}

// HTTPStatus makes a HTTP GET request to the url and returns the status code or an error
// curl discards the body and prints only the numeric status code, which is
// then parsed; -1 is returned on any failure.
func (l *Linux) HTTPStatus(h os.Host, url string) (int, error) {
	output, err := h.ExecOutput(fmt.Sprintf(`curl -kso /dev/null --connect-timeout 20 -w "%%{http_code}" "%s"`, url))
	if err != nil {
		return -1, err
	}
	status, err := strconv.Atoi(output)
	if err != nil {
		return -1, fmt.Errorf("invalid response: %s", err.Error())
	}

	return status, nil
}

const sbinPath = `PATH=/usr/local/sbin:/usr/sbin:/sbin:$PATH`

// PrivateInterface tries to find a private network interface
func (l *Linux) PrivateInterface(h os.Host) (string, error) {
	output, err := h.ExecOutput(fmt.Sprintf(`%s; (ip route list scope global | grep -E "\b(172|10|192\.168)\.") || (ip route list | grep -m1 default)`, sbinPath))
	if err == nil {
		re := regexp.MustCompile(`\bdev (\w+)`)
		match := re.FindSubmatch([]byte(output))
		if len(match) > 0 {
			return string(match[1]), nil
		}
		err = fmt.Errorf("can't find 'dev' in output")
	}

	return "", fmt.Errorf("failed to detect a private network interface, define the host privateInterface manually (%s)", err.Error())
}

// PrivateAddress resolves internal ip from private interface
func (l *Linux) PrivateAddress(h os.Host, iface, publicip string) (string, error) {
	output, err := h.ExecOutput(fmt.Sprintf("%s ip -o addr show dev %s scope global", sbinPath, iface))
	if err != nil {
		return "", fmt.Errorf("failed to find private interface with name %s: %s. Make sure you've set correct 'privateInterface' for the host in config", iface, output)
	}

	lines := strings.Split(output, "\n")
	for _, line := range lines {
		items := strings.Fields(line)
		if len(items) < 4 {
			continue
		}
		// When subnet mask is 255.255.255.255, CIDR notation is not /32, but it is omitted instead.
		index := strings.Index(items[3], "/")
		addr := items[3]
		if index >= 0 {
			addr = items[3][:index]
		}
		if len(strings.Split(addr, ".")) == 4 {
			if publicip != addr {
				return addr, nil
			}
		}
	}

	return "", fmt.Errorf("not found")
}

// UpsertFile creates a file in path with content only if the file does not exist already
// The content is staged into a temp file and moved into place with
// "mv -n" so the write is atomic and never overwrites an existing file.
func (l *Linux) UpsertFile(h os.Host, path, content string) error {
	tmpf, err := l.TempFile(h)
	if err != nil {
		return err
	}
	if err := h.Execf(`cat > "%s"`, tmpf, exec.Stdin(content), exec.Sudo(h)); err != nil {
		return err
	}

	// best-effort cleanup; a no-op when mv already consumed the temp file
	defer func() {
		_ = h.Execf(`rm -f "%s"`, tmpf, exec.Sudo(h))
	}()

	// mv -n is atomic
	if err := h.Execf(`mv -n "%s" "%s"`, tmpf, path, exec.Sudo(h)); err != nil {
		return fmt.Errorf("upsert failed: %w", err)
	}

	// if original tempfile still exists, error out
	// (detects the case where mv -n refused to overwrite an existing
	// destination without reporting a non-zero exit status)
	if h.Execf(`test -f "%s"`, tmpf) == nil {
		return fmt.Errorf("upsert failed")
	}

	return nil
}

func (l *Linux) DeleteDir(h os.Host, path string, opts ...exec.Option) error {
	return h.Exec(fmt.Sprintf(`rmdir %s`, shellescape.Quote(path)), opts...)
}

func (l *Linux) MachineID(h os.Host) (string, error) {
	return h.ExecOutput(`cat /etc/machine-id || cat /var/lib/dbus/machine-id`)
}

// SystemTime returns the system time as UTC reported by the OS or an error if this fails
func (l *Linux) SystemTime(h os.Host) (time.Time, error) {
	// "date -u +%s" prints the current time as a unix timestamp
	out, err := h.ExecOutput("date -u +\"%s\"")
	if err != nil {
		return time.Time{}, fmt.Errorf("failed to get system time: %w", err)
	}

	seconds, err := strconv.ParseInt(out, 10, 64)
	if err != nil {
		return time.Time{}, fmt.Errorf("failed to parse system time: %w", err)
	}

	return time.Unix(seconds, 0), nil
}
0707010000002D000081A40000000000000000000000016842976900000353000000000000000000000000000000000000002900000000k0sctl-0.25.1/configurer/linux/alpine.gopackage linux

import (
	"strings"

	"github.com/k0sproject/k0sctl/configurer"
	"github.com/k0sproject/rig"
	"github.com/k0sproject/rig/exec"
	"github.com/k0sproject/rig/os"
	"github.com/k0sproject/rig/os/registry"
)

// BaseLinux for tricking go interfaces
type BaseLinux struct {
	configurer.Linux
}

// Alpine provides OS support for Alpine Linux
type Alpine struct {
	os.Linux
	BaseLinux
}

func init() {
	registry.RegisterOSModule(
		func(os rig.OSVersion) bool {
			return os.ID == "alpine"
		},
		func() interface{} {
			return &Alpine{}
		},
	)
}

// InstallPackage installs packages via apk (the original comment
// incorrectly said slackpkg)
func (l *Alpine) InstallPackage(h os.Host, pkg ...string) error {
	return h.Execf("apk add --update %s", strings.Join(pkg, " "), exec.Sudo(h))
}

func (l *Alpine) Prepare(h os.Host) error {
	return l.InstallPackage(h, "findutils", "coreutils")
}
0707010000002E000081A400000000000000000000000168429769000000D9000000000000000000000000000000000000002E00000000k0sctl-0.25.1/configurer/linux/alpine_test.gopackage linux

import (
	"testing"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
)

func TestAlpineConfigurerInterface(t *testing.T) {
	h := cluster.Host{}
	h.Configurer = &Alpine{}
}
0707010000002F000081A400000000000000000000000168429769000001DF000000000000000000000000000000000000002C00000000k0sctl-0.25.1/configurer/linux/archlinux.gopackage linux

import (
	"github.com/k0sproject/k0sctl/configurer"
	"github.com/k0sproject/rig"
	"github.com/k0sproject/rig/os/linux"
	"github.com/k0sproject/rig/os/registry"
)

// Archlinux provides OS support for Archlinux systems
type Archlinux struct {
	linux.Archlinux
	configurer.Linux
}

func init() {
	registry.RegisterOSModule(
		func(os rig.OSVersion) bool {
			return os.ID == "arch" || os.IDLike == "arch"
		},
		func() interface{} {
			return &Archlinux{}
		},
	)
}
07070100000030000081A40000000000000000000000016842976900000275000000000000000000000000000000000000002900000000k0sctl-0.25.1/configurer/linux/coreos.gopackage linux

import (
	"errors"
	"strings"

	"github.com/k0sproject/rig"
	"github.com/k0sproject/rig/os"
	"github.com/k0sproject/rig/os/registry"
)

// CoreOS provides OS support for ostree based Fedora & RHEL systems
type CoreOS struct {
	os.Linux
	BaseLinux
}

func init() {
	registry.RegisterOSModule(
		func(os rig.OSVersion) bool {
			return strings.Contains(os.Name, "CoreOS") && (os.ID == "fedora" || os.ID == "rhel")
		},
		func() interface{} {
			return &CoreOS{}
		},
	)
}

func (l *CoreOS) InstallPackage(h os.Host, pkg ...string) error {
	return errors.New("CoreOS does not support installing packages manually")
}
07070100000031000081A400000000000000000000000168429769000001BB000000000000000000000000000000000000002900000000k0sctl-0.25.1/configurer/linux/debian.gopackage linux

import (
	"github.com/k0sproject/k0sctl/configurer"
	"github.com/k0sproject/rig"
	"github.com/k0sproject/rig/os/linux"
	"github.com/k0sproject/rig/os/registry"
)

// Debian provides OS support for Debian systems
type Debian struct {
	linux.Ubuntu
	configurer.Linux
}

func init() {
	registry.RegisterOSModule(
		func(os rig.OSVersion) bool {
			return os.ID == "debian"
		},
		func() interface{} {
			return &Debian{}
		},
	)
}
07070100000032000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000002F00000000k0sctl-0.25.1/configurer/linux/enterpriselinux07070100000033000081A4000000000000000000000001684297690000010F000000000000000000000000000000000000003200000000k0sctl-0.25.1/configurer/linux/enterpriselinux.gopackage linux

import (
	"github.com/k0sproject/k0sctl/configurer"
	"github.com/k0sproject/rig/os/linux"
)

// EnterpriseLinux is a base package for several RHEL-like enterprise linux distributions
type EnterpriseLinux struct {
	linux.EnterpriseLinux
	configurer.Linux
}
07070100000034000081A400000000000000000000000168429769000001EC000000000000000000000000000000000000003C00000000k0sctl-0.25.1/configurer/linux/enterpriselinux/almalinux.gopackage enterpriselinux

import (
	"github.com/k0sproject/k0sctl/configurer"
	k0slinux "github.com/k0sproject/k0sctl/configurer/linux"
	"github.com/k0sproject/rig"
	"github.com/k0sproject/rig/os/registry"
)

// AlmaLinux provides OS support for AlmaLinux
type AlmaLinux struct {
	k0slinux.EnterpriseLinux // shared RHEL-family base from this repo
	configurer.Linux         // shared k0sctl linux configurer
}

func init() {
	registry.RegisterOSModule(
		func(os rig.OSVersion) bool {
			return os.ID == "almalinux"
		},
		func() interface{} {
			return &AlmaLinux{}
		},
	)
}
07070100000035000081A400000000000000000000000168429769000002BA000000000000000000000000000000000000003900000000k0sctl-0.25.1/configurer/linux/enterpriselinux/amazon.gopackage enterpriselinux

import (
	"github.com/k0sproject/k0sctl/configurer"
	k0slinux "github.com/k0sproject/k0sctl/configurer/linux"
	"github.com/k0sproject/rig"
	"github.com/k0sproject/rig/os"
	"github.com/k0sproject/rig/os/registry"
)

// AmazonLinux provides OS support for AmazonLinux
type AmazonLinux struct {
	k0slinux.EnterpriseLinux // shared RHEL-family base from this repo
	configurer.Linux         // shared k0sctl linux configurer
}

// Hostname on amazon linux will return the full hostname
func (l *AmazonLinux) Hostname(h os.Host) string {
	// Error intentionally discarded: on failure this returns an empty
	// string rather than propagating the exec error.
	hostname, _ := h.ExecOutput("hostname")

	return hostname
}

func init() {
	registry.RegisterOSModule(
		func(os rig.OSVersion) bool {
			return os.ID == "amzn"
		},
		func() interface{} {
			return &AmazonLinux{}
		},
	)
}
07070100000036000081A400000000000000000000000168429769000001DD000000000000000000000000000000000000003900000000k0sctl-0.25.1/configurer/linux/enterpriselinux/centos.gopackage enterpriselinux

import (
	"github.com/k0sproject/k0sctl/configurer"
	k0slinux "github.com/k0sproject/k0sctl/configurer/linux"
	"github.com/k0sproject/rig"
	"github.com/k0sproject/rig/os/registry"
)

// CentOS provides OS support for CentOS
type CentOS struct {
	k0slinux.EnterpriseLinux // shared RHEL-family base from this repo
	configurer.Linux         // shared k0sctl linux configurer
}

func init() {
	registry.RegisterOSModule(
		func(os rig.OSVersion) bool {
			return os.ID == "centos"
		},
		func() interface{} {
			return &CentOS{}
		},
	)
}
07070100000037000081A40000000000000000000000016842976900000211000000000000000000000000000000000000003900000000k0sctl-0.25.1/configurer/linux/enterpriselinux/fedora.gopackage enterpriselinux

import (
	"strings"

	"github.com/k0sproject/k0sctl/configurer"
	k0slinux "github.com/k0sproject/k0sctl/configurer/linux"
	"github.com/k0sproject/rig"
	"github.com/k0sproject/rig/os/registry"
)

// Fedora provides OS support for Fedora
type Fedora struct {
	k0slinux.EnterpriseLinux // shared RHEL-family base from this repo
	configurer.Linux         // shared k0sctl linux configurer
}

func init() {
	registry.RegisterOSModule(
		func(os rig.OSVersion) bool {
			return os.ID == "fedora" && !strings.Contains(os.Name, "CoreOS")
		},
		func() interface{} {
			return &Fedora{}
		},
	)
}
07070100000038000081A400000000000000000000000168429769000001EE000000000000000000000000000000000000003900000000k0sctl-0.25.1/configurer/linux/enterpriselinux/oracle.gopackage enterpriselinux

import (
	"github.com/k0sproject/k0sctl/configurer"
	k0slinux "github.com/k0sproject/k0sctl/configurer/linux"
	"github.com/k0sproject/rig"
	"github.com/k0sproject/rig/os/registry"
)

// OracleLinux provides OS support for Oracle Linux
type OracleLinux struct {
	k0slinux.EnterpriseLinux // shared RHEL-family base from this repo
	configurer.Linux         // shared k0sctl linux configurer
}

func init() {
	registry.RegisterOSModule(
		func(os rig.OSVersion) bool {
			return os.ID == "ol"
		},
		func() interface{} {
			return &OracleLinux{}
		},
	)
}
07070100000039000081A400000000000000000000000168429769000001DD000000000000000000000000000000000000003700000000k0sctl-0.25.1/configurer/linux/enterpriselinux/rhel.gopackage enterpriselinux

import (
	"strings"

	k0slinux "github.com/k0sproject/k0sctl/configurer/linux"
	"github.com/k0sproject/rig"
	"github.com/k0sproject/rig/os/registry"
)

// RHEL provides OS support for RedHat Enterprise Linux
type RHEL struct {
	k0slinux.EnterpriseLinux // shared RHEL-family base from this repo
}

func init() {
	registry.RegisterOSModule(
		func(os rig.OSVersion) bool {
			return os.ID == "rhel" && !strings.Contains(os.Name, "CoreOS")
		},
		func() interface{} {
			return &RHEL{}
		},
	)
}
0707010000003A000081A400000000000000000000000168429769000001EC000000000000000000000000000000000000003800000000k0sctl-0.25.1/configurer/linux/enterpriselinux/rocky.gopackage enterpriselinux

import (
	"github.com/k0sproject/k0sctl/configurer"
	k0slinux "github.com/k0sproject/k0sctl/configurer/linux"
	"github.com/k0sproject/rig"
	"github.com/k0sproject/rig/os/registry"
)

// RockyLinux provides OS support for RockyLinux
type RockyLinux struct {
	k0slinux.EnterpriseLinux // shared RHEL-family base from this repo
	configurer.Linux         // shared k0sctl linux configurer
}

func init() {
	registry.RegisterOSModule(
		func(os rig.OSVersion) bool {
			return os.ID == "rocky"
		},
		func() interface{} {
			return &RockyLinux{}
		},
	)
}
0707010000003B000081A40000000000000000000000016842976900000237000000000000000000000000000000000000002A00000000k0sctl-0.25.1/configurer/linux/flatcar.gopackage linux

import (
	"errors"

	"github.com/k0sproject/rig"
	"github.com/k0sproject/rig/os"
	"github.com/k0sproject/rig/os/registry"
)

// Flatcar provides OS support for Flatcar Container Linux. Package
// installation is unsupported (see InstallPackage below) and the k0s
// binary path is overridden to /opt/bin/k0s at registration time.
type Flatcar struct {
	BaseLinux // shared k0sctl linux configurer from this package
	os.Linux  // generic linux support from rig
}

func init() {
	registry.RegisterOSModule(
		func(os rig.OSVersion) bool {
			return os.ID == "flatcar"
		},
		func() interface{} {
			fc := &Flatcar{}
			fc.SetPath("K0sBinaryPath", "/opt/bin/k0s")
			return fc
		},
	)
}

// InstallPackage always fails: Flatcar Container Linux does not support
// installing packages on the fly.
func (f *Flatcar) InstallPackage(_ os.Host, _ ...string) error {
	return errors.New("FlatcarContainerLinux does not support installing packages manually")
}
0707010000003C000081A4000000000000000000000001684297690000070E000000000000000000000000000000000000002D00000000k0sctl-0.25.1/configurer/linux/linux_test.gopackage linux

import (
	"fmt"
	"io"
	"io/fs"
	"testing"

	"github.com/k0sproject/rig/exec"
	"github.com/stretchr/testify/require"
)

// mockHost is a minimal stub of the host interface used by the path tests
// below. Every method is a no-op returning zero values, except Execf,
// which can be made to fail via the ExecFError flag.
type mockHost struct {
	// ExecFError makes Execf return an error when true, simulating a
	// failing remote command.
	ExecFError bool
}

// Upload is a no-op stub.
func (m mockHost) Upload(source, destination string, perm fs.FileMode, opts ...exec.Option) error {
	return nil
}

// Exec is a no-op stub.
func (m mockHost) Exec(string, ...exec.Option) error {
	return nil
}

// ExecOutput is a no-op stub returning an empty string.
func (m mockHost) ExecOutput(string, ...exec.Option) (string, error) {
	return "", nil
}

// Execf fails when ExecFError is set, otherwise succeeds.
func (m mockHost) Execf(string, ...interface{}) error {
	if m.ExecFError {
		return fmt.Errorf("error")
	}
	return nil
}

// ExecOutputf is a no-op stub returning an empty string.
func (m mockHost) ExecOutputf(string, ...interface{}) (string, error) {
	return "", nil
}

// ExecStreams is a no-op stub.
func (m mockHost) ExecStreams(cmd string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer, opts ...exec.Option) (exec.Waiter, error) {
	return nil, nil
}

// String is a no-op stub.
func (m mockHost) String() string {
	return ""
}

// Sudo is a no-op stub returning the empty string as the wrapped command.
func (m mockHost) Sudo(string) (string, error) {
	return "", nil
}

// TestPaths verifies the k0s binary and kubeconfig path resolution for a
// configurer with an overridden binary path (Flatcar) versus the default
// (Ubuntu), including the fallback kubeconfig path when the remote
// command used for detection fails.
func TestPaths(t *testing.T) {
	fc := &Flatcar{}
	fc.SetPath("K0sBinaryPath", "/opt/bin/k0s")

	ubuntu := &Ubuntu{}

	// h1 simulates a host where remote commands succeed, h2 one where
	// Execf fails (driving KubeconfigPath to its fallback).
	h1 := &mockHost{
		ExecFError: false,
	}
	h2 := &mockHost{
		ExecFError: true,
	}

	require.Equal(t, "/opt/bin/k0s", fc.K0sBinaryPath())
	require.Equal(t, "/usr/local/bin/k0s", ubuntu.K0sBinaryPath())

	require.Equal(t, "/opt/bin/k0s --help", fc.K0sCmdf("--help"))
	require.Equal(t, "/usr/local/bin/k0s --help", ubuntu.K0sCmdf("--help"))

	require.Equal(t, "/var/lib/k0s/pki/admin.conf", fc.KubeconfigPath(h1, fc.DataDirDefaultPath()))
	require.Equal(t, "/var/lib/k0s/pki/admin.conf", ubuntu.KubeconfigPath(h1, ubuntu.DataDirDefaultPath()))

	// When the detection command fails, the kubelet kubeconfig is used.
	require.Equal(t, "/var/lib/k0s/kubelet.conf", fc.KubeconfigPath(h2, fc.DataDirDefaultPath()))
	require.Equal(t, "/var/lib/k0s/kubelet.conf", ubuntu.KubeconfigPath(h2, ubuntu.DataDirDefaultPath()))
}
0707010000003D000081A40000000000000000000000016842976900000171000000000000000000000000000000000000002B00000000k0sctl-0.25.1/configurer/linux/opensuse.gopackage linux

import (
	"github.com/k0sproject/rig"
	"github.com/k0sproject/rig/os/registry"
)

// OpenSUSE provides OS support for OpenSUSE
type OpenSUSE struct {
	SLES // OpenSUSE reuses the SLES configurer unchanged
}

func init() {
	registry.RegisterOSModule(
		func(os rig.OSVersion) bool {
			return os.ID == "opensuse" || os.ID == "opensuse-microos"
		},
		func() interface{} {
			return &OpenSUSE{}
		},
	)
}
0707010000003E000081A40000000000000000000000016842976900000321000000000000000000000000000000000000002C00000000k0sctl-0.25.1/configurer/linux/slackware.gopackage linux

import (
	"fmt"
	"strings"

	"github.com/k0sproject/rig"
	"github.com/k0sproject/rig/os"
	"github.com/k0sproject/rig/os/registry"
)

// Slackware provides OS support for Slackware Linux
type Slackware struct {
	BaseLinux // shared k0sctl linux configurer from this package
	os.Linux  // generic linux support from rig
}

func init() {
	registry.RegisterOSModule(
		func(os rig.OSVersion) bool {
			return os.ID == "slackware"
		},
		func() interface{} {
			return &Slackware{}
		},
	)
}

// InstallPackage installs packages via slackpkg. The package database is
// refreshed first, and both sudo-wrapped commands are chained with && in a
// single remote invocation so the install only runs if the update succeeds.
//
// Defect fixed: the two h.Sudo errors were returned bare, making it
// impossible to tell which command failed to build; they are now wrapped
// with context via %w.
func (l *Slackware) InstallPackage(h os.Host, pkg ...string) error {
	updatecmd, err := h.Sudo("slackpkg update")
	if err != nil {
		return fmt.Errorf("building slackpkg update command: %w", err)
	}
	installcmd, err := h.Sudo(fmt.Sprintf("slackpkg install --priority ADD %s", strings.Join(pkg, " ")))
	if err != nil {
		return fmt.Errorf("building slackpkg install command: %w", err)
	}

	return h.Execf("%s && %s", updatecmd, installcmd)
}
0707010000003F000081A400000000000000000000000168429769000001BC000000000000000000000000000000000000002700000000k0sctl-0.25.1/configurer/linux/sles.gopackage linux

import (
	"github.com/k0sproject/rig"
	"github.com/k0sproject/rig/os"
	"github.com/k0sproject/rig/os/linux"
	"github.com/k0sproject/rig/os/registry"
)

// SLES provides OS support for SUSE Linux Enterprise Server
type SLES struct {
	linux.SLES // rig's SLES support (zypper based)
	os.Linux   // generic linux support from rig
	BaseLinux  // shared k0sctl linux configurer from this package
}

func init() {
	registry.RegisterOSModule(
		func(os rig.OSVersion) bool {
			return os.ID == "sles"
		},
		func() interface{} {
			return &SLES{}
		},
	)
}
07070100000040000081A40000000000000000000000016842976900000152000000000000000000000000000000000000002900000000k0sctl-0.25.1/configurer/linux/ubuntu.gopackage linux

import (
	"github.com/k0sproject/rig"
	"github.com/k0sproject/rig/os/registry"
)

// Ubuntu provides OS support for Ubuntu systems
type Ubuntu struct {
	Debian // Ubuntu reuses the Debian configurer unchanged
}

func init() {
	registry.RegisterOSModule(
		func(os rig.OSVersion) bool {
			return os.ID == "ubuntu"
		},
		func() interface{} {
			return &Ubuntu{}
		},
	)
}
07070100000041000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000001300000000k0sctl-0.25.1/docs07070100000042000081A40000000000000000000000016842976900000000000000000000000000000000000000000000001C00000000k0sctl-0.25.1/docs/.gitkeep07070100000043000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000001700000000k0sctl-0.25.1/examples07070100000044000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000001E00000000k0sctl-0.25.1/examples/aws-tf07070100000045000081A4000000000000000000000001684297690000005D000000000000000000000000000000000000002900000000k0sctl-0.25.1/examples/aws-tf/.gitignoreterraform.tfvars
.terraform
terraform.tfstate*
aws_private.pem
.terraform.lock.hcl
kubeconfig07070100000046000081A4000000000000000000000001684297690000010B000000000000000000000000000000000000002700000000k0sctl-0.25.1/examples/aws-tf/Makefile
apply:
	terraform init
	terraform apply -auto-approve
	terraform output -raw k0s_cluster | go run ../../main.go apply --config -

destroy:
	terraform destroy -auto-approve

kubeconfig:
	terraform output -raw k0s_cluster | go run ../../main.go kubeconfig --config -
	07070100000047000081A400000000000000000000000168429769000003E4000000000000000000000000000000000000002800000000k0sctl-0.25.1/examples/aws-tf/README.md# Bootstrapping a k0s cluster on AWS using Terraform

This directory provides an example flow with `k0sctl` tool together with Terraform using AWS as the cloud provider.

## Prerequisites
- You need an account and AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_SESSION_TOKEN for AWS
- Terraform >=v0.14.3 installed
- You will need the `k0sctl` binary in `PATH` 

## TF Steps
- `terraform init`
- `terraform apply`
- `terraform output -raw k0s_cluster | k0sctl apply --config -` NOTE: this assumes that `k0sctl` binary is available in the `PATH`

This will create a cluster with single controller and worker nodes. 
If you want to override the default behaviour, create a `terraform.tfvars` file with the needed details. You can use the provided `terraform.tfvars.example` as a template.

## Makefile steps

In case you don't want to do all those steps you can use the Makefile. 

To deploy a k0s cluster with k0sctl:
- `make apply` 

Get kubeconfig:
- `make kubeconfig`
Teardown:
- `make destroy`
07070100000048000081A400000000000000000000000168429769000001C6000000000000000000000000000000000000002C00000000k0sctl-0.25.1/examples/aws-tf/controller.tfresource "aws_instance" "cluster-controller" {
  count         = var.controller_count
  ami           = data.aws_ami.ubuntu.id
  instance_type = var.cluster_flavor

  tags = {
    Name = "controller"
  }
  key_name                    = aws_key_pair.cluster-key.key_name
  vpc_security_group_ids      = [aws_security_group.cluster_allow_ssh.id]
  associate_public_ip_address = true

  root_block_device {
    volume_type = "gp2"
    volume_size = 10
  }
}07070100000049000081A400000000000000000000000168429769000007B9000000000000000000000000000000000000002600000000k0sctl-0.25.1/examples/aws-tf/main.tfterraform {
  required_version = ">= 0.14.3"
}

provider "aws" {
  region = "eu-north-1"
}

resource "tls_private_key" "k0sctl" {
  algorithm = "RSA"
  rsa_bits  = 4096
}

resource "aws_key_pair" "cluster-key" {
  key_name   = format("%s_key", var.cluster_name)
  public_key = tls_private_key.k0sctl.public_key_openssh
}

// Save the private key to filesystem
resource "local_file" "aws_private_pem" {
  file_permission = "600"
  filename        = format("%s/%s", path.module, "aws_private.pem")
  content         = tls_private_key.k0sctl.private_key_pem
}

resource "aws_security_group" "cluster_allow_ssh" {
  name        = format("%s-allow-ssh", var.cluster_name)
  description = "Allow ssh inbound traffic"
  // vpc_id      = aws_vpc.cluster-vpc.id

  // Allow all incoming and outgoing ports.
  // TODO: need to create a more restrictive policy
  ingress {
    description = "SSH from VPC"
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = {
    Name = format("%s-allow-ssh", var.cluster_name)
  }
}

data "aws_ami" "ubuntu" {
  most_recent = true

  filter {
    name   = "name"
    values = ["ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*"]
  }

  filter {
    name   = "virtualization-type"
    values = ["hvm"]
  }

  owners = ["099720109477"]
}


locals {
  k0s_tmpl = {
    apiVersion = "k0sctl.k0sproject.io/v1beta1"
    kind       = "cluster"
    spec = {
      hosts = [
        for host in concat(aws_instance.cluster-controller, aws_instance.cluster-workers) : {
          ssh = {
            address = host.public_ip
            user    = "ubuntu"
            keyPath = "./aws_private.pem"
          }
          role = host.tags["Name"]
        }
      ]
      k0s = {
        version = "0.13.1"
      }
    }
  }
}

output "k0s_cluster" {
  value = yamlencode(local.k0s_tmpl)

}
0707010000004A000081A4000000000000000000000001684297690000005E000000000000000000000000000000000000003700000000k0sctl-0.25.1/examples/aws-tf/terraform.tfvars.examplecluster_name = "pick_a_name"
controller_count = 1
worker_count = 2
cluster_flavor = "t2.large"0707010000004B000081A4000000000000000000000001684297690000010D000000000000000000000000000000000000002B00000000k0sctl-0.25.1/examples/aws-tf/variables.tfvariable "cluster_name" {
  type    = string
  default = "k0sctl"
}

variable "controller_count" {
  type    = number
  default = 1
}

variable "worker_count" {
  type    = number
  default = 1
}

variable "cluster_flavor" {
  type    = string
  default = "t3.large"
}
0707010000004C000081A400000000000000000000000168429769000001D7000000000000000000000000000000000000002800000000k0sctl-0.25.1/examples/aws-tf/worker.tfresource "aws_instance" "cluster-workers" {
  count         = var.worker_count
  ami           = data.aws_ami.ubuntu.id
  instance_type = var.cluster_flavor
  tags = {
    Name = "worker"
  }
  key_name                    = aws_key_pair.cluster-key.key_name
  vpc_security_group_ids      = [aws_security_group.cluster_allow_ssh.id]
  associate_public_ip_address = true
  source_dest_check = false

  root_block_device {
    volume_type = "gp2"
    volume_size = 20
  }
}
0707010000004D000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000002100000000k0sctl-0.25.1/examples/bootloose0707010000004E000081A40000000000000000000000016842976900000016000000000000000000000000000000000000002C00000000k0sctl-0.25.1/examples/bootloose/.gitignorek0sctl.yaml
binaries/
0707010000004F000081A4000000000000000000000001684297690000030C000000000000000000000000000000000000003000000000k0sctl-0.25.1/examples/bootloose/bootloose.yamlcluster:
  name: k0s
  privateKey: ~/.ssh/id_rsa
machines:
- count: 1
  backend: ignite
  spec:
    image: weaveworks/ignite-ubuntu:18.04
    name: controller%d
    privileged: true
    volumes:
    - type: bind
      source: /lib/modules
      destination: /lib/modules
    - type: volume
      destination: /var/lib/k0s
    portMappings:
    - containerPort: 22
      hostPort: 9022
    - containerPort: 443
    - containerPort: 6443
- count: 1
  backend: ignite
  spec:
    image: weaveworks/ignite-centos:7
    name: worker%d
    privileged: true
    volumes:
    - type: bind
      source: /lib/modules
      destination: /lib/modules
    - type: volume
      destination: /var/lib/k0s
    portMappings:
    - containerPort: 22
      hostPort: 9022
    - containerPort: 6443
07070100000050000081A400000000000000000000000168429769000000FD000000000000000000000000000000000000003500000000k0sctl-0.25.1/examples/bootloose/k0sctl.yaml.exampleapiVersion: k0sctl.k0sproject.io/v1beta1
kind: cluster
spec:
  hosts:
    - role: controller
      ssh:
        address: 127.0.0.1
        port: 9022
    - role: worker
      ssh:
        address: 127.0.0.1
        port: 9023
  k0s:
    version: 0.12.1
07070100000051000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000002200000000k0sctl-0.25.1/examples/hetzner-tf07070100000052000081A4000000000000000000000001684297690000002F000000000000000000000000000000000000002D00000000k0sctl-0.25.1/examples/hetzner-tf/.gitignoreterraform.tfvars
.terraform
terraform.tfstate*
07070100000053000081A400000000000000000000000168429769000001EF000000000000000000000000000000000000002C00000000k0sctl-0.25.1/examples/hetzner-tf/README.md# Bootstrapping k0s cluster on Hetzner

This directory provides an example flow with `k0sctl` tool together with Terraform using Hetzner as the cloud provider.

## Prerequisites
- You need an account and API token for Hetzner
- Terraform installed
- k0sctl installed

## Steps
Create terraform.tfvars file with needed details. You can use the provided terraform.tfvars.example as a baseline.
- `terraform init`
- `terraform apply`
- `terraform output -raw k0s_cluster | k0sctl apply --config -`
07070100000054000081A40000000000000000000000016842976900000E01000000000000000000000000000000000000002A00000000k0sctl-0.25.1/examples/hetzner-tf/main.tfterraform {
  required_providers {
      hcloud = {
          source = "hetznercloud/hcloud"
          version = "~> 1.24"
      }
  }
}
variable "hcloud_token" {
    description = "Hetzner API token"
}

provider "hcloud" {
  token = var.hcloud_token
}

variable "ssh_keys" {
    default = []
}

variable "ssh_user" {
    default = "root"
}

variable "cluster_name" {
    default = "k0s"
}

variable "location" {
    default = "hel1"
}

variable "image" {
    default = "ubuntu-18.04"
}

variable "controller_type" {
    default = "cx31"
}

variable "controller_count" {
    default = 1
}

variable "worker_count" {
    default = 1
}

variable "worker_type" {
    default = "cx31"
}

resource "hcloud_server" "controller" {
    count = var.controller_count
    name = "${var.cluster_name}-controller-${count.index}"
    image = var.image
    server_type = var.controller_type
    ssh_keys = var.ssh_keys
    location = var.location
    labels = {
        role = "controller"
    }

}

resource "hcloud_server" "worker" {
    count = var.worker_count
    name = "${var.cluster_name}-worker-${count.index}"
    image = var.image
    server_type = var.worker_type
    ssh_keys = var.ssh_keys
    location = var.location
    labels = {
        role = "worker"
    }
}

resource "hcloud_load_balancer" "load_balancer" {
  name       = "${var.cluster_name}-balancer"
  load_balancer_type = "lb11"
  location   = var.location
}

resource "hcloud_load_balancer_target" "load_balancer_target" {
  type             = "label_selector"
  load_balancer_id = hcloud_load_balancer.load_balancer.id
  label_selector = "role=controller"
}

resource "hcloud_load_balancer_service" "load_balancer_service_6443" {
    load_balancer_id = hcloud_load_balancer.load_balancer.id
    protocol = "tcp"
    listen_port = 6443
    destination_port = 6443
}

resource "hcloud_load_balancer_service" "load_balancer_service_9443" {
    load_balancer_id = hcloud_load_balancer.load_balancer.id
    protocol = "tcp"
    listen_port = 9443
    destination_port = 9443
}

resource "hcloud_load_balancer_service" "load_balancer_service_8132" {
    load_balancer_id = hcloud_load_balancer.load_balancer.id
    protocol = "tcp"
    listen_port = 8132
    destination_port = 8132
}

resource "hcloud_load_balancer_service" "load_balancer_service_8133" {
    load_balancer_id = hcloud_load_balancer.load_balancer.id
    protocol = "tcp"
    listen_port = 8133
    destination_port = 8133
}
locals {
    k0s_tmpl = {
        apiVersion = "k0sctl.k0sproject.io/v1beta1"
        kind = "cluster"
        spec = {
            hosts = [
                for host in concat(hcloud_server.controller, hcloud_server.worker) : {
                    ssh = {
                        address = host.ipv4_address
                        user = "root"
                    }
                    role = host.labels.role
                }
            ]
            k0s = {
                version = "0.12.1"
                "config" = {
                    "apiVersion" = "k0s.k0sproject.io/v1beta1"
                    "kind" =  "Cluster"
                    "metadata" = {
                        "name" = var.cluster_name
                    }
                    "spec" = {
                        "api" = {
                            "externalAddress" = hcloud_load_balancer.load_balancer.ipv4
                            "sans" = [hcloud_load_balancer.load_balancer.ipv4]
                        }
                    }
                }
            }
        }
    }
}

output "k0s_cluster" {
    value = yamlencode(local.k0s_tmpl)

}
07070100000055000081A400000000000000000000000168429769000000C7000000000000000000000000000000000000003B00000000k0sctl-0.25.1/examples/hetzner-tf/terraform.tfvars.examplessh_keys = ["you@domain.com"]
hcloud_token = "your-api-token"
cluster_name = "k0s"
location = "hel1"
image = "ubuntu-18.04"
controller_type = "cx31"
controller_count = 1
worker_type = "cx31"
worker_count = 207070100000056000081A4000000000000000000000001684297690000109C000000000000000000000000000000000000001500000000k0sctl-0.25.1/go.modmodule github.com/k0sproject/k0sctl

go 1.24.0

toolchain go1.24.2

require (
	github.com/AlecAivazis/survey/v2 v2.3.7
	github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
	github.com/ChrisTrenkamp/goxpath v0.0.0-20210404020558-97928f7e12b6 // indirect
	github.com/a8m/envsubst v1.4.2
	github.com/adrg/xdg v0.5.3
	github.com/bmatcuk/doublestar/v4 v4.8.1
	github.com/creasty/defaults v1.8.0
	github.com/gofrs/uuid v4.4.0+incompatible // indirect
	github.com/k0sproject/dig v0.4.0
	github.com/k0sproject/rig v0.21.0
	github.com/logrusorgru/aurora v2.0.3+incompatible
	github.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786 // indirect
	github.com/masterzen/winrm v0.0.0-20240702205601-3fad6e106085 // indirect
	github.com/mattn/go-isatty v0.0.20
	github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02
	github.com/sirupsen/logrus v1.9.3
	github.com/stretchr/testify v1.10.0
	github.com/urfave/cli/v2 v2.27.6
	golang.org/x/crypto v0.38.0 // indirect
	golang.org/x/net v0.40.0 // indirect
	golang.org/x/sys v0.33.0 // indirect
	golang.org/x/term v0.32.0 // indirect
	golang.org/x/text v0.26.0
	gopkg.in/yaml.v2 v2.4.0
)

require (
	al.essio.dev/pkg/shellescape v1.6.0
	github.com/carlmjohnson/versioninfo v0.22.5
	github.com/go-playground/validator/v10 v10.26.0
	github.com/jellydator/validation v1.1.0
	github.com/k0sproject/version v0.7.0
	github.com/sergi/go-diff v1.4.0
	k8s.io/client-go v0.33.1
)

require (
	github.com/Microsoft/go-winio v0.6.2 // indirect
	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
	github.com/bodgit/ntlmssp v0.0.0-20240506230425-31973bb52d9b // indirect
	github.com/bodgit/windows v1.0.1 // indirect
	github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
	github.com/davidmz/go-pageant v1.0.2 // indirect
	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
	github.com/gabriel-vasile/mimetype v1.4.8 // indirect
	github.com/go-logr/logr v1.4.2 // indirect
	github.com/go-playground/locales v0.14.1 // indirect
	github.com/go-playground/universal-translator v0.18.1 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
	github.com/hashicorp/go-uuid v1.0.3 // indirect
	github.com/jcmturner/aescts/v2 v2.0.0 // indirect
	github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
	github.com/jcmturner/gofork v1.7.6 // indirect
	github.com/jcmturner/goidentity/v6 v6.0.1 // indirect
	github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect
	github.com/jcmturner/rpc/v2 v2.0.3 // indirect
	github.com/json-iterator/go v1.1.12 // indirect
	github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
	github.com/kevinburke/ssh_config v1.2.0 // indirect
	github.com/kr/text v0.2.0 // indirect
	github.com/leodido/go-urn v1.4.0 // indirect
	github.com/mattn/go-colorable v0.1.14 // indirect
	github.com/mattn/go-shellwords v1.0.12 // indirect
	github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
	github.com/mitchellh/go-homedir v1.1.0 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
	github.com/russross/blackfriday/v2 v2.1.0 // indirect
	github.com/spf13/pflag v1.0.6 // indirect
	github.com/tidwall/transform v0.0.0-20201103190739-32f242e2dbde // indirect
	github.com/x448/float16 v0.8.4 // indirect
	github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
	golang.org/x/oauth2 v0.27.0 // indirect
	golang.org/x/time v0.10.0 // indirect
	gopkg.in/inf.v0 v0.9.1 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	k8s.io/apimachinery v0.33.1 // indirect
	k8s.io/klog/v2 v2.130.1 // indirect
	k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect
	sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
	sigs.k8s.io/randfill v1.0.0 // indirect
	sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
	sigs.k8s.io/yaml v1.4.0 // indirect
)
07070100000057000081A40000000000000000000000016842976900006BC5000000000000000000000000000000000000001500000000k0sctl-0.25.1/go.sumal.essio.dev/pkg/shellescape v1.6.0 h1:NxFcEqzFSEVCGN2yq7Huv/9hyCEGVa/TncnOOBBeXHA=
al.essio.dev/pkg/shellescape v1.6.0/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890=
github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ=
github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
github.com/ChrisTrenkamp/goxpath v0.0.0-20210404020558-97928f7e12b6 h1:w0E0fgc1YafGEh5cROhlROMWXiNoZqApk2PDN0M1+Ns=
github.com/ChrisTrenkamp/goxpath v0.0.0-20210404020558-97928f7e12b6/go.mod h1:nuWgzSkT5PnyOd+272uUmV0dnAnAn42Mk7PiQC5VzN4=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s=
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w=
github.com/a8m/envsubst v1.4.2 h1:4yWIHXOLEJHQEFd4UjrWDrYeYlV7ncFWJOCBRLOZHQg=
github.com/a8m/envsubst v1.4.2/go.mod h1:MVUTQNGQ3tsjOOtKCNd+fl8RzhsXcDvvAEzkhGtlsbY=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=
github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ=
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/bmatcuk/doublestar/v4 v4.8.1 h1:54Bopc5c2cAvhLRAzqOGCYHYyhcDHsFF4wWIR5wKP38=
github.com/bmatcuk/doublestar/v4 v4.8.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/bodgit/ntlmssp v0.0.0-20240506230425-31973bb52d9b h1:baFN6AnR0SeC194X2D292IUZcHDs4JjStpqtE70fjXE=
github.com/bodgit/ntlmssp v0.0.0-20240506230425-31973bb52d9b/go.mod h1:Ram6ngyPDmP+0t6+4T2rymv0w0BS9N8Ch5vvUJccw5o=
github.com/bodgit/windows v1.0.1 h1:tF7K6KOluPYygXa3Z2594zxlkbKPAOvqr97etrGNIz4=
github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM=
github.com/carlmjohnson/versioninfo v0.22.5 h1:O00sjOLUAFxYQjlN/bzYTuZiS0y6fWDQjMRvwtKgwwc=
github.com/carlmjohnson/versioninfo v0.22.5/go.mod h1:QT9mph3wcVfISUKd0i9sZfVrPviHuSF+cUtLjm2WSf8=
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI=
github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/creasty/defaults v1.8.0 h1:z27FJxCAa0JKt3utc0sCImAEb+spPucmKoOdLHvHYKk=
github.com/creasty/defaults v1.8.0/go.mod h1:iGzKe6pbEHnpMPtfDXZEr0NVxWnPTjb1bbDy08fPzYM=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davidmz/go-pageant v1.0.2 h1:bPblRCh5jGU+Uptpz6LgMZGD5hJoOt7otgT454WvHn0=
github.com/davidmz/go-pageant v1.0.2/go.mod h1:P2EDDnMqIwG5Rrp05dTRITj9z2zpGcD9efWSkTNKLIE=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM=
github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.26.0 h1:SP05Nqhjcvz81uJaRfEV0YBSSSGMc/iMaVtFbr3Sw2k=
github.com/go-playground/validator/v10 v10.26.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA=
github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI=
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog=
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68=
github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=
github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=
github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg=
github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo=
github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o=
github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8=
github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs=
github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
github.com/jellydator/validation v1.1.0 h1:TBkx56y6dd0By2AhtStRdTIhDjtcuoSE9w6G6z7wQ4o=
github.com/jellydator/validation v1.1.0/go.mod h1:AaCjfkQ4Ykdcb+YCwqCtaI3wDsf2UAGhJ06lJs0VgOw=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/k0sproject/dig v0.4.0 h1:yBxFUUxNXAMGBg6b7c6ypxdx/o3RmhoI5v5ABOw5tn0=
github.com/k0sproject/dig v0.4.0/go.mod h1:rlZ7N7ZEcB4Fi96TPXkZ4dqyAiDWOGLapyL9YpZ7Qz4=
github.com/k0sproject/rig v0.21.0 h1:BV3hVDZiH1at3F4QIkDFVpu0F7RajLo/yScNcYygALA=
github.com/k0sproject/rig v0.21.0/go.mod h1:CwxhnGDFDE1pQtaN41XUvoLMCnXWSnQoXT2n4kDE9lA=
github.com/k0sproject/version v0.7.0 h1:pWp60UaA7N9g/wKUz1HN6heFW1M6ioWHEo8glHYlq00=
github.com/k0sproject/version v0.7.0/go.mod h1:iNV3O8blndsQhxZ8zACfpQhrLDlrTvDlCzx+vgCFtSI=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/logrusorgru/aurora v2.0.3+incompatible h1:tOpm7WcpBTn4fjmVfgpQq0EfczGlG91VSDkswnjF5A8=
github.com/logrusorgru/aurora v2.0.3+incompatible/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786 h1:2ZKn+w/BJeL43sCxI2jhPLRv73oVVOjEKZjKkflyqxg=
github.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786/go.mod h1:kCEbxUJlNDEBNbdQMkPSp6yaKcRXVI6f4ddk8Riv4bc=
github.com/masterzen/winrm v0.0.0-20240702205601-3fad6e106085 h1:PiQLLKX4vMYlJImDzJYtQScF2BbQ0GAjPIHCDqzHHHs=
github.com/masterzen/winrm v0.0.0-20240702205601-3fad6e106085/go.mod h1:JajVhkiG2bYSNYYPYuWG7WZHr42CTjMTcCjfInRNCqc=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI=
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw=
github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02 h1:v9ezJDHA1XGxViAUSIoO/Id7Fl63u6d0YmsAm+/p2hs=
github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02/go.mod h1:RF16/A3L0xSa0oSERcnhd8Pu3IXSDZSK2gmGIMsttFE=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tidwall/transform v0.0.0-20201103190739-32f242e2dbde h1:AMNpJRc7P+GTwVbl8DkK2I9I8BBUzNiHuH/tlxrpan0=
github.com/tidwall/transform v0.0.0-20201103190739-32f242e2dbde/go.mod h1:MvrEmduDUz4ST5pGZ7CABCnOU5f3ZiOAZzT6b1A6nX8=
github.com/urfave/cli/v2 v2.27.6 h1:VdRdS98FNhKZ8/Az8B7MTyGQmpIr36O1EHybx/LaZ4g=
github.com/urfave/cli/v2 v2.27.6/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4=
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8=
golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4=
golang.org/x/time v0.10.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.33.1 h1:tA6Cf3bHnLIrUK4IqEgb2v++/GYUtqiu9sRVk3iBXyw=
k8s.io/api v0.33.1/go.mod h1:87esjTn9DRSRTD4fWMXamiXxJhpOIREjWOSjsW1kEHw=
k8s.io/apimachinery v0.33.1 h1:mzqXWV8tW9Rw4VeW9rEkqvnxj59k1ezDUl20tFK/oM4=
k8s.io/apimachinery v0.33.1/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
k8s.io/client-go v0.33.1 h1:ZZV/Ks2g92cyxWkRRnfUDsnhNn28eFpt26aGc8KbXF4=
k8s.io/client-go v0.33.1/go.mod h1:JAsUrl1ArO7uRVFWfcj6kOomSlCv+JpvIsp6usAGefA=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
07070100000058000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000001A00000000k0sctl-0.25.1/integration07070100000059000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000002100000000k0sctl-0.25.1/integration/github0707010000005A000081A40000000000000000000000016842976900000D12000000000000000000000000000000000000002B00000000k0sctl-0.25.1/integration/github/github.gopackage github

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"sort"
	"strings"
	"time"

	k0sversion "github.com/k0sproject/version"
)

// timeOut is the HTTP client timeout applied to GitHub API requests.
const timeOut = time.Second * 10

// Asset describes a github asset
type Asset struct {
	// Name is the file name of the release asset.
	Name string `json:"name"`
	// URL is the direct browser download URL for the asset.
	URL  string `json:"browser_download_url"`
}

// Release describes a github release
type Release struct {
	// URL is the human-viewable GitHub page for the release.
	URL        string  `json:"html_url"`
	// TagName is the git tag of the release; IsNewer parses it as a version.
	TagName    string  `json:"tag_name"`
	// PreRelease is true when the release is marked as a prerelease on GitHub.
	PreRelease bool    `json:"prerelease"`
	// Assets lists the downloadable files attached to the release.
	Assets     []Asset `json:"assets"`
}

// IsNewer returns true when the release's tag name is a semantically newer
// version than b. It returns false when either version string fails to parse.
func (r *Release) IsNewer(b string) bool {
	this, err := k0sversion.NewVersion(r.TagName)
	if err != nil {
		return false
	}
	other, err := k0sversion.NewVersion(b)
	if err != nil {
		return false
	}
	return this.GreaterThan(other)
}

// LatestK0sBinaryURL returns the url for the latest k0s release by arch and os.
// When preok is true, prerelease versions are considered as candidates.
func LatestK0sBinaryURL(arch, osKind string, preok bool) (string, error) {
	r, err := k0sversion.LatestByPrerelease(preok)
	if err != nil {
		return "", err
	}
	return r.DownloadURL(osKind, arch), nil
}

// LatestK0sVersion returns the latest k0s version number (without v prefix).
// When preok is true, prerelease versions are considered as candidates.
func LatestK0sVersion(preok bool) (string, error) {
	r, err := k0sversion.LatestByPrerelease(preok)
	if err != nil {
		return "", err
	}
	// Strip the "v" prefix so callers get a bare version number.
	return strings.TrimPrefix(r.String(), "v"), nil
}

// LatestRelease returns the latest k0sctl release from github. When preok is
// false and the newest release happens to be a prerelease, the full release
// list is scanned for the newest stable release instead.
func LatestRelease(preok bool) (Release, error) {
	release, err := fetchLatestRelease()
	if err != nil {
		return Release{}, fmt.Errorf("failed to fetch the latest release: %w", err)
	}

	// Happy path: the newest release is acceptable as-is.
	if !release.PreRelease || preok {
		return release, nil
	}

	// Prereleases were not acceptable; fall back to the newest stable release.
	release, err = fetchLatestNonPrereleaseRelease()
	if err != nil {
		return Release{}, fmt.Errorf("failed to fetch the latest non-prerelease release: %w", err)
	}

	return release, nil
}

// fetchLatestRelease fetches the latest release from the GitHub API
// "releases/latest" endpoint. Note that the result may still be flagged as a
// prerelease; LatestRelease checks for that.
func fetchLatestRelease() (Release, error) {
	var release Release
	if err := unmarshalURLBody("https://api.github.com/repos/k0sproject/k0sctl/releases/latest", &release); err != nil {
		return Release{}, err
	}
	return release, nil
}

// unmarshalURLBody performs an HTTP GET on url and unmarshals the JSON
// response body into o. It returns an error when the request fails, the
// server responds with a non-200 status, or the body cannot be read or
// parsed as JSON.
func unmarshalURLBody(url string, o interface{}) error {
	client := &http.Client{
		Timeout: timeOut,
	}

	resp, err := client.Get(url)
	if err != nil {
		return err
	}
	// Close the body on every return path; the original code leaked the
	// connection when the server returned a non-200 status. Per net/http
	// docs, a client response Body is always non-nil on a nil error.
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("backend returned http %d for %s", resp.StatusCode, url)
	}

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}

	return json.Unmarshal(body, o)
}

// fetchLatestNonPrereleaseRelease fetches the latest non-prerelease from the
// GitHub API by listing all releases, sorting the non-prerelease tags as
// versions and returning the release matching the newest one.
func fetchLatestNonPrereleaseRelease() (Release, error) {
	var releases []Release
	if err := unmarshalURLBody("https://api.github.com/repos/k0sproject/k0sctl/releases", &releases); err != nil {
		return Release{}, err
	}

	// Collect the parseable, non-prerelease version tags.
	var versions k0sversion.Collection
	for _, v := range releases {
		if v.PreRelease {
			continue
		}
		if version, err := k0sversion.NewVersion(v.TagName); err == nil {
			versions = append(versions, version)
		}
	}

	// Guard against an empty collection; the original indexed
	// versions[len(versions)-1] unconditionally and panicked when no
	// non-prerelease release (or no parseable tag) existed.
	if len(versions) == 0 {
		return Release{}, fmt.Errorf("no release found")
	}

	sort.Sort(versions)
	latest := versions[len(versions)-1].String()

	for _, v := range releases {
		if v.TagName == latest {
			return v, nil
		}
	}

	return Release{}, fmt.Errorf("no release found")
}
0707010000005B000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000001700000000k0sctl-0.25.1/internal0707010000005C000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000001D00000000k0sctl-0.25.1/internal/shell0707010000005D000081A400000000000000000000000168429769000005F3000000000000000000000000000000000000002600000000k0sctl-0.25.1/internal/shell/split.gopackage shell

// this is borrowed as-is from rig v2 until k0sctl is updated to use it

import (
	"fmt"
	"strings"
)

// Split splits the input string respecting shell-like quoted segments.
//
// NOTE(review): per the file comment this is borrowed as-is from rig v2, so
// two visible quirks are intentionally left in place: consecutive unquoted
// spaces each emit an empty segment, and a trailing empty quoted segment
// ("" or '') is dropped because only a non-empty last segment is appended.
func Split(input string) ([]string, error) { //nolint:cyclop
	var segments []string

	// Borrow a scratch builder from the shared pool; reset it before return.
	currentSegment, ok := builderPool.Get().(*strings.Builder)
	if !ok {
		currentSegment = &strings.Builder{}
	}
	defer builderPool.Put(currentSegment)
	defer currentSegment.Reset()

	// Parser state: inside "...", inside '...', or right after a backslash.
	var inDoubleQuotes, inSingleQuotes, isEscaped bool

	for i := range len(input) {
		currentChar := input[i]

		if isEscaped {
			// Previous char was an escaping backslash: take this one literally.
			currentSegment.WriteByte(currentChar)
			isEscaped = false
			continue
		}

		switch {
		case currentChar == '\\' && !inSingleQuotes:
			// Backslash escapes outside quotes and inside double quotes.
			isEscaped = true
		case currentChar == '"' && !inSingleQuotes:
			inDoubleQuotes = !inDoubleQuotes
		case currentChar == '\'' && !inDoubleQuotes:
			inSingleQuotes = !inSingleQuotes
		case currentChar == ' ' && !inDoubleQuotes && !inSingleQuotes:
			// Space outside quotes; delimiter for a new segment
			segments = append(segments, currentSegment.String())
			currentSegment.Reset()
		default:
			currentSegment.WriteByte(currentChar)
		}
	}

	// An unterminated quote or a dangling backslash is an error.
	if inDoubleQuotes || inSingleQuotes {
		return nil, fmt.Errorf("split `%q`: %w", input, ErrMismatchedQuotes)
	}

	if isEscaped {
		return nil, fmt.Errorf("split `%q`: %w", input, ErrTrailingBackslash)
	}

	// Add the last segment if present
	if currentSegment.Len() > 0 {
		segments = append(segments, currentSegment.String())
	}

	return segments, nil
}
0707010000005E000081A40000000000000000000000016842976900000802000000000000000000000000000000000000002800000000k0sctl-0.25.1/internal/shell/unquote.gopackage shell

import (
	"errors"
	"fmt"
	"strings"
	"sync"
)

// This is borrowed from rig v2 until k0sctl is updated to use it

var (
	// builderPool recycles strings.Builder scratch buffers between calls.
	builderPool = sync.Pool{
		New: func() interface{} {
			return &strings.Builder{}
		},
	}

	// ErrMismatchedQuotes is returned when the input string has mismatched quotes when unquoting.
	ErrMismatchedQuotes = errors.New("mismatched quotes")

	// ErrTrailingBackslash is returned when the input string ends with a trailing backslash.
	ErrTrailingBackslash = errors.New("trailing backslash")
)

// Unquote is a mostly POSIX compliant implementation of unquoting a string the same way a shell would.
// Variables and command substitutions are not handled.
func Unquote(input string) (string, error) { //nolint:cyclop
	out, ok := builderPool.Get().(*strings.Builder)
	if !ok {
		out = &strings.Builder{}
	}
	defer builderPool.Put(out)
	defer out.Reset()

	// Parser state: inside "...", inside '...', or right after a backslash.
	var inDouble, inSingle, escaped bool

	for i := 0; i < len(input); i++ {
		ch := input[i]

		if escaped {
			// Previous char was an escaping backslash: emit this one verbatim.
			out.WriteByte(ch)
			escaped = false
			continue
		}

		if ch == '\\' && !inSingle {
			// Backslash escapes outside quotes and inside double quotes;
			// inside single quotes it is a regular character.
			escaped = true
			continue
		}
		if ch == '"' && !inSingle {
			// Double quote toggles quoting unless single-quoted.
			inDouble = !inDouble
			continue
		}
		if ch == '\'' && !inDouble {
			// Single quote toggles quoting unless double-quoted.
			inSingle = !inSingle
			continue
		}
		out.WriteByte(ch)
	}

	// An unterminated quote or a dangling backslash is an error.
	if inDouble || inSingle {
		return "", fmt.Errorf("unquote `%q`: %w", input, ErrMismatchedQuotes)
	}
	if escaped {
		return "", fmt.Errorf("unquote `%q`: %w", input, ErrTrailingBackslash)
	}

	return out.String(), nil
}
0707010000005F000081A400000000000000000000000168429769000003AE000000000000000000000000000000000000002D00000000k0sctl-0.25.1/internal/shell/unquote_test.gopackage shell_test

import (
	"testing"

	"github.com/k0sproject/k0sctl/internal/shell"
	"github.com/stretchr/testify/require"
)

// TestUnquote exercises shell.Unquote for plain, quoted, nested and escaped
// inputs, plus the two documented error conditions (mismatched quotes and a
// trailing backslash), which were previously untested.
func TestUnquote(t *testing.T) {
	t.Run("no quotes", func(t *testing.T) {
		out, err := shell.Unquote("foo bar")
		require.NoError(t, err)
		require.Equal(t, "foo bar", out)
	})

	t.Run("simple quotes", func(t *testing.T) {
		out, err := shell.Unquote("\"foo\" 'bar'")
		require.NoError(t, err)
		require.Equal(t, "foo bar", out)
	})

	t.Run("mid-word quotes", func(t *testing.T) {
		out, err := shell.Unquote("f\"o\"o b'a'r")
		require.NoError(t, err)
		require.Equal(t, "foo bar", out)
	})

	t.Run("complex quotes", func(t *testing.T) {
		out, err := shell.Unquote(`'"'"'foo'"'"'`)
		require.NoError(t, err)
		require.Equal(t, `"'foo'"`, out)
	})

	t.Run("escaped quotes", func(t *testing.T) {
		out, err := shell.Unquote("\\'foo\\' 'bar'")
		require.NoError(t, err)
		require.Equal(t, "'foo' bar", out)
	})

	t.Run("mismatched quotes", func(t *testing.T) {
		_, err := shell.Unquote("'foo")
		require.ErrorIs(t, err, shell.ErrMismatchedQuotes)
	})

	t.Run("trailing backslash", func(t *testing.T) {
		_, err := shell.Unquote("foo\\")
		require.ErrorIs(t, err, shell.ErrTrailingBackslash)
	})
}
07070100000060000081A400000000000000000000000168429769000001CD000000000000000000000000000000000000001600000000k0sctl-0.25.1/main.gopackage main

import (
	"os"

	"github.com/k0sproject/k0sctl/cmd"
	log "github.com/sirupsen/logrus"

	// blank import to make sure versioninfo is included in the binary
	_ "github.com/carlmjohnson/versioninfo"
	// blank import to make sure versioninfo is included in the binary
	_ "github.com/k0sproject/k0sctl/version"
)

// main builds the k0sctl CLI application wired to the process's standard
// streams and runs it with the process arguments, exiting non-zero on error.
func main() {
	app := cmd.NewK0sctl(os.Stdin, os.Stdout, os.Stderr)
	err := app.Run(os.Args)
	if err != nil {
		log.Fatal(err)
	}
}
07070100000061000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000001400000000k0sctl-0.25.1/phase07070100000062000081A40000000000000000000000016842976900000784000000000000000000000000000000000000002700000000k0sctl-0.25.1/phase/apply_manifests.gopackage phase

import (
	"bytes"
	"context"
	"fmt"
	"io"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/rig/exec"
	log "github.com/sirupsen/logrus"
)

// ApplyManifests is a phase that applies additional manifests to the cluster
type ApplyManifests struct {
	GenericPhase
	leader *cluster.Host
}

// Title returns the display name of this phase.
func (p *ApplyManifests) Title() string {
	return "Apply additional manifests"
}

// Prepare stores the cluster configuration and resolves the current k0s
// leader host that the manifests will be applied through.
func (p *ApplyManifests) Prepare(config *v1beta1.Cluster) error {
	p.Config = config
	p.leader = config.Spec.K0sLeader()
	return nil
}

// ShouldRun reports whether the config carries any additional manifests.
func (p *ApplyManifests) ShouldRun() bool {
	return len(p.Config.Metadata.Manifests) != 0
}

// Run applies each configured manifest on the leader, stopping at the first
// failure.
func (p *ApplyManifests) Run(_ context.Context) error {
	for name, content := range p.Config.Metadata.Manifests {
		err := p.apply(name, content)
		if err != nil {
			return err
		}
	}
	return nil
}

// apply pipes a single manifest into "kubectl apply -f -" on the leader.
// In dry-run mode it only reports what would be done.
func (p *ApplyManifests) apply(name string, content []byte) error {
	if !p.IsWet() {
		p.DryMsgf(p.leader, "apply manifest %s (%d bytes)", name, len(content))
		return nil
	}

	log.Infof("%s: apply manifest %s (%d bytes)", p.leader, name, len(content))
	kubectlCmd := p.leader.Configurer.KubectlCmdf(p.leader, p.leader.K0sDataDir(), "apply -f -")
	var stdout, stderr bytes.Buffer

	cmd, err := p.leader.ExecStreams(kubectlCmd, io.NopCloser(bytes.NewReader(content)), &stdout, &stderr, exec.Sudo(p.leader))
	if err != nil {
		return fmt.Errorf("failed to run apply for manifest %s: %w", name, err)
	}
	if err := cmd.Wait(); err != nil {
		log.Errorf("%s: kubectl apply failed for manifest %s", p.leader, name)
		log.Errorf("%s: kubectl apply stderr: %s", p.leader, stderr.String())
		// propagate the failure: previously the error was only logged and the
		// phase reported success even when the apply failed
		return fmt.Errorf("kubectl apply failed for manifest %s: %w", name, err)
	}
	log.Infof("%s: kubectl apply: %s", p.leader, stdout.String())
	return nil
}
07070100000063000081A400000000000000000000000168429769000006C1000000000000000000000000000000000000002300000000k0sctl-0.25.1/phase/arm_prepare.gopackage phase

import (
	"context"
	"strings"

	"github.com/k0sproject/version"
	log "github.com/sirupsen/logrus"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
)

var etcdSupportedArchArm64Since = version.MustParse("v1.22.1+k0s.0")

// PrepareArm implements a phase which fixes arm quirks
type PrepareArm struct {
	GenericPhase

	hosts cluster.Hosts
}

// Title returns the display name of this phase.
func (p *PrepareArm) Title() string {
	return "Prepare ARM nodes"
}

// Prepare selects the hosts needing the etcd ARM workaround: non-reset,
// non-worker hosts on an arm/aarch architecture, excluding 64-bit ARM on
// k0s versions whose bundled etcd supports arm64 natively.
func (p *PrepareArm) Prepare(config *v1beta1.Cluster) error {
	p.Config = config

	p.hosts = p.Config.Spec.Hosts.Filter(func(h *cluster.Host) bool {
		if h.Reset || h.Role == "worker" {
			return false
		}

		arch := h.Metadata.Arch
		isArm := strings.HasPrefix(arch, "arm") || strings.HasPrefix(arch, "aarch")
		if !isArm {
			return false
		}

		// 64-bit arm is supported on etcd 3.5.0+ which is included in
		// k0s v1.22.1+k0s.0 and newer
		if strings.HasSuffix(arch, "64") && p.Config.Spec.K0s.Version.GreaterThanOrEqual(etcdSupportedArchArm64Since) {
			return false
		}

		return true
	})

	return nil
}

// ShouldRun reports whether any ARM controller hosts were selected.
func (p *PrepareArm) ShouldRun() bool {
	return len(p.hosts) != 0
}

// Run applies the etcd architecture override on every selected host in parallel.
func (p *PrepareArm) Run(ctx context.Context) error {
	return p.parallelDo(ctx, p.hosts, p.etcdUnsupportedArch)
}

// etcdUnsupportedArch exports ETCD_UNSUPPORTED_ARCH into the host's
// environment so etcd will start on architectures it does not officially
// support.
func (p *PrepareArm) etcdUnsupportedArch(_ context.Context, h *cluster.Host) error {
	log.Warnf("%s: enabling ETCD_UNSUPPORTED_ARCH=%s override - you may encounter problems with etcd", h, h.Metadata.Arch)
	h.Environment["ETCD_UNSUPPORTED_ARCH"] = h.Metadata.Arch
	return nil
}
07070100000064000081A40000000000000000000000016842976900000B1A000000000000000000000000000000000000001E00000000k0sctl-0.25.1/phase/backup.gopackage phase

import (
	"context"
	"fmt"
	"io"
	"path"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/rig/exec"
	"github.com/k0sproject/version"
	log "github.com/sirupsen/logrus"
)

var _ Phase = &Backup{}

var backupSinceVersion = version.MustParse("v1.21.0-rc.1+k0s.0")

// Backup connect to one of the controllers and takes a backup
type Backup struct {
	GenericPhase

	Out io.Writer

	leader *cluster.Host
}

// Title returns the display name of this phase.
func (p *Backup) Title() string {
	return "Take backup"
}

// Prepare checks that the target k0s version supports backups and records
// the currently running leader controller.
func (p *Backup) Prepare(config *v1beta1.Cluster) error {
	p.Config = config

	if !config.Spec.K0s.Version.GreaterThanOrEqual(backupSinceVersion) {
		return fmt.Errorf("the version of k0s on the host does not support taking backups")
	}

	leader := config.Spec.K0sLeader()
	if leader.Metadata.K0sRunningVersion == nil {
		return fmt.Errorf("failed to find a running controller")
	}

	leader.Metadata.IsK0sLeader = true
	p.leader = leader
	return nil
}

// ShouldRun reports whether a leader host was found during Prepare.
func (p *Backup) ShouldRun() bool {
	return p.leader != nil
}

// Run takes a backup on the leader: creates a remote temp directory, runs
// the k0s backup command in it, streams the resulting archive to p.Out and
// cleans up the remote temporary files. In dry-run mode the steps are only
// reported.
func (p *Backup) Run(_ context.Context) error {
	h := p.leader

	log.Infof("%s: backing up", h)
	var backupDir string
	err := p.Wet(h, "create a tempdir using `mktemp -d`", func() error {
		b, err := h.Configurer.TempDir(h)
		if err != nil {
			return err
		}
		backupDir = b
		return nil
	}, func() error {
		backupDir = "/tmp/k0s_backup.dryrun"
		return nil
	})
	if err != nil {
		return err
	}

	cmd := h.K0sBackupCommand(backupDir)
	err = p.Wet(h, fmt.Sprintf("create backup using `%s`", cmd), func() error {
		// reuse the command string built above instead of rebuilding it
		return h.Exec(cmd, exec.Sudo(h))
	})
	if err != nil {
		return err
	}

	// get the name of the backup file
	var remoteFile string
	if p.IsWet() {
		r, err := h.ExecOutputf(`ls "%s"`, backupDir)
		if err != nil {
			return err
		}
		remoteFile = r
	} else {
		remoteFile = "k0s_backup.dryrun.tar.gz"
	}
	remotePath := path.Join(backupDir, remoteFile)

	// remove the remote archive and tempdir when done, regardless of outcome
	defer func() {
		if p.IsWet() {
			log.Debugf("%s: cleaning up %s", h, remotePath)
			if err := h.Configurer.DeleteFile(h, remotePath); err != nil {
				log.Warnf("%s: failed to clean up backup temp file %s: %s", h, remotePath, err)
			}
			if err := h.Configurer.DeleteDir(h, backupDir, exec.Sudo(h)); err != nil {
				log.Warnf("%s: failed to clean up backup temp directory %s: %s", h, backupDir, err)
			}
		} else {
			p.DryMsg(h, "delete the tempdir")
		}
	}()

	// stream the archive contents to the local output writer
	if p.IsWet() {
		if err := h.Execf(`cat "%s"`, remotePath, exec.Writer(p.Out)); err != nil {
			return fmt.Errorf("download backup: %w", err)
		}
	} else {
		p.DryMsgf(nil, "download the backup file to local host")
	}
	return nil
}
07070100000065000081A40000000000000000000000016842976900002B07000000000000000000000000000000000000002500000000k0sctl-0.25.1/phase/configure_k0s.gopackage phase

import (
	"bytes"
	"context"
	"fmt"
	gopath "path"
	"slices"
	"time"

	"github.com/k0sproject/dig"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/k0sctl/pkg/node"
	"github.com/k0sproject/k0sctl/pkg/retry"
	"github.com/k0sproject/rig/exec"
	"github.com/k0sproject/version"
	"github.com/sergi/go-diff/diffmatchpatch"
	log "github.com/sirupsen/logrus"
	"gopkg.in/yaml.v2"
)

// "k0s default-config" was replaced with "k0s config create" in v1.23.1+k0s.0
var configCreateSince = version.MustParse("v1.23.1+k0s.0")

const (
	configSourceExisting int = iota
	configSourceDefault
	configSourceProvided
	configSourceNodeConfig
)

// ConfigureK0s writes the k0s configuration to host k0s config dir
type ConfigureK0s struct {
	GenericPhase
	leader        *cluster.Host
	configSource  int
	newBaseConfig dig.Mapping
	hosts         cluster.Hosts
}

// Title returns the display name of this phase.
func (p *ConfigureK0s) Title() string {
	return "Configure k0s"
}

// Prepare the phase.
//
// Selects the base k0s configuration source in priority order: the config
// provided in spec.k0s.config, an existing config found on the leader, or a
// default config generated by the k0s binary on the leader. All controller
// addresses are then merged into spec.api.sans. For every controller that
// is not being reset, a host-specific config is rendered, validated on the
// host, and compared (after a YAML round-trip) against the host's existing
// config; hosts whose effective configuration would change are collected
// into p.hosts.
func (p *ConfigureK0s) Prepare(config *v1beta1.Cluster) error {
	p.Config = config
	p.leader = p.Config.Spec.K0sLeader()

	if len(p.Config.Spec.K0s.Config) > 0 {
		log.Debug("using provided k0s config")
		p.configSource = configSourceProvided
		p.newBaseConfig = p.Config.Spec.K0s.Config.Dup()
	} else if p.leader.Metadata.K0sExistingConfig != "" {
		log.Debug("using existing k0s config")
		p.configSource = configSourceExisting
		p.newBaseConfig = make(dig.Mapping)
		err := yaml.Unmarshal([]byte(p.leader.Metadata.K0sExistingConfig), &p.newBaseConfig)
		if err != nil {
			return fmt.Errorf("failed to unmarshal existing k0s config: %w", err)
		}
	} else {
		log.Debug("using generated default k0s config")
		p.configSource = configSourceDefault
		cfg, err := p.generateDefaultConfig()
		if err != nil {
			return fmt.Errorf("failed to generate default k0s config: %w", err)
		}
		p.newBaseConfig = make(dig.Mapping)
		err = yaml.Unmarshal([]byte(cfg), &p.newBaseConfig)
		if err != nil {
			return fmt.Errorf("failed to unmarshal default k0s config: %w", err)
		}
	}

	// convert sans from unmarshaled config into []string
	// (YAML unmarshaling may yield either []interface{} or []string)
	var sans []string
	oldsans := p.newBaseConfig.Dig("spec", "api", "sans")
	switch oldsans := oldsans.(type) {
	case []interface{}:
		for _, v := range oldsans {
			if s, ok := v.(string); ok {
				sans = append(sans, s)
			}
		}
		log.Tracef("converted sans from %T to []string", oldsans)
	case []string:
		sans = append(sans, oldsans...)
		log.Tracef("sans was readily %T", oldsans)
	default:
		// do nothing - base k0s config does not contain any existing SANs
	}

	// populate SANs with all controller addresses
	for i, c := range p.Config.Spec.Hosts.Controllers() {
		if c.Reset {
			continue
		}
		if !slices.Contains(sans, c.Address()) {
			sans = append(sans, c.Address())
			log.Debugf("added controller %d address %s to spec.api.sans", i+1, c.Address())
		}
		if c.PrivateAddress != "" && !slices.Contains(sans, c.PrivateAddress) {
			sans = append(sans, c.PrivateAddress)
			log.Debugf("added controller %d private address %s to spec.api.sans", i+1, c.PrivateAddress)
		}
	}

	// assign populated sans to the base config
	p.newBaseConfig.DigMapping("spec", "api")["sans"] = sans

	for _, h := range p.Config.Spec.Hosts.Controllers() {
		if h.Reset {
			continue
		}

		cfgNew, err := p.configFor(h)
		if err != nil {
			return fmt.Errorf("failed to build k0s config for %s: %w", h, err)
		}
		tempConfigPath, err := h.Configurer.TempFile(h)
		if err != nil {
			return fmt.Errorf("failed to create temporary file for config: %w", err)
		}
		// NOTE(review): this defer fires when Prepare returns, not at the end
		// of each loop iteration, so remote temp files linger until the whole
		// loop is done
		defer func() {
			if err := h.Configurer.DeleteFile(h, tempConfigPath); err != nil {
				log.Warnf("%s: failed to delete temporary file %s: %s", h, tempConfigPath, err)
			}
		}()

		if err := h.Configurer.WriteFile(h, tempConfigPath, cfgNew, "0600"); err != nil {
			return err
		}

		if err := p.validateConfig(h, tempConfigPath); err != nil {
			return err
		}

		// round-trip both configs through YAML so the comparison ignores
		// formatting and key-order differences
		cfgA := make(map[string]any)
		cfgB := make(map[string]any)
		if err := yaml.Unmarshal([]byte(cfgNew), &cfgA); err != nil {
			return fmt.Errorf("failed to unmarshal new config: %w", err)
		}
		if err := yaml.Unmarshal([]byte(h.Metadata.K0sExistingConfig), &cfgB); err != nil {
			return fmt.Errorf("failed to unmarshal existing config: %w", err)
		}
		cfgAString, err := yaml.Marshal(cfgA)
		if err != nil {
			return fmt.Errorf("failed to marshal new config: %w", err)
		}
		cfgBString, err := yaml.Marshal(cfgB)
		if err != nil {
			return fmt.Errorf("failed to marshal existing config: %w", err)
		}

		if bytes.Equal(cfgAString, cfgBString) {
			log.Debugf("%s: configuration will not change", h)
			continue
		}

		log.Debugf("%s: configuration will change", h)
		h.Metadata.K0sNewConfig = cfgNew
		p.hosts = append(p.hosts, h)
	}

	return nil
}

// DryRun reports, per affected controller, where the configuration would be
// written, which source it was derived from, the diff against the existing
// config, and whether the k0s service would be restarted.
func (p *ConfigureK0s) DryRun() error {
	for _, h := range p.hosts {
		p.DryMsgf(h, "write k0s configuration to %s", h.Configurer.K0sConfigPath())

		switch p.configSource {
		case configSourceDefault:
			p.DryMsg(h, "k0s configuration is based on a generated k0s default configuration")
		case configSourceExisting:
			p.DryMsgf(h, "k0s configuration is based on an existing k0s configuration found on %s", p.Config.Spec.K0sLeader())
		case configSourceProvided:
			p.DryMsg(h, "k0s configuration is based on spec.k0s.config in k0sctl config")
		case configSourceNodeConfig:
			p.DryMsg(h, "k0s configuration is a generated node specific config for dynamic config clusters")
		}

		differ := diffmatchpatch.New()
		changes := differ.DiffMain(h.Metadata.K0sExistingConfig, h.Metadata.K0sNewConfig, false)
		p.DryMsgf(h, "configuration changes:\n%s", differ.DiffPrettyText(changes))

		// a running host that is not about to be upgraded would be restarted
		// to pick up the new config
		if h.Metadata.K0sRunningVersion != nil && !h.Metadata.NeedsUpgrade {
			p.DryMsg(h, Colorize.BrightRed("restart the k0s service").String())
		}
	}
	return nil
}

// ShouldRun reports whether any controllers have a pending config change.
func (p *ConfigureK0s) ShouldRun() bool {
	return len(p.hosts) != 0
}

// generateDefaultConfig asks the leader's k0s binary for its default
// configuration and returns the raw output.
func (p *ConfigureK0s) generateDefaultConfig() (string, error) {
	log.Debugf("%s: generating default configuration", p.leader)

	// "k0s default-config" was replaced with "k0s config create"
	var cmd string
	if p.leader.Metadata.K0sBinaryVersion.GreaterThanOrEqual(configCreateSince) {
		cmd = p.leader.Configurer.K0sCmdf("config create --data-dir=%s", p.leader.K0sDataDir())
	} else {
		cmd = p.leader.Configurer.K0sCmdf("default-config")
	}

	return p.leader.ExecOutput(cmd, exec.Sudo(p.leader))
}

// Run writes the new configuration to every non-reset controller that has a
// pending config change, in parallel.
func (p *ConfigureK0s) Run(ctx context.Context) error {
	targets := p.Config.Spec.Hosts.Controllers().Filter(func(h *cluster.Host) bool {
		return !h.Reset && len(h.Metadata.K0sNewConfig) > 0
	})
	return p.parallelDo(ctx, targets, p.configureK0s)
}

// validateConfig runs "k0s config validate" (or the pre-v1.23.1 "k0s
// validate config") against the given config file on the host. When an
// upgrade binary has already been downloaded to a temp location, that
// binary is used for validation instead of the installed one.
func (p *ConfigureK0s) validateConfig(h *cluster.Host, configPath string) error {
	log.Infof("%s: validating configuration", h)

	var cmd string

	// validate with the pending upgrade binary when one is staged, restoring
	// the original path afterwards
	if h.Metadata.K0sBinaryTempFile != "" {
		oldK0sBinaryPath := h.Configurer.K0sBinaryPath()
		h.Configurer.SetPath("K0sBinaryPath", h.Metadata.K0sBinaryTempFile)
		defer func() {
			h.Configurer.SetPath("K0sBinaryPath", oldK0sBinaryPath)
		}()
	}

	log.Debugf("%s: comparing k0s version %s with %s", h, p.Config.Spec.K0s.Version, configCreateSince)
	if p.Config.Spec.K0s.Version.GreaterThanOrEqual(configCreateSince) {
		log.Debugf("%s: comparison result true", h)
		cmd = h.Configurer.K0sCmdf(`config validate --config "%s"`, configPath)
	} else {
		log.Debugf("%s: comparison result false", h)
		cmd = h.Configurer.K0sCmdf(`validate config --config "%s"`, configPath)
	}

	var stderrBuf bytes.Buffer
	command, err := h.ExecStreams(cmd, nil, nil, &stderrBuf, exec.Sudo(h))
	if err != nil {
		return fmt.Errorf("can't run spec.k0s.config validation: %w", err)
	}
	if err := command.Wait(); err != nil {
		// fixed: the message previously contained a stray double colon
		return fmt.Errorf("spec.k0s.config validation failed: %w (%s)", err, stderrBuf.String())
	}

	return nil
}

// configureK0s installs the pending configuration on a single controller:
// it backs up any config not generated by k0sctl, writes the new config via
// a temp file, and restarts the k0s service when the host is already
// running and no upgrade is about to restart it anyway.
func (p *ConfigureK0s) configureK0s(ctx context.Context, h *cluster.Host) error {
	path := h.K0sConfigPath()
	if h.Configurer.FileExist(h, path) {
		// configs written by k0sctl carry the marker comment; anything else
		// is user-managed and gets backed up instead of silently replaced
		if !h.Configurer.FileContains(h, path, " generated-by-k0sctl") {
			newpath := path + ".old"
			log.Warnf("%s: an existing config was found and will be backed up as %s", h, newpath)
			if err := h.Configurer.MoveFile(h, path, newpath); err != nil {
				return err
			}
		}
	}

	log.Debugf("%s: writing k0s configuration", h)
	// NOTE(review): the temp file is not removed here after installation -
	// confirm whether cleanup is handled elsewhere
	tempConfigPath, err := h.Configurer.TempFile(h)
	if err != nil {
		return fmt.Errorf("failed to create temporary file for config: %w", err)
	}

	if err := h.Configurer.WriteFile(h, tempConfigPath, h.Metadata.K0sNewConfig, "0600"); err != nil {
		return err
	}

	log.Infof("%s: installing new configuration", h)
	configPath := h.K0sConfigPath()
	configDir := gopath.Dir(configPath)

	// make sure the target directory exists before installing into it
	if !h.Configurer.FileExist(h, configDir) {
		if err := h.Execf(`install -m 0750 -o root -g root -d "%s"`, configDir, exec.Sudo(h)); err != nil {
			return fmt.Errorf("failed to create k0s configuration directory: %w", err)
		}
	}

	if err := h.Execf(`install -m 0600 -o root -g root "%s" "%s"`, tempConfigPath, configPath, exec.Sudo(h)); err != nil {
		return fmt.Errorf("failed to install k0s configuration: %w", err)
	}

	// restart only when k0s is already running and no upgrade phase will
	// restart it shortly anyway
	if h.Metadata.K0sRunningVersion != nil && !h.Metadata.NeedsUpgrade {
		log.Infof("%s: restarting k0s service", h)
		if err := h.Configurer.RestartService(h, h.K0sServiceName()); err != nil {
			return err
		}

		log.Infof("%s: waiting for k0s service to start", h)
		return retry.AdaptiveTimeout(ctx, retry.DefaultTimeout, node.ServiceRunningFunc(h, h.K0sServiceName()))
	}

	return nil
}

// configFor renders the final YAML configuration for a single controller.
//
// For dynamic-config clusters every node except an uninitialized leader
// gets a stripped-down node-local config; otherwise a copy of the shared
// base config is used. The host's address (preferring the private one) is
// injected into spec.api.address when onlyBindToAddress is set, and into
// the etcd peer address for etcd-backed clusters. The output is prefixed
// with a generated-by-k0sctl marker comment used to recognize k0sctl-owned
// config files later.
func (p *ConfigureK0s) configFor(h *cluster.Host) (string, error) {
	var cfg dig.Mapping

	if p.Config.Spec.K0s.DynamicConfig {
		if h == p.leader && h.Metadata.K0sRunningVersion == nil {
			log.Debugf("%s: leader will get a full config on initialize ", h)
			cfg = p.newBaseConfig.Dup()
		} else {
			log.Debugf("%s: using a stripped down config for dynamic config", h)
			cfg = p.Config.Spec.K0s.NodeConfig()
		}
	} else {
		cfg = p.newBaseConfig.Dup()
	}

	// prefer the private address when one is configured
	var addr string

	if h.PrivateAddress != "" {
		addr = h.PrivateAddress
	} else {
		addr = h.Address()
	}

	if cfg.DigString("spec", "api", "address") == "" {
		if onlyBindAddr, ok := cfg.Dig("spec", "api", "onlyBindToAddress").(bool); ok && onlyBindAddr {
			cfg.DigMapping("spec", "api")["address"] = addr
		}
	}

	if p.Config.StorageType() == "etcd" {
		if cfg.Dig("spec", "storage", "etcd", "peerAddress") != nil || h.PrivateAddress != "" {
			cfg.DigMapping("spec", "storage", "etcd")["peerAddress"] = addr
		}
	}

	// ensure the document is a complete ClusterConfig resource
	if _, ok := cfg["apiVersion"]; !ok {
		cfg["apiVersion"] = "k0s.k0sproject.io/v1beta1"
	}

	if _, ok := cfg["kind"]; !ok {
		cfg["kind"] = "ClusterConfig"
	}

	c, err := yaml.Marshal(cfg)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("# generated-by-k0sctl %s\n%s", time.Now().Format(time.RFC3339), c), nil
}
07070100000066000081A400000000000000000000000168429769000003AA000000000000000000000000000000000000001F00000000k0sctl-0.25.1/phase/connect.gopackage phase

import (
	"context"
	"errors"
	"strings"
	"time"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/k0sctl/pkg/retry"
	"github.com/k0sproject/rig"
	log "github.com/sirupsen/logrus"
)

// Connect connects to each of the hosts
type Connect struct {
	GenericPhase
}

// Title returns the display name of this phase.
func (p *Connect) Title() string {
	return "Connect to hosts"
}

// Run opens a connection to every host in parallel, retrying for up to ten
// minutes. Permanent failures (unreachable host, host key mismatch) abort
// the retry loop immediately.
func (p *Connect) Run(ctx context.Context) error {
	return p.parallelDo(ctx, p.Config.Spec.Hosts, func(ctx context.Context, h *cluster.Host) error {
		return retry.AdaptiveTimeout(ctx, 10*time.Minute, func(_ context.Context) error {
			err := h.Connect()
			if err == nil {
				log.Infof("%s: connected", h)
				return nil
			}

			// these failures won't resolve themselves - stop retrying
			if errors.Is(err, rig.ErrCantConnect) || strings.Contains(err.Error(), "host key mismatch") {
				return errors.Join(retry.ErrAbort, err)
			}

			return err
		})
	})
}
07070100000067000081A40000000000000000000000016842976900000380000000000000000000000000000000000000002500000000k0sctl-0.25.1/phase/daemon_reload.gopackage phase

import (
	"context"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	log "github.com/sirupsen/logrus"
)

// DaemonReload phase runs `systemctl daemon-reload` or equivalent on all hosts.
type DaemonReload struct {
	GenericPhase
}

// Title returns the display name of this phase.
func (p *DaemonReload) Title() string {
	return "Reload service manager"
}

// ShouldRun is true when the configuration contains any hosts; the reload
// is attempted on every host, not only ones being reset.
func (p *DaemonReload) ShouldRun() bool {
	return len(p.Config.Spec.Hosts) > 0
}

// Run reloads the service manager on every host in parallel. Failures are
// only warned about, never fatal.
func (p *DaemonReload) Run(ctx context.Context) error {
	return p.parallelDo(ctx, p.Config.Spec.Hosts, func(_ context.Context, h *cluster.Host) error {
		log.Infof("%s: reloading service manager", h)
		err := h.Configurer.DaemonReload(h)
		if err != nil {
			log.Warnf("%s: failed to reload service manager: %s", h, err.Error())
		}
		return nil
	})
}
07070100000068000081A400000000000000000000000168429769000003FA000000000000000000000000000000000000002B00000000k0sctl-0.25.1/phase/default_k0s_version.gopackage phase

import (
	"context"
	"fmt"

	"github.com/k0sproject/version"

	log "github.com/sirupsen/logrus"
)

type DefaultK0sVersion struct {
	GenericPhase
}

// ShouldRun reports whether spec.k0s.version is unset and needs defaulting.
func (p *DefaultK0sVersion) ShouldRun() bool {
	v := p.Config.Spec.K0s.Version
	return v == nil || v.IsZero()
}

// Title returns the display name of this phase.
func (p *DefaultK0sVersion) Title() string {
	return "Set k0s version"
}

// Run looks up the newest available k0s version online (stable by default,
// including pre-releases when spec.k0s.versionChannel says so), stores it
// in the configuration and marks it as a defaulted value.
func (p *DefaultK0sVersion) Run(_ context.Context) error {
	channel := p.Config.Spec.K0s.VersionChannel
	isStable := channel == "" || channel == "stable"

	var msg string
	if isStable {
		msg = "latest stable k0s version"
	} else {
		msg = "latest k0s version including pre-releases"
	}
	log.Info("Looking up ", msg)

	latest, err := version.LatestByPrerelease(!isStable)
	if err != nil {
		return fmt.Errorf("failed to look up k0s version online - try setting spec.k0s.version manually: %w", err)
	}

	log.Infof("Using k0s version %s", latest)
	p.Config.Spec.K0s.Version = latest
	p.Config.Spec.K0s.Metadata.VersionDefaulted = true

	return nil
}
07070100000069000081A40000000000000000000000016842976900000616000000000000000000000000000000000000002100000000k0sctl-0.25.1/phase/detect_os.gopackage phase

import (
	"context"
	"strings"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"

	// anonymous import is needed to load the os configurers
	_ "github.com/k0sproject/k0sctl/configurer"
	// anonymous import is needed to load the os configurers
	_ "github.com/k0sproject/k0sctl/configurer/linux"
	// anonymous import is needed to load the os configurers
	_ "github.com/k0sproject/k0sctl/configurer/linux/enterpriselinux"

	log "github.com/sirupsen/logrus"
)

// DetectOS performs remote OS detection
type DetectOS struct {
	GenericPhase
}

// Title returns the display name of this phase.
func (p *DetectOS) Title() string {
	return "Detect host operating systems"
}

// Run resolves an OS support module for each host in parallel. When no
// module matches the host's os-release ID, the ID_LIKE entries are tried
// one by one as fallbacks.
func (p *DetectOS) Run(ctx context.Context) error {
	return p.parallelDo(ctx, p.Config.Spec.Hosts, func(_ context.Context, h *cluster.Host) error {
		if h.OSIDOverride != "" {
			log.Infof("%s: OS ID has been manually set to %s", h, h.OSIDOverride)
		}

		err := h.ResolveConfigurer()
		if err == nil {
			log.Infof("%s: is running %s", h, h.OSVersion.String())
			return nil
		}

		if h.OSVersion.IDLike != "" {
			log.Debugf("%s: trying to find a fallback OS support module for %s using os-release ID_LIKE '%s'", h, h.OSVersion.String(), h.OSVersion.IDLike)
			for _, id := range strings.Split(h.OSVersion.IDLike, " ") {
				h.OSVersion.ID = id
				if fallbackErr := h.ResolveConfigurer(); fallbackErr == nil {
					log.Warnf("%s: using '%s' as OS support fallback for %s", h, id, h.OSVersion.String())
					return nil
				}
			}
		}

		// no module and no working fallback - report the original error
		return err
	})
}
0707010000006A000081A4000000000000000000000001684297690000039D000000000000000000000000000000000000002200000000k0sctl-0.25.1/phase/disconnect.gopackage phase

import (
	"context"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
)

// Disconnect disconnects from the hosts
type Disconnect struct {
	GenericPhase
}

// Title returns the display name of this phase.
func (p *Disconnect) Title() string {
	return "Disconnect from hosts"
}

// DryRun removes any temporary k0s binaries left on the hosts during the
// dry-run, then disconnects from all hosts.
func (p *Disconnect) DryRun() error {
	_ = p.Config.Spec.Hosts.ParallelEach(context.Background(), func(_ context.Context, h *cluster.Host) error {
		tmp := h.Metadata.K0sBinaryTempFile
		if tmp != "" && h.Configurer.FileExist(h, tmp) {
			_ = h.Configurer.DeleteFile(h, tmp)
		}
		return nil
	})

	return p.Run(context.TODO())
}

// Run closes the connection to every host in parallel.
func (p *Disconnect) Run(ctx context.Context) error {
	return p.Config.Spec.Hosts.ParallelEach(ctx, func(_ context.Context, host *cluster.Host) error {
		host.Disconnect()
		return nil
	})
}
0707010000006B000081A40000000000000000000000016842976900000ECD000000000000000000000000000000000000002900000000k0sctl-0.25.1/phase/download_binaries.gopackage phase

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"os"
	"path"
	"strings"

	"github.com/adrg/xdg"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/version"
	log "github.com/sirupsen/logrus"
)

// DownloadBinaries downloads k0s binaries to localohost temp files
type DownloadBinaries struct {
	GenericPhase
	hosts []*cluster.Host
}

// Title returns the display name of this phase.
func (p *DownloadBinaries) Title() string {
	return "Download k0s binaries to local host"
}

// Prepare selects the hosts that want a locally-downloaded binary uploaded
// and are not already at the target binary version.
func (p *DownloadBinaries) Prepare(config *v1beta1.Cluster) error {
	p.Config = config
	p.hosts = config.Spec.Hosts.Filter(func(h *cluster.Host) bool {
		if h.Reset || !h.UploadBinary {
			return false
		}
		return !h.Metadata.K0sBinaryVersion.Equal(config.Spec.K0s.Version)
	})
	return nil
}

// ShouldRun reports whether any hosts need a binary resolved locally.
func (p *DownloadBinaries) ShouldRun() bool {
	return len(p.hosts) != 0
}

// Run resolves a local k0s binary for each host that needs one uploaded.
// Binaries are grouped per os/arch pair; a k0sBinaryPath configured on any
// host of the same os/arch is preferred over downloading.
func (p *DownloadBinaries) Run(_ context.Context) error {
	var bins binaries

	for _, h := range p.hosts {
		if bin := bins.find(h.Configurer.Kind(), h.Metadata.Arch); bin != nil {
			continue
		}

		bin := &binary{arch: h.Metadata.Arch, os: h.Configurer.Kind(), version: p.Config.Spec.K0s.Version}

		// find configuration defined binpaths and use instead of downloading a new one
		for _, v := range p.hosts {
			if v.Metadata.Arch == bin.arch && v.Configurer.Kind() == bin.os && v.K0sBinaryPath != "" {
				// fixed: previously this read h.K0sBinaryPath (the outer loop
				// variable, possibly empty) instead of the matched host's path
				bin.path = v.K0sBinaryPath
			}
		}

		bins = append(bins, bin)
	}

	// download any binary that did not get a pre-configured path
	for _, bin := range bins {
		if bin.path != "" {
			continue
		}
		if err := bin.download(); err != nil {
			return err
		}
	}

	// point each host at the binary it should upload
	for _, h := range p.hosts {
		if h.K0sBinaryPath == "" {
			if bin := bins.find(h.Configurer.Kind(), h.Metadata.Arch); bin != nil {
				h.UploadBinaryPath = bin.path
			}
		} else {
			h.UploadBinaryPath = h.K0sBinaryPath
		}
	}

	return nil
}

type binary struct {
	arch    string
	os      string
	version *version.Version
	path    string
}

// download locates a cached k0s binary for this os/arch/version,
// downloading and caching it first when none exists, and stores the local
// path in b.path.
func (b *binary) download() error {
	fn := path.Join("k0sctl", "k0s", b.os, b.arch, "k0s-"+strings.TrimPrefix(b.version.String(), "v")+b.ext())

	// cache hit - reuse the existing file
	if cached, err := xdg.SearchCacheFile(fn); err == nil {
		b.path = cached
		return nil
	}

	target, err := xdg.CacheFile(fn)
	if err != nil {
		return err
	}
	if err := b.downloadTo(target); err != nil {
		return err
	}

	b.path = target
	log.Infof("using k0s binary from %s for %s-%s", b.path, b.os, b.arch)

	return nil
}

// ext returns the platform specific file name extension for the binary.
func (b binary) ext() string {
	switch b.os {
	case "windows":
		return ".exe"
	default:
		return ""
	}
}

// url builds the GitHub release download URL for this binary.
func (b binary) url() string {
	// "+" in the version must be escaped for use in the URL path
	ver := strings.ReplaceAll(strings.TrimPrefix(b.version.String(), "v"), "+", "%2B")
	return fmt.Sprintf("https://github.com/k0sproject/k0s/releases/download/v%[1]s/k0s-v%[1]s-%[2]s%[3]s", ver, b.arch, b.ext())
}

// downloadTo fetches the k0s binary from the release URL into the given
// local path. On any failure the partially written file is removed so a
// broken download is never picked up from the cache later.
func (b binary) downloadTo(path string) error {
	log.Infof("downloading k0s version %s binary for %s-%s from %s", b.version, b.os, b.arch, b.url())

	var err error

	f, err := os.Create(path)
	if err != nil {
		return err
	}

	defer func() {
		// remove the broken file when any step below failed; use a separate
		// variable so the original error is not clobbered
		if err != nil {
			if rmErr := os.Remove(path); rmErr != nil {
				log.Warnf("failed to remove broken download at %s: %s", path, rmErr.Error())
			}
		}
	}()

	resp, err := http.Get(b.url())
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		// assign to the outer err so the cleanup defer removes the file
		// (previously the empty file was left in the cache on non-200)
		err = fmt.Errorf("failed to get k0s binary (http %d)", resp.StatusCode)
		return err
	}

	_, err = io.Copy(f, resp.Body)
	if err != nil {
		return err
	}

	// fixed: the close check was inverted (err == nil returned early), which
	// skipped the success log and ignored close failures
	if err = f.Close(); err != nil {
		return err
	}

	log.Infof("cached k0s binary to %s", path)

	return nil
}

type binaries []*binary

// find returns the first binary matching the given os/arch pair, or nil
// when none is known yet.
func (b binaries) find(os, arch string) *binary {
	for _, candidate := range b {
		if candidate.os == os && candidate.arch == arch {
			return candidate
		}
	}
	return nil
}
0707010000006C000081A40000000000000000000000016842976900000A06000000000000000000000000000000000000002400000000k0sctl-0.25.1/phase/download_k0s.gopackage phase

import (
	"context"
	"fmt"
	"strconv"
	"time"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/rig/exec"
	log "github.com/sirupsen/logrus"
)

// DownloadK0s performs k0s online download on the hosts
type DownloadK0s struct {
	GenericPhase
	hosts cluster.Hosts
}

// Title returns the display name of this phase.
func (p *DownloadK0s) Title() string {
	return "Download k0s on hosts"
}

// Prepare selects the hosts that need an on-host k0s download, skipping
// hosts that upload a local binary, hosts being reset, and hosts already on
// the target binary version.
func (p *DownloadK0s) Prepare(config *v1beta1.Cluster) error {
	p.Config = config

	p.hosts = config.Spec.Hosts.Filter(func(h *cluster.Host) bool {
		switch {
		case h.UploadBinary:
			// binary comes from the local machine instead
			return false
		case h.Reset:
			// host is being removed, nothing to download
			return false
		case p.Config.Spec.K0s.Version.Equal(h.Metadata.K0sBinaryVersion):
			log.Debugf("%s: k0s version on target host is already %s", h, h.Metadata.K0sBinaryVersion)
			return false
		default:
			return true
		}
	})

	return nil
}

// ShouldRun reports whether any hosts need an on-host download.
func (p *DownloadK0s) ShouldRun() bool {
	return len(p.hosts) != 0
}

// Run downloads k0s on each selected host in parallel.
func (p *DownloadK0s) Run(ctx context.Context) error {
	return p.parallelDo(ctx, p.hosts, p.downloadK0s)
}

// downloadK0s downloads the k0s binary onto the host into a unique temp
// file next to the final binary path, makes it executable, and records the
// temp location in the host metadata for a later install step.
func (p *DownloadK0s) downloadK0s(_ context.Context, h *cluster.Host) error {
	tmp := h.Configurer.K0sBinaryPath() + ".tmp." + strconv.Itoa(int(time.Now().UnixNano()))

	log.Infof("%s: downloading k0s %s", h, p.Config.Spec.K0s.Version)
	if h.K0sDownloadURL == "" {
		if err := h.Configurer.DownloadK0s(h, tmp, p.Config.Spec.K0s.Version, h.Metadata.Arch, exec.Sudo(h)); err != nil {
			return err
		}
	} else {
		// a custom download URL may contain tokens like %v for the version
		expandedURL := h.ExpandTokens(h.K0sDownloadURL, p.Config.Spec.K0s.Version)
		log.Infof("%s: downloading k0s binary from %s", h, expandedURL)
		if err := h.Configurer.DownloadURL(h, expandedURL, tmp, exec.Sudo(h)); err != nil {
			return fmt.Errorf("failed to download k0s binary: %w", err)
		}
	}

	// best effort - the install step sets the final mode anyway
	if err := h.Execf(`chmod +x "%s"`, tmp, exec.Sudo(h)); err != nil {
		log.Warnf("%s: failed to chmod k0s temp binary: %v", h, err.Error())
	}

	h.Metadata.K0sBinaryTempFile = tmp

	return nil
}

// CleanUp removes any downloaded temp binaries that were left unused.
func (p *DownloadK0s) CleanUp() {
	_ = p.parallelDo(context.Background(), p.hosts, func(_ context.Context, h *cluster.Host) error {
		if tmp := h.Metadata.K0sBinaryTempFile; tmp != "" {
			_ = h.Configurer.DeleteFile(h, tmp)
		}
		return nil
	})
}
0707010000006D000081A40000000000000000000000016842976900000A9C000000000000000000000000000000000000002400000000k0sctl-0.25.1/phase/gather_facts.gopackage phase

import (
	"context"
	"fmt"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/version"
	log "github.com/sirupsen/logrus"
)

// Note: Passwordless sudo has not yet been confirmed when this runs

// GatherFacts gathers information about hosts, such as if k0s is already up and running
type GatherFacts struct {
	GenericPhase
	SkipMachineIDs bool
}

// Version thresholds that change which facts need to be gathered or validated.
var (
	// K0s doesn't rely on unique machine IDs anymore since v1.30.
	uniqueMachineIDSince = version.MustParse("v1.30.0")
	// --kubelet-root-dir was introduced in v1.32.1-rc.0
	kubeletRootDirSince = version.MustParse("v1.32.1-rc.0")
)

// Title returns the display name of the phase.
func (p *GatherFacts) Title() string {
	const title = "Gather host facts"
	return title
}

// Run investigates every host in the configuration in parallel.
func (p *GatherFacts) Run(ctx context.Context) error {
	hosts := p.Config.Spec.Hosts
	return p.parallelDo(ctx, hosts, p.investigateHost)
}

// investigateHost collects per-host facts: CPU architecture, machine ID
// (only for k0s versions that still require unique machine IDs), the
// effective hostname, and the private interface/address used for
// intra-cluster traffic.
func (p *GatherFacts) investigateHost(_ context.Context, h *cluster.Host) error {
	// Validate configuration before doing any remote work: kubeletRootDir is
	// a pure config check and should fail fast instead of after several
	// round-trips to the host.
	if h.KubeletRootDir != "" && p.Config.Spec.K0s.Version.LessThan(kubeletRootDirSince) {
		return fmt.Errorf("kubeletRootDir is not supported in k0s version %s, please remove it from the configuration", p.Config.Spec.K0s.Version)
	}

	output, err := h.Configurer.Arch(h)
	if err != nil {
		return err
	}
	h.Metadata.Arch = output

	// Machine IDs only matter for k0s versions older than uniqueMachineIDSince.
	if !p.SkipMachineIDs && p.Config.Spec.K0s.Version.LessThan(uniqueMachineIDSince) {
		id, err := h.Configurer.MachineID(h)
		if err != nil {
			return err
		}
		h.Metadata.MachineID = id
	}

	// A --hostname-override buried in --kubelet-extra-args must agree with the
	// top-level hostname setting; having both with different values is an error.
	if extra := h.InstallFlags.GetValue("--kubelet-extra-args"); extra != "" {
		ef := cluster.Flags{extra}
		if over := ef.GetValue("--hostname-override"); over != "" {
			if h.HostnameOverride != "" && h.HostnameOverride != over {
				return fmt.Errorf("hostname and installFlags kubelet-extra-args hostname-override mismatch, only define either one")
			}
			h.HostnameOverride = over
		}
	}

	if h.HostnameOverride != "" {
		log.Infof("%s: using %s from configuration as hostname", h, h.HostnameOverride)
		h.Metadata.Hostname = h.HostnameOverride
	} else {
		n := h.Configurer.Hostname(h)
		if n == "" {
			return fmt.Errorf("%s: failed to resolve a hostname", h)
		}
		h.Metadata.Hostname = n
		log.Infof("%s: using %s as hostname", h, n)
	}

	// Discover the private address: interface first (unless given), then the
	// address on that interface. Discovery failures are non-fatal.
	if h.PrivateAddress == "" {
		if h.PrivateInterface == "" {
			if iface, err := h.Configurer.PrivateInterface(h); err == nil {
				h.PrivateInterface = iface
				log.Infof("%s: discovered %s as private interface", h, iface)
			}
		}

		if h.PrivateInterface != "" {
			if addr, err := h.Configurer.PrivateAddress(h, h.PrivateInterface, h.Address()); err == nil {
				h.PrivateAddress = addr
				log.Infof("%s: discovered %s as private address", h, addr)
			}
		}
	}

	return nil
}
0707010000006E000081A40000000000000000000000016842976900002AC7000000000000000000000000000000000000002800000000k0sctl-0.25.1/phase/gather_k0s_facts.gopackage phase

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/url"
	"path"
	"strings"

	"github.com/k0sproject/dig"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/k0sctl/pkg/node"
	"github.com/k0sproject/rig/exec"
	"github.com/k0sproject/version"
	log "github.com/sirupsen/logrus"
)

// k0sstatus models the JSON document printed by "k0s status -o json".
type k0sstatus struct {
	Version       *version.Version `json:"Version"`
	Pid           int              `json:"Pid"`
	PPid          int              `json:"PPid"`
	Role          string           `json:"Role"`
	SysInit       string           `json:"SysInit"`
	StubFile      string           `json:"StubFile"`
	Workloads     bool             `json:"Workloads"`
	Args          []string         `json:"Args"`
	ClusterConfig dig.Mapping      `json:"ClusterConfig"`
	K0sVars       dig.Mapping      `json:"K0sVars"`
}

// isSingle reports whether the running k0s was started with --single=true.
func (k *k0sstatus) isSingle() bool {
	for i := range k.Args {
		if k.Args[i] == "--single=true" {
			return true
		}
	}
	return false
}

// GatherK0sFacts gathers information about hosts, such as if k0s is already up and running
type GatherK0sFacts struct {
	GenericPhase
	// leader is the elected leader host, marked during Run.
	leader *cluster.Host
	// hosts are the hosts where the `k0s version` command succeeded in Prepare.
	hosts  cluster.Hosts
}

// Title returns the display name of the phase.
func (p *GatherK0sFacts) Title() string {
	const title = "Gather k0s facts"
	return title
}

// Prepare finds hosts with k0s installed
func (p *GatherK0sFacts) Prepare(config *v1beta1.Cluster) error {
	p.Config = config
	// A host has k0s when running `k0s version` on it succeeds.
	hasK0s := func(h *cluster.Host) bool {
		return h.Exec(h.Configurer.K0sCmdf("version"), exec.Sudo(h)) == nil
	}
	p.hosts = config.Spec.Hosts.Filter(hasK0s)
	return nil
}

// ShouldRun is true when there are hosts that already have k0s installed
func (p *GatherK0sFacts) ShouldRun() bool {
	return len(p.hosts) > 0
}

// Run the phase: controllers are investigated first because leader election
// and the etcd member list depend on them, then the workers.
func (p *GatherK0sFacts) Run(ctx context.Context) error {
	controllers := cluster.Hosts(p.hosts.Controllers())
	if err := p.parallelDo(ctx, controllers, p.investigateK0s); err != nil {
		return err
	}

	p.leader = p.Config.Spec.K0sLeader()
	p.leader.Metadata.IsK0sLeader = true

	// Record the cluster ID when it can be read from the leader.
	if id, err := p.Config.Spec.K0s.GetClusterID(p.leader); err == nil {
		p.Config.Spec.K0s.Metadata.ClusterID = id
	}

	if err := p.investigateEtcd(); err != nil {
		return err
	}

	workers := cluster.Hosts(p.hosts.Workers())
	return p.parallelDo(ctx, workers, p.investigateK0s)
}

// isInternalEtcd returns true when the leader is expected to run k0s's
// built-in etcd: the leader must be a (running) controller, and the k0s
// storage config must either be absent, default, or explicitly "etcd"
// without an externalCluster section.
func (p *GatherK0sFacts) isInternalEtcd() bool {
	// Only controller roles can host etcd.
	if p.leader.Role != "controller" && p.leader.Role != "controller+worker" {
		return false
	}

	// No running k0s means there is no etcd to query yet.
	if p.leader.Metadata.K0sRunningVersion == nil {
		return false
	}

	// Without an explicit config, k0s defaults to internal etcd.
	if p.Config.Spec.K0s == nil || p.Config.Spec.K0s.Config == nil {
		log.Debugf("%s: k0s config not found, expecting default internal etcd", p.leader)
		return true
	}

	log.Debugf("%s: checking storage config for etcd", p.leader)
	if storageConfig, ok := p.Config.Spec.K0s.Config.Dig("spec", "storage").(dig.Mapping); ok {
		storageType := storageConfig.DigString("type")
		switch storageType {
		case "etcd":
			// An externalCluster section means etcd is managed outside k0s.
			if _, ok := storageConfig.Dig("etcd", "externalCluster").(dig.Mapping); ok {
				log.Debugf("%s: storage is configured with external etcd", p.leader)
				return false
			}
			log.Debugf("%s: storage type is etcd", p.leader)
			return true
		case "":
			log.Debugf("%s: storage type is default", p.leader)
			return true
		default:
			log.Debugf("%s: storage type is %s", p.leader, storageType)
			return false
		}
	}

	log.Debugf("%s: storage config not found, expecting default internal etcd", p.leader)
	return true
}

// investigateEtcd collects the etcd member list from the leader when the
// cluster uses k0s's internal etcd storage backend.
func (p *GatherK0sFacts) investigateEtcd() error {
	if !p.isInternalEtcd() {
		log.Debugf("%s: skipping etcd member list", p.leader)
		return nil
	}
	return p.listEtcdMembers(p.leader)
}

// listEtcdMembers runs `k0s etcd member-list` on the host, extracts the
// peer hostnames/IPs from the member URLs and stores them in
// p.Config.Metadata.EtcdMembers.
func (p *GatherK0sFacts) listEtcdMembers(h *cluster.Host) error {
	log.Infof("%s: listing etcd members", h)
	// etcd member-list outputs json like:
	// {"members":{"controller0":"https://172.17.0.2:2380","controller1":"https://172.17.0.3:2380"}}
	// on versions like ~1.21.x etcd member-list outputs to stderr with extra fields (from logrus),
	// hence the 2>&1 redirection.
	output, err := h.ExecOutput(h.Configurer.K0sCmdf("etcd member-list --data-dir=%s 2>&1", h.K0sDataDir()), exec.Sudo(h))
	if err != nil {
		return fmt.Errorf("failed to run list etcd members command: %w", err)
	}

	result := make(map[string]any)
	if err := json.Unmarshal([]byte(output), &result); err != nil {
		return fmt.Errorf("failed to decode etcd member-list output: %w", err)
	}

	etcdMembers := []string{}
	if members, ok := result["members"].(map[string]any); ok {
		for _, urlField := range members {
			// Non-string values are silently skipped.
			urlFieldStr, ok := urlField.(string)
			if ok {
				memberURL, err := url.Parse(urlFieldStr)
				if err != nil {
					return fmt.Errorf("failed to parse etcd member URL: %w", err)
				}
				// Strip the port; only the host part is recorded.
				memberHost, _, err := net.SplitHostPort(memberURL.Host)
				if err != nil {
					return fmt.Errorf("failed to split etcd member URL: %w", err)
				}
				log.Debugf("%s: detected etcd member %s", h, memberHost)
				etcdMembers = append(etcdMembers, memberHost)
			}
		}
	}

	p.Config.Metadata.EtcdMembers = etcdMembers
	return nil
}

// investigateK0s inspects a single host that appears to have k0s: it records
// the binary version and any existing configuration, checks the installed
// service and running status, validates that the running role matches the
// configured role, propagates dynamic-config mode, and marks the host for
// upgrade or as ready where applicable.
func (p *GatherK0sFacts) investigateK0s(ctx context.Context, h *cluster.Host) error {
	output, err := h.ExecOutput(h.Configurer.K0sCmdf("version"), exec.Sudo(h))
	if err != nil {
		// Not an error: the host simply has no k0s binary in PATH.
		log.Debugf("%s: no 'k0s' binary in PATH", h)
		return nil
	}

	binVersion, err := version.NewVersion(strings.TrimSpace(output))
	if err != nil {
		return fmt.Errorf("failed to parse installed k0s version: %w", err)
	}

	h.Metadata.K0sBinaryVersion = binVersion

	log.Debugf("%s: has k0s binary version %s", h, h.Metadata.K0sBinaryVersion)

	// Preserve any pre-existing k0s configuration found on controllers.
	if h.IsController() && h.Configurer.FileExist(h, h.K0sConfigPath()) {
		cfg, err := h.Configurer.ReadFile(h, h.K0sConfigPath())
		if cfg != "" && err == nil {
			log.Infof("%s: found existing configuration", h)
			h.Metadata.K0sExistingConfig = cfg
		}
	}

	var existingServiceScript string

	// "k0sserver" is checked for compatibility with older k0s service names.
	for _, svc := range []string{"k0scontroller", "k0sworker", "k0sserver"} {
		if path, err := h.Configurer.ServiceScriptPath(h, svc); err == nil && path != "" {
			existingServiceScript = path
			break
		}
	}

	output, err = h.ExecOutput(h.Configurer.K0sCmdf("status -o json"), exec.Sudo(h))
	if err != nil {
		// Not running: only a problem if a service has been installed.
		if existingServiceScript == "" {
			log.Debugf("%s: an existing k0s instance is not running and does not seem to have been installed as a service", h)
			return nil
		}

		if Force {
			log.Warnf("%s: an existing k0s instance is not running but has been installed as a service at %s - ignoring because --force was given", h, existingServiceScript)
			return nil
		}

		return fmt.Errorf("k0s doesn't appear to be running but has been installed as a service at %s - please remove it or start the service", existingServiceScript)
	}

	// Running without an installed service suggests a foreign or broken setup.
	if existingServiceScript == "" {
		return fmt.Errorf("k0s is running but has not been installed as a service, possibly a non-k0sctl managed host or a broken installation - you can try to reset the host by setting `reset: true` on it")
	}

	status := k0sstatus{}

	if err := json.Unmarshal([]byte(output), &status); err != nil {
		log.Warnf("%s: failed to decode k0s status output: %s", h, err.Error())
		return nil
	}

	if status.Version == nil || status.Role == "" || status.Pid == 0 {
		log.Debugf("%s: k0s is not running", h)
		return nil
	}

	// Normalize legacy role names and derive the effective role for
	// controllers that also run workloads.
	switch status.Role {
	case "server":
		status.Role = "controller"
	case "server+worker":
		status.Role = "controller+worker"
	case "controller":
		if status.Workloads {
			if status.isSingle() {
				status.Role = "single"
			} else {
				status.Role = "controller+worker"
			}
		}
	}

	if status.Role != h.Role {
		return fmt.Errorf("%s: is configured as k0s %s but is already running as %s - role change is not supported", h, h.Role, status.Role)
	}

	h.Metadata.K0sRunningVersion = status.Version
	// Without an explicit target version, adopt the running one.
	if p.Config.Spec.K0s.Version == nil {
		p.Config.Spec.K0s.Version = status.Version
	}

	h.Metadata.NeedsUpgrade = p.needsUpgrade(h)

	var args cluster.Flags
	if len(status.Args) > 2 {
		// status.Args contains the binary path and the role as the first two elements, which we can ignore here.
		for _, a := range status.Args[2:] {
			args.Add(a)
		}
	}
	h.Metadata.K0sStatusArgs = args

	log.Infof("%s: is running k0s %s version %s", h, h.Role, h.Metadata.K0sRunningVersion)
	if h.IsController() {
		// Dynamic config can be enabled via the running process args or the
		// configured install flags; either one switches the whole run into
		// dynamic config mode.
		for _, a := range h.Metadata.K0sStatusArgs {
			if strings.HasPrefix(a, "--enable-dynamic-config") && !strings.HasSuffix(a, "false") {
				if !p.Config.Spec.K0s.DynamicConfig {
					log.Warnf("%s: controller has dynamic config enabled, but spec.k0s.dynamicConfig was not set in configuration, proceeding in dynamic config mode", h)
					p.Config.Spec.K0s.DynamicConfig = true
				}
			}
		}
		if h.InstallFlags.Include("--enable-dynamic-config") {
			if val := h.InstallFlags.GetValue("--enable-dynamic-config"); val != "false" {
				if !p.Config.Spec.K0s.DynamicConfig {
					log.Warnf("%s: controller has --enable-dynamic-config in installFlags, but spec.k0s.dynamicConfig was not set in configuration, proceeding in dynamic config mode", h)
				}
				p.Config.Spec.K0s.DynamicConfig = true
			}
		}

		if p.Config.Spec.K0s.DynamicConfig {
			h.InstallFlags.AddOrReplace("--enable-dynamic-config")
		}
	}

	if h.Role == "controller+worker" && !h.NoTaints {
		log.Warnf("%s: the controller+worker node will not schedule regular workloads without toleration for node-role.kubernetes.io/master:NoSchedule unless 'noTaints: true' is set", h)
	}

	if h.Metadata.NeedsUpgrade {
		log.Warnf("%s: k0s will be upgraded", h)
	}

	// For workers, check whether the kube node has already joined and is ready.
	if !h.IsController() {
		log.Infof("%s: checking if worker %s has joined", p.leader, h.Metadata.Hostname)
		if err := node.KubeNodeReadyFunc(h)(ctx); err != nil {
			log.Debugf("%s: failed to get ready status: %s", h, err.Error())
		} else {
			h.Metadata.Ready = true
		}
	}

	return nil
}

// needsUpgrade returns true when the host should get a new k0s binary:
// either an explicitly supplied file/binary has changed, or the target
// version is newer than the running one. Hosts marked for reset never
// need an upgrade.
func (p *GatherK0sFacts) needsUpgrade(h *cluster.Host) bool {
	if h.Reset {
		return false
	}

	// If supplemental files or a k0s binary have been specified explicitly,
	// always upgrade.  This covers the scenario where a user moves from a
	// default-install cluster to one fed by OCI image bundles (ie. airgap)
	for _, f := range h.Files {
		if f.IsURL() {
			log.Debugf("%s: marked for upgrade because there are URL source file uploads for the host", h)
			return true
		}

		for _, s := range f.Sources {
			dest := f.DestinationFile
			if dest == "" {
				dest = path.Join(f.DestinationDir, s.Path)
			}
			src := path.Join(f.Base, s.Path)

			if h.FileChanged(src, dest) {
				log.Debugf("%s: marked for upgrade because file was changed for upload %s", h, src)
				return true
			}
		}
	}

	// A locally supplied binary that differs from the installed one also
	// forces an upgrade.
	if h.K0sBinaryPath != "" && h.FileChanged(h.K0sBinaryPath, h.Configurer.K0sBinaryPath()) {
		log.Debugf("%s: marked for upgrade because of a static k0s binary path %s", h, h.K0sBinaryPath)
		return true
	}

	return p.Config.Spec.K0s.Version.GreaterThan(h.Metadata.K0sRunningVersion)
}
0707010000006F000081A4000000000000000000000001684297690000035A000000000000000000000000000000000000002D00000000k0sctl-0.25.1/phase/gather_k0s_facts_test.gopackage phase

import (
	"testing"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/version"
	"github.com/stretchr/testify/require"
)

// TestNeedsUpgrade verifies that needsUpgrade only triggers when the target
// version is strictly greater than the running one, including comparisons of
// the +k0s.N build metadata part.
func TestNeedsUpgrade(t *testing.T) {
	cfg := &v1beta1.Cluster{
		Spec: &cluster.Spec{
			K0s: &cluster.K0s{
				Version: version.MustParse("1.23.3+k0s.1"),
			},
		},
	}
	h := &cluster.Host{
		Metadata: cluster.HostMetadata{
			K0sRunningVersion: version.MustParse("1.23.3+k0s.1"),
		},
	}

	p := GatherK0sFacts{GenericPhase: GenericPhase{Config: cfg}}

	// Equal versions: no upgrade.
	require.False(t, p.needsUpgrade(h))
	// Running version newer than target: no downgrade.
	h.Metadata.K0sRunningVersion = version.MustParse("1.23.3+k0s.2")
	require.False(t, p.needsUpgrade(h))
	// Running version older than target: upgrade.
	h.Metadata.K0sRunningVersion = version.MustParse("1.23.3+k0s.0")
	require.True(t, p.needsUpgrade(h))
}
07070100000070000081A40000000000000000000000016842976900000778000000000000000000000000000000000000002500000000k0sctl-0.25.1/phase/generic_phase.gopackage phase

import (
	"context"
	"fmt"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
)

// GenericPhase is a basic phase which gets a config via prepare, sets it into p.Config
type GenericPhase struct {
	Config *v1beta1.Cluster

	// manager is set via SetManager and provides concurrency limits and
	// dry-run facilities to the phase.
	manager *Manager
}

// GetConfig returns the cluster configuration held by the phase.
func (p *GenericPhase) GetConfig() *v1beta1.Cluster {
	cfg := p.Config
	return cfg
}

// Prepare stores the cluster configuration for later use by the phase.
func (p *GenericPhase) Prepare(c *v1beta1.Cluster) error {
	p.Config = c

	return nil
}

// Wet is a shorthand for manager.Wet, delegating dry-run aware execution
// of funcs for the given host with the given message.
func (p *GenericPhase) Wet(host fmt.Stringer, msg string, funcs ...errorfunc) error {
	return p.manager.Wet(host, msg, funcs...)
}

// IsWet returns true when the manager is NOT in dry-run mode, i.e. changes
// will actually be applied.
func (p *GenericPhase) IsWet() bool {
	return !p.manager.DryRun
}

// DryMsg is a shorthand for manager.DryMsg: records a message describing an
// action that would have been performed for the host.
func (p *GenericPhase) DryMsg(host fmt.Stringer, msg string) {
	p.manager.DryMsg(host, msg)
}

// DryMsgf is a shorthand for manager.DryMsg + fmt.Sprintf
func (p *GenericPhase) DryMsgf(host fmt.Stringer, msg string, args ...any) {
	p.manager.DryMsg(host, fmt.Sprintf(msg, args...))
}

// SetManager adds a reference to the phase manager
func (p *GenericPhase) SetManager(m *Manager) {
	p.manager = m
}

// parallelDo runs funcs over the hosts in parallel, honoring the manager's
// Concurrency setting; zero means unlimited parallelism.
func (p *GenericPhase) parallelDo(ctx context.Context, hosts cluster.Hosts, funcs ...func(context.Context, *cluster.Host) error) error {
	if c := p.manager.Concurrency; c != 0 {
		return hosts.BatchedParallelEach(ctx, c, funcs...)
	}
	return hosts.ParallelEach(ctx, funcs...)
}

// parallelDoUpload runs funcs over the hosts in parallel, batched by the
// manager's ConcurrentUploads limit.
// NOTE(review): the unlimited-parallelism guard checks Concurrency while the
// batch size uses ConcurrentUploads — looks intentional, but confirm that a
// zero ConcurrentUploads with a non-zero Concurrency cannot occur.
func (p *GenericPhase) parallelDoUpload(ctx context.Context, hosts cluster.Hosts, funcs ...func(context.Context, *cluster.Host) error) error {
	if p.manager.Concurrency == 0 {
		return hosts.ParallelEach(ctx, funcs...)
	}
	return hosts.BatchedParallelEach(ctx, p.manager.ConcurrentUploads, funcs...)
}
07070100000071000081A400000000000000000000000168429769000008E8000000000000000000000000000000000000002600000000k0sctl-0.25.1/phase/get_kubeconfig.gopackage phase

import (
	"context"
	"fmt"

	"al.essio.dev/pkg/shellescape"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/rig/exec"
	"k8s.io/client-go/tools/clientcmd"
)

// GetKubeconfig is a phase to get and dump the admin kubeconfig
type GetKubeconfig struct {
	GenericPhase
	// APIAddress overrides the kube API server URL written into the kubeconfig.
	APIAddress string
	// User overrides the user name written into the kubeconfig.
	User       string
	// Cluster overrides the cluster name written into the kubeconfig.
	Cluster    string
}

// Title returns the display name of the phase.
func (p *GetKubeconfig) Title() string {
	const title = "Get admin kubeconfig"
	return title
}

// readKubeconfig fetches the admin kubeconfig from the host. Defined as a
// variable so tests can substitute a fake reader.
var readKubeconfig = func(h *cluster.Host) (string, error) {
	cmd := h.Configurer.K0sCmdf("kubeconfig admin --data-dir=%s", shellescape.Quote(h.K0sDataDir()))
	output, err := h.ExecOutput(cmd, exec.Sudo(h), exec.HideOutput())
	if err != nil {
		return "", fmt.Errorf("get kubeconfig from host: %w", err)
	}
	return output, nil
}

// DryRun reports that the admin kubeconfig would be fetched from the first
// controller. Errors out instead of panicking when no controllers exist.
func (p *GetKubeconfig) DryRun() error {
	controllers := p.Config.Spec.Hosts.Controllers()
	if len(controllers) == 0 {
		return fmt.Errorf("no controller hosts found in the configuration")
	}
	p.DryMsg(controllers[0], "get admin kubeconfig")
	return nil
}

// Run fetches the admin kubeconfig from the first controller, applies any
// name/user/address overrides and stores the rewritten kubeconfig in the
// cluster metadata. Errors out instead of panicking when no controllers exist.
func (p *GetKubeconfig) Run(_ context.Context) error {
	controllers := p.Config.Spec.Hosts.Controllers()
	if len(controllers) == 0 {
		return fmt.Errorf("no controller hosts found in the configuration")
	}
	h := controllers[0]

	output, err := readKubeconfig(h)
	if err != nil {
		return fmt.Errorf("read kubeconfig from host: %w", err)
	}

	// Default to the configured kube API URL when no override was given.
	if p.APIAddress == "" {
		p.APIAddress = p.Config.Spec.KubeAPIURL()
	}

	if p.User != "" {
		p.Config.Metadata.User = p.User
	}

	if p.Cluster != "" {
		p.Config.Metadata.Name = p.Cluster
	}

	cfgString, err := kubeConfig(output, p.Config.Metadata.Name, p.APIAddress, p.Config.Metadata.User)
	if err != nil {
		return err
	}

	p.Config.Metadata.Kubeconfig = cfgString

	return nil
}

// kubeConfig reads in the raw kubeconfig produced by "k0s kubeconfig admin"
// and renames the default "local" cluster, "Default" context and "user" user
// entries to the given cluster name and user, pointing the cluster's server
// at the given address.
func kubeConfig(raw string, name string, address, user string) (string, error) {
	cfg, err := clientcmd.Load([]byte(raw))
	if err != nil {
		return "", err
	}

	// Capture each default entry before deleting it so the rename is safe
	// even when the new name equals the default key, and so a missing entry
	// fails with an error instead of a nil pointer dereference.
	clusterEntry, ok := cfg.Clusters["local"]
	if !ok {
		return "", fmt.Errorf("kubeconfig does not contain the expected %q cluster entry", "local")
	}
	delete(cfg.Clusters, "local")
	clusterEntry.Server = address
	cfg.Clusters[name] = clusterEntry

	contextEntry, ok := cfg.Contexts["Default"]
	if !ok {
		return "", fmt.Errorf("kubeconfig does not contain the expected %q context entry", "Default")
	}
	delete(cfg.Contexts, "Default")
	contextEntry.Cluster = name
	contextEntry.AuthInfo = user
	cfg.Contexts[name] = contextEntry

	cfg.CurrentContext = name

	// Only rename the user entry when it exists; inserting a nil entry would
	// corrupt the config.
	if authInfo, ok := cfg.AuthInfos["user"]; ok {
		delete(cfg.AuthInfos, "user")
		cfg.AuthInfos[user] = authInfo
	}

	out, err := clientcmd.Write(*cfg)
	if err != nil {
		return "", err
	}

	return string(out), nil
}
07070100000072000081A400000000000000000000000168429769000006B5000000000000000000000000000000000000002B00000000k0sctl-0.25.1/phase/get_kubeconfig_test.gopackage phase

import (
	"context"
	"strings"
	"testing"

	"github.com/k0sproject/dig"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/rig"
	"github.com/stretchr/testify/require"
	"k8s.io/client-go/tools/clientcmd"
)

// fakeReader is a stand-in for readKubeconfig in tests; it returns a minimal
// kubeconfig with the default "local"/"Default"/"user" entries that
// kubeConfig is expected to rename.
func fakeReader(h *cluster.Host) (string, error) {
	return strings.ReplaceAll(`apiVersion: v1
clusters:
- cluster:
    server: https://localhost:6443
  name: local
contexts:
- context:
    cluster: local
    user: user
  name: Default
current-context: Default
kind: Config
preferences: {}
users:
- name: user
  user:
`, "\t", "  "), nil
}

// TestGetKubeconfig verifies that the phase rewrites the kubeconfig's server
// address from the controller's address, including bracketed formatting for
// IPv6 addresses.
func TestGetKubeconfig(t *testing.T) {
	cfg := &v1beta1.Cluster{
		Metadata: &v1beta1.ClusterMetadata{
			Name: "k0s",
		},
		Spec: &cluster.Spec{
			K0s: &cluster.K0s{Config: dig.Mapping{}},
			Hosts: []*cluster.Host{
				{Role: "controller", Connection: rig.Connection{SSH: &rig.SSH{Address: "10.0.0.1", Port: 22}}},
			},
		},
	}

	// Swap in the fake kubeconfig reader and restore it afterwards.
	origReadKubeconfig := readKubeconfig
	defer func() { readKubeconfig = origReadKubeconfig }()
	readKubeconfig = fakeReader

	p := GetKubeconfig{GenericPhase: GenericPhase{Config: cfg}}
	require.NoError(t, p.Run(context.Background()))
	conf, err := clientcmd.Load([]byte(cfg.Metadata.Kubeconfig))
	require.NoError(t, err)
	require.Equal(t, "https://10.0.0.1:6443", conf.Clusters["k0s"].Server)

	// IPv6 addresses must be bracketed in the resulting URL.
	cfg.Spec.Hosts[0].Connection.SSH.Address = "abcd:efgh:ijkl:mnop"
	p.APIAddress = ""
	require.NoError(t, p.Run(context.Background()))
	conf, err = clientcmd.Load([]byte(cfg.Metadata.Kubeconfig))
	require.NoError(t, err)
	require.Equal(t, "https://[abcd:efgh:ijkl:mnop]:6443", conf.Clusters["k0s"].Server)
}
07070100000073000081A40000000000000000000000016842976900001085000000000000000000000000000000000000002600000000k0sctl-0.25.1/phase/initialize_k0s.gopackage phase

import (
	"context"
	"fmt"
	"strings"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/k0sctl/pkg/node"
	"github.com/k0sproject/k0sctl/pkg/retry"
	"github.com/k0sproject/rig/exec"
	log "github.com/sirupsen/logrus"
)

// InitializeK0s sets up the "initial" k0s controller
type InitializeK0s struct {
	GenericPhase
	// leader is the controller to initialize; nil when the leader already
	// runs k0s, which makes ShouldRun return false.
	leader *cluster.Host
}

// Title returns the display name of the phase.
func (p *InitializeK0s) Title() string {
	const title = "Initialize the k0s cluster"
	return title
}

// Prepare picks the designated leader host, but only when it isn't already
// running k0s — an already-running leader means the cluster is initialized.
func (p *InitializeK0s) Prepare(config *v1beta1.Cluster) error {
	p.Config = config
	if leader := p.Config.Spec.K0sLeader(); leader.Metadata.K0sRunningVersion == nil {
		p.leader = leader
	}
	return nil
}

// ShouldRun is true when an uninitialized leader host exists and it is not
// scheduled for reset.
func (p *InitializeK0s) ShouldRun() bool {
	if p.leader == nil {
		return false
	}
	return !p.leader.Reset
}

// CleanUp cleans up the environment override file and resets k0s on the
// leader if it had been installed by this phase.
func (p *InitializeK0s) CleanUp() {
	h := p.leader

	log.Infof("%s: cleaning up", h)
	if len(h.Environment) > 0 {
		if err := h.Configurer.CleanupServiceEnvironment(h, h.K0sServiceName()); err != nil {
			log.Warnf("%s: failed to clean up service environment: %s", h, err.Error())
		}
	}
	// Only reset when the install actually happened; failures are non-fatal.
	if h.Metadata.K0sInstalled {
		if err := h.Exec(h.K0sResetCommand(), exec.Sudo(h)); err != nil {
			log.Warnf("%s: k0s reset failed", h)
		}
	}
}

// Run installs k0s on the leader as a controller service, applies any
// environment overrides, starts the service, waits for the kubernetes API
// to become ready (and for dynamic config reconciliation when enabled),
// then records version and cluster ID metadata.
func (p *InitializeK0s) Run(ctx context.Context) error {
	h := p.leader
	h.Metadata.IsK0sLeader = true

	// Dynamic config mode can come from spec.k0s.dynamicConfig or from an
	// --enable-dynamic-config install flag that isn't explicitly "false".
	if p.Config.Spec.K0s.DynamicConfig || (h.InstallFlags.Include("--enable-dynamic-config") && h.InstallFlags.GetValue("--enable-dynamic-config") != "false") {
		p.Config.Spec.K0s.DynamicConfig = true
		h.InstallFlags.AddOrReplace("--enable-dynamic-config")
	}

	if Force {
		log.Warnf("%s: --force given, using k0s install with --force", h)
		h.InstallFlags.AddOrReplace("--force=true")
	}

	log.Infof("%s: installing k0s controller", h)
	cmd, err := h.K0sInstallCommand()
	if err != nil {
		return err
	}

	// In dry-run mode, mark the leader as a fake leader instead of installing.
	err = p.Wet(p.leader, fmt.Sprintf("install first k0s controller using `%s`", strings.ReplaceAll(cmd, p.leader.Configurer.K0sBinaryPath(), "k0s")), func() error {
		return h.Exec(cmd, exec.Sudo(h))
	}, func() error {
		p.leader.Metadata.DryRunFakeLeader = true
		return nil
	})
	if err != nil {
		return err
	}

	h.Metadata.K0sInstalled = true

	// Apply service environment variables; dry-run only reports key names
	// and value lengths to avoid leaking secrets.
	if len(h.Environment) > 0 {
		err = p.Wet(h, "configure k0s service environment variables", func() error {
			log.Infof("%s: updating service environment", h)
			return h.Configurer.UpdateServiceEnvironment(h, h.K0sServiceName(), h.Environment)
		}, func() error {
			for k, v := range h.Environment {
				p.DryMsgf(h, "%s=<%d characters>", k, len(v))
			}
			return nil
		})
		if err != nil {
			return err
		}
	}

	err = p.Wet(h, "start k0s service", func() error {
		if err := h.Configurer.StartService(h, h.K0sServiceName()); err != nil {
			return err
		}

		log.Infof("%s: waiting for the k0s service to start", h)
		if err := retry.AdaptiveTimeout(ctx, retry.DefaultTimeout, node.ServiceRunningFunc(h, h.K0sServiceName())); err != nil {
			return err
		}

		// Poll the apiserver's /readyz endpoint until it reports "ok".
		log.Infof("%s: wait for kubernetes to reach ready state", h)
		err := retry.AdaptiveTimeout(ctx, retry.DefaultTimeout, func(_ context.Context) error {
			out, err := h.ExecOutput(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "get --raw='/readyz'"), exec.Sudo(h))
			if out != "ok" {
				return fmt.Errorf("kubernetes api /readyz responded with %q", out)
			}
			return err
		})
		if err != nil {
			return fmt.Errorf("kubernetes not ready: %w", err)
		}

		h.Metadata.Ready = true

		return nil
	})
	if err != nil {
		return err
	}

	// Wait for dynamic config reconciliation only on a real (wet) run.
	if p.IsWet() && p.Config.Spec.K0s.DynamicConfig {
		if err := retry.AdaptiveTimeout(ctx, retry.DefaultTimeout, node.K0sDynamicConfigReadyFunc(h)); err != nil {
			return fmt.Errorf("dynamic config reconciliation failed: %w", err)
		}
	}

	h.Metadata.K0sRunningVersion = p.Config.Spec.K0s.Version
	h.Metadata.K0sBinaryVersion = p.Config.Spec.K0s.Version
	h.Metadata.Ready = true

	if p.IsWet() {
		if id, err := p.Config.Spec.K0s.GetClusterID(h); err == nil {
			p.Config.Spec.K0s.Metadata.ClusterID = id
		}
	}

	return nil
}
07070100000074000081A40000000000000000000000016842976900000AB1000000000000000000000000000000000000002800000000k0sctl-0.25.1/phase/install_binaries.gopackage phase

import (
	"context"
	"fmt"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/rig/exec"
	"github.com/sirupsen/logrus"
)

// InstallBinaries installs the k0s binaries from the temp location of UploadBinaries or InstallBinaries
type InstallBinaries struct {
	GenericPhase
	// hosts are the hosts that have a downloaded temp binary pending install.
	hosts cluster.Hosts
}

// Title returns the display name of the phase.
func (p *InstallBinaries) Title() string {
	const title = "Install k0s binaries on hosts"
	return title
}

// Prepare selects the hosts that have a pending temp binary and are neither
// being reset nor waiting for an upgrade.
func (p *InstallBinaries) Prepare(config *v1beta1.Cluster) error {
	p.Config = config
	p.hosts = p.Config.Spec.Hosts.Filter(func(h *cluster.Host) bool {
		switch {
		case h.Reset && h.Metadata.K0sBinaryVersion != nil:
			// A host being reset doesn't need a fresh binary.
			return false
		case h.Metadata.NeedsUpgrade:
			// Upgrade is handled in UpgradeControllers/UpgradeWorkers phases.
			return false
		default:
			return h.Metadata.K0sBinaryTempFile != ""
		}
	})
	return nil
}

// ShouldRun is true when there are hosts to install on, or always during a
// dry-run so the DryRun report gets produced.
func (p *InstallBinaries) ShouldRun() bool {
	return !p.IsWet() || len(p.hosts) != 0
}

// DryRun reports what would happen if Run is called.
func (p *InstallBinaries) DryRun() error {
	return p.parallelDo(
		context.Background(),
		p.Config.Spec.Hosts.Filter(func(h *cluster.Host) bool { return h.Metadata.K0sBinaryTempFile != "" }),
		func(_ context.Context, h *cluster.Host) error {
			p.DryMsgf(h, "install k0s %s binary from %s to %s", p.Config.Spec.K0s.Version, h.Metadata.K0sBinaryTempFile, h.Configurer.K0sBinaryPath())
			// Make the temp binary executable so later dry-run phases can
			// invoke it in place of the real binary.
			if err := h.Execf(`chmod +x "%s"`, h.Metadata.K0sBinaryTempFile, exec.Sudo(h)); err != nil {
				logrus.Warnf("%s: failed to chmod k0s temp binary for dry-run: %s", h, err.Error())
			}
			// Point the configurer's binary path at the temp file for the
			// remainder of the dry-run.
			h.Configurer.SetPath("K0sBinaryPath", h.Metadata.K0sBinaryTempFile)
			h.Metadata.K0sBinaryVersion = p.Config.Spec.K0s.Version
			return nil
		},
	)
}

// Run installs the pending binaries on all selected hosts in parallel.
func (p *InstallBinaries) Run(ctx context.Context) error {
	install := p.installBinary
	return p.parallelDo(ctx, p.hosts, install)
}

// installBinary moves the downloaded temp binary into its final location and
// clears the temp file reference on success.
func (p *InstallBinaries) installBinary(_ context.Context, h *cluster.Host) error {
	tempFile := h.Metadata.K0sBinaryTempFile
	if err := h.UpdateK0sBinary(tempFile, p.Config.Spec.K0s.Version); err != nil {
		return fmt.Errorf("failed to install k0s binary: %w", err)
	}
	h.Metadata.K0sBinaryTempFile = ""
	return nil
}

// CleanUp removes any k0s binary temp files that were not installed.
func (p *InstallBinaries) CleanUp() {
	err := p.parallelDo(context.Background(), p.hosts, func(_ context.Context, h *cluster.Host) error {
		if h.Metadata.K0sBinaryTempFile == "" {
			return nil
		}
		logrus.Infof("%s: cleaning up k0s binary tempfile", h)
		// Best-effort removal; errors are intentionally ignored during cleanup.
		_ = h.Configurer.DeleteFile(h, h.Metadata.K0sBinaryTempFile)
		return nil
	})
	if err != nil {
		logrus.Debugf("failed to clean up tempfiles: %v", err)
	}
}
07070100000075000081A40000000000000000000000016842976900002111000000000000000000000000000000000000002B00000000k0sctl-0.25.1/phase/install_controllers.gopackage phase

import (
	"bytes"
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/k0sctl/pkg/node"
	"github.com/k0sproject/k0sctl/pkg/retry"
	"github.com/k0sproject/rig/exec"
	log "github.com/sirupsen/logrus"
)

// InstallControllers installs k0s controllers and joins them to the cluster
type InstallControllers struct {
	GenericPhase
	// hosts are the non-leader controllers that don't run k0s yet.
	hosts      cluster.Hosts
	// leader is the controller used for generating and invalidating join tokens.
	leader     *cluster.Host
	// numRunning is the count of controllers already running k0s at Prepare time.
	numRunning int
}

// Title returns the display name of the phase.
func (p *InstallControllers) Title() string {
	const title = "Install controllers"
	return title
}

// Prepare selects the non-leader controllers that aren't yet running k0s and
// aren't scheduled for reset or upgrade, while tallying how many controllers
// are already running.
func (p *InstallControllers) Prepare(config *v1beta1.Cluster) error {
	p.Config = config
	p.leader = p.Config.Spec.K0sLeader()
	var running int
	p.hosts = p.Config.Spec.Hosts.Controllers().Filter(func(h *cluster.Host) bool {
		if h.Metadata.K0sRunningVersion != nil {
			running++
		}
		if h.Reset || h.Metadata.NeedsUpgrade {
			return false
		}
		return h != p.leader && h.Metadata.K0sRunningVersion == nil
	})
	p.numRunning = running
	return nil
}

// ShouldRun is true when there is at least one controller to install.
func (p *InstallControllers) ShouldRun() bool {
	return len(p.hosts) != 0
}

// CleanUp cleans up the environment override files on hosts and resets k0s
// on hosts that were installed but never became ready. Join tokens are
// invalidated first via After.
func (p *InstallControllers) CleanUp() {
	_ = p.After()
	_ = p.hosts.Filter(func(h *cluster.Host) bool {
		// Only hosts that did not reach ready state need cleanup.
		return !h.Metadata.Ready
	}).ParallelEach(context.Background(), func(_ context.Context, h *cluster.Host) error {
		log.Infof("%s: cleaning up", h)
		if len(h.Environment) > 0 {
			if err := h.Configurer.CleanupServiceEnvironment(h, h.K0sServiceName()); err != nil {
				log.Warnf("%s: failed to clean up service environment: %v", h, err)
			}
		}
		// Reset only on real runs where install actually happened.
		if h.Metadata.K0sInstalled && p.IsWet() {
			if err := h.Exec(h.K0sResetCommand(), exec.Sudo(h)); err != nil {
				log.Warnf("%s: k0s reset failed", h)
			}
		}
		return nil
	})
}

// After invalidates the single-use controller join tokens generated during
// Run and overwrites the token files left on the joined hosts so the secrets
// don't linger. All failures are reported as warnings only.
func (p *InstallControllers) After() error {
	for i, h := range p.hosts {
		if h.Metadata.K0sTokenData.Token == "" {
			continue
		}
		// Drop the secret from memory; the token ID suffices for invalidation.
		h.Metadata.K0sTokenData.Token = ""
		err := p.Wet(p.leader, fmt.Sprintf("invalidate k0s join token for controller %s", h), func() error {
			log.Debugf("%s: invalidating join token for controller %d", p.leader, i+1)
			return p.leader.Exec(p.leader.Configurer.K0sCmdf("token invalidate --data-dir=%s %s", p.leader.K0sDataDir(), h.Metadata.K0sTokenData.ID), exec.Sudo(p.leader))
		})
		if err != nil {
			// These are controller tokens (the original message said "worker").
			log.Warnf("%s: failed to invalidate controller join token: %v", p.leader, err)
		}
		_ = p.Wet(h, "overwrite k0s join token file", func() error {
			if err := h.Configurer.WriteFile(h, h.K0sJoinTokenPath(), "# overwritten by k0sctl after join\n", "0600"); err != nil {
				log.Warnf("%s: failed to overwrite the join token file at %s", h, h.K0sJoinTokenPath())
			}
			return nil
		})
	}
	return nil
}

// Run generates a join token for each pending controller, validates API
// connectivity from the controllers, then installs k0s on them in
// quorum-preserving batches.
func (p *InstallControllers) Run(ctx context.Context) error {
	// generate all join tokens up front so a token failure aborts before any install
	for _, h := range p.hosts {
		if p.IsWet() {
			log.Infof("%s: generate join token for %s", p.leader, h)
			token, err := p.Config.Spec.K0s.GenerateToken(
				ctx,
				p.leader,
				"controller",
				30*time.Minute,
			)
			if err != nil {
				return err
			}
			tokenData, err := cluster.ParseToken(token)
			if err != nil {
				return err
			}
			h.Metadata.K0sTokenData = tokenData
		} else {
			p.DryMsgf(p.leader, "generate a k0s join token for controller %s", h)
			h.Metadata.K0sTokenData.ID = "dry-run"
			h.Metadata.K0sTokenData.URL = p.Config.Spec.KubeAPIURL()
		}
	}
	err := p.parallelDo(ctx, p.hosts, func(_ context.Context, h *cluster.Host) error {
		if p.IsWet() || !p.leader.Metadata.DryRunFakeLeader {
			log.Infof("%s: validating api connection to %s", h, h.Metadata.K0sTokenData.URL)
			if err := retry.AdaptiveTimeout(ctx, 30*time.Second, node.HTTPStatusFunc(h, h.Metadata.K0sTokenData.URL, 200, 401, 404)); err != nil {
				return fmt.Errorf("failed to connect from controller to kubernetes api - check networking: %w", err)
			}
		} else {
			// fixed typo: "validation to because" -> "validation because"
			log.Warnf("%s: dry-run: skipping api connection validation because cluster is not actually running", h)
		}
		return nil
	})
	if err != nil {
		return err
	}

	// just one controller to install, install it and return
	if len(p.hosts) == 1 {
		log.Debug("only one controller to install")
		return p.installK0s(ctx, p.hosts[0])
	}

	if p.manager.Concurrency < 2 {
		// fixed typo: "sequantially" -> "sequentially"
		log.Debugf("installing %d controllers sequentially because concurrency is set to 1", len(p.hosts))
		return p.hosts.Each(ctx, p.installK0s)
	}

	var remaining cluster.Hosts
	remaining = append(remaining, p.hosts...)

	// with only the leader running, bring up to two more controllers first to
	// reach HA state and quorum
	if p.numRunning == 1 && len(remaining) >= 2 {
		perBatch := min(2, p.manager.Concurrency)
		firstBatch := remaining[:perBatch]

		log.Debugf("installing first %d controllers to reach HA state and quorum", perBatch)
		if err := firstBatch.BatchedParallelEach(ctx, perBatch, p.installK0s); err != nil {
			return err
		}
		remaining = remaining[perBatch:]
		p.numRunning += perBatch

		if len(remaining) == 0 {
			log.Debug("all controllers installed")
			return nil
		}
		log.Debugf("remaining %d controllers to install", len(remaining))
	}

	// restore an odd number of running controllers before batching
	if p.numRunning%2 == 0 {
		log.Debug("even number of running controllers, installing one first to reach quorum")
		if err := p.installK0s(ctx, remaining[0]); err != nil {
			return err
		}
		remaining = remaining[1:]
		p.numRunning++
	}

	// install the rest in parallel in uneven quorum-optimized batches
	for len(remaining) > 0 {
		currentTotal := p.numRunning + len(remaining)
		quorum := (currentTotal / 2) + 1
		// never take on more than half of the quorum at once
		safeMax := (quorum / 2)
		if safeMax < 1 {
			safeMax = 1
		}

		perBatch := min(safeMax, p.manager.Concurrency, len(remaining))

		log.Debugf("installing next %d controllers (quorum=%d, total=%d)", perBatch, quorum, currentTotal)

		batch := remaining[:perBatch]
		if err := batch.BatchedParallelEach(ctx, perBatch, p.installK0s); err != nil {
			return err
		}

		remaining = remaining[perBatch:]
		p.numRunning += perBatch
	}
	log.Debug("all controllers installed")
	return nil
}

// installK0s writes the join token on the host, runs the k0s install command,
// then (when not in dry-run) starts the service and waits until the
// controller's readiness endpoint responds.
func (p *InstallControllers) installK0s(ctx context.Context, h *cluster.Host) error {
	tokenPath := h.K0sJoinTokenPath()
	log.Infof("%s: writing join token to %s", h, tokenPath)
	err := p.Wet(h, fmt.Sprintf("write k0s join token to %s", tokenPath), func() error {
		return h.Configurer.WriteFile(h, tokenPath, h.Metadata.K0sTokenData.Token, "0600")
	})
	if err != nil {
		return err
	}

	if p.Config.Spec.K0s.DynamicConfig {
		h.InstallFlags.AddOrReplace("--enable-dynamic-config")
	}

	if Force {
		log.Warnf("%s: --force given, using k0s install with --force", h)
		h.InstallFlags.AddOrReplace("--force=true")
	}

	cmd, err := h.K0sInstallCommand()
	if err != nil {
		return err
	}
	log.Infof("%s: installing k0s controller", h)
	// fixed: the dry-run message was missing the closing backtick around the command
	err = p.Wet(h, fmt.Sprintf("install k0s controller using `%s`", strings.ReplaceAll(cmd, h.Configurer.K0sBinaryPath(), "k0s")), func() error {
		var stdout, stderr bytes.Buffer
		runner, err := h.ExecStreams(cmd, nil, &stdout, &stderr, exec.Sudo(h))
		if err != nil {
			return fmt.Errorf("run k0s install: %w", err)
		}
		if err := runner.Wait(); err != nil {
			log.Errorf("%s: k0s install failed: %s %s", h, stdout.String(), stderr.String())
			return fmt.Errorf("k0s install failed: %w", err)
		}

		return nil
	})
	if err != nil {
		return err
	}
	h.Metadata.K0sInstalled = true
	h.Metadata.K0sRunningVersion = p.Config.Spec.K0s.Version

	if p.IsWet() {
		if len(h.Environment) > 0 {
			log.Infof("%s: updating service environment", h)
			if err := h.Configurer.UpdateServiceEnvironment(h, h.K0sServiceName(), h.Environment); err != nil {
				return err
			}
		}

		log.Infof("%s: starting service", h)
		if err := h.Configurer.StartService(h, h.K0sServiceName()); err != nil {
			return err
		}

		log.Infof("%s: waiting for the k0s service to start", h)
		if err := retry.AdaptiveTimeout(ctx, retry.DefaultTimeout, node.ServiceRunningFunc(h, h.K0sServiceName())); err != nil {
			return err
		}

		// poll the apiserver readiness endpoint until the controller is up
		err := retry.AdaptiveTimeout(ctx, retry.DefaultTimeout, func(_ context.Context) error {
			out, err := h.ExecOutput(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "get --raw='/readyz?verbose=true'"), exec.Sudo(h))
			if err != nil {
				return fmt.Errorf("readiness endpoint reports %q: %w", out, err)
			}
			return nil
		})
		if err != nil {
			return fmt.Errorf("controller did not reach ready state: %w", err)
		}

		h.Metadata.Ready = true
	}

	return nil
}
07070100000076000081A40000000000000000000000016842976900001B4C000000000000000000000000000000000000002700000000k0sctl-0.25.1/phase/install_workers.gopackage phase

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/k0sctl/pkg/node"
	"github.com/k0sproject/k0sctl/pkg/retry"
	"github.com/k0sproject/rig/exec"
	log "github.com/sirupsen/logrus"
)

// InstallWorkers installs k0s on worker hosts and joins them to the cluster
type InstallWorkers struct {
	GenericPhase
	hosts  cluster.Hosts // worker hosts still needing install (selected in Prepare)
	leader *cluster.Host // leader controller used to generate join tokens
}

// Title returns the display name of the phase.
func (p *InstallWorkers) Title() string {
	const title = "Install workers"
	return title
}

// Prepare selects the worker hosts that still need k0s installed or have not
// yet reached ready state, and resolves the leader host.
func (p *InstallWorkers) Prepare(config *v1beta1.Cluster) error {
	p.Config = config
	p.leader = p.Config.Spec.K0sLeader()

	isPending := func(h *cluster.Host) bool {
		if h.Reset || h.Metadata.NeedsUpgrade {
			return false
		}
		return h.Metadata.K0sRunningVersion == nil || !h.Metadata.Ready
	}
	p.hosts = p.Config.Spec.Hosts.Workers().Filter(isPending)

	return nil
}

// ShouldRun returns true when at least one worker host is pending install.
func (p *InstallWorkers) ShouldRun() bool {
	return len(p.hosts) != 0
}

// CleanUp attempts to clean up any changes after a failed install
func (p *InstallWorkers) CleanUp() {
	_ = p.After()
	// only hosts that never reached ready state need cleaning
	unready := p.hosts.Filter(func(h *cluster.Host) bool { return !h.Metadata.Ready })
	_ = unready.ParallelEach(context.Background(), func(_ context.Context, h *cluster.Host) error {
		log.Infof("%s: cleaning up", h)
		if len(h.Environment) > 0 {
			if err := h.Configurer.CleanupServiceEnvironment(h, h.K0sServiceName()); err != nil {
				log.Warnf("%s: failed to clean up service environment: %v", h, err)
			}
		}
		if p.IsWet() && h.Metadata.K0sInstalled {
			if err := h.Exec(h.K0sResetCommand(), exec.Sudo(h)); err != nil {
				log.Warnf("%s: k0s reset failed", h)
			}
		}
		return nil
	})
}

// After invalidates the join tokens generated during Run and overwrites the
// token files on the hosts. With --no-wait the tokens are left valid and only
// a warning is emitted.
func (p *InstallWorkers) After() error {
	if NoWait {
		// warn once, and only when at least one token was actually created
		for _, h := range p.hosts {
			if h.Metadata.K0sTokenData.Token != "" {
				log.Warnf("%s: --no-wait given, created join tokens will remain valid for 10 minutes", p.leader)
				break
			}
		}
		return nil
	}
	for i, h := range p.hosts {
		// drop the secret token from memory unconditionally; the ID below is
		// what the invalidate command needs
		h.Metadata.K0sTokenData.Token = ""
		if h.Metadata.K0sTokenData.ID == "" {
			continue
		}
		err := p.Wet(p.leader, fmt.Sprintf("invalidate k0s join token for worker %s", h), func() error {
			log.Debugf("%s: invalidating join token for worker %d", p.leader, i+1)
			return p.leader.Exec(p.leader.Configurer.K0sCmdf("token invalidate --data-dir=%s %s", p.leader.K0sDataDir(), h.Metadata.K0sTokenData.ID), exec.Sudo(p.leader))
		})
		if err != nil {
			log.Warnf("%s: failed to invalidate worker join token: %v", p.leader, err)
		}
		// best-effort: replace the on-host token file with a placeholder
		_ = p.Wet(h, "overwrite k0s join token file", func() error {
			if err := h.Configurer.WriteFile(h, h.K0sJoinTokenPath(), "# overwritten by k0sctl after join\n", "0600"); err != nil {
				log.Warnf("%s: failed to overwrite the join token file at %s", h, h.K0sJoinTokenPath())
			}
			return nil
		})
	}
	return nil
}

// Run generates a join token for each worker, verifies each worker can reach
// the kube API using that token, then installs, starts and waits for k0s on
// all workers in parallel.
func (p *InstallWorkers) Run(ctx context.Context) error {
	for i, h := range p.hosts {
		log.Infof("%s: generating a join token for worker %d", p.leader, i+1)
		err := p.Wet(p.leader, fmt.Sprintf("generate a k0s join token for worker %s", h), func() error {
			t, err := p.Config.Spec.K0s.GenerateToken(
				ctx,
				p.leader,
				"worker",
				// fixed: 10*time.Minute is already a time.Duration; the
				// explicit conversion was redundant
				10*time.Minute,
			)
			if err != nil {
				return err
			}

			td, err := cluster.ParseToken(t)
			if err != nil {
				return fmt.Errorf("parse k0s token: %w", err)
			}

			h.Metadata.K0sTokenData = td

			return nil
		}, func() error {
			// dry-run: fake token metadata so downstream messages make sense
			h.Metadata.K0sTokenData.ID = "dry-run"
			h.Metadata.K0sTokenData.URL = p.Config.Spec.KubeAPIURL()
			return nil
		})
		if err != nil {
			return err
		}
	}

	// verify each worker can reach the kube API with its join token
	err := p.parallelDo(ctx, p.hosts, func(_ context.Context, h *cluster.Host) error {
		if p.IsWet() || !p.leader.Metadata.DryRunFakeLeader {
			log.Infof("%s: validating api connection to %s using join token", h, h.Metadata.K0sTokenData.URL)
			err := retry.AdaptiveTimeout(ctx, 30*time.Second, func(_ context.Context) error {
				err := h.Exec(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "get --raw='/version' --kubeconfig=/dev/stdin"), exec.Sudo(h), exec.Stdin(string(h.Metadata.K0sTokenData.Kubeconfig)))
				if err != nil {
					return fmt.Errorf("failed to connect to kubernetes api using the join token - check networking: %w", err)
				}
				return nil
			})
			if err != nil {
				return fmt.Errorf("connectivity check failed: %w", err)
			}
		} else {
			log.Warnf("%s: dry-run: skipping api connection validation because cluster is not actually running", h)
		}
		return nil
	})
	if err != nil {
		return err
	}

	return p.parallelDo(ctx, p.hosts, func(_ context.Context, h *cluster.Host) error {
		tokenPath := h.K0sJoinTokenPath()
		err := p.Wet(h, fmt.Sprintf("write k0s join token to %s", tokenPath), func() error {
			log.Infof("%s: writing join token to %s", h, tokenPath)
			return h.Configurer.WriteFile(h, tokenPath, h.Metadata.K0sTokenData.Token, "0600")
		})
		if err != nil {
			return err
		}

		// stop and remove a pre-existing k0s service before reinstalling
		if sp, err := h.Configurer.ServiceScriptPath(h, h.K0sServiceName()); err == nil {
			if h.Configurer.ServiceIsRunning(h, h.K0sServiceName()) {
				err := p.Wet(h, "stop existing k0s service", func() error {
					log.Infof("%s: stopping service", h)
					return h.Configurer.StopService(h, h.K0sServiceName())
				})
				if err != nil {
					return err
				}
			}
			if h.Configurer.FileExist(h, sp) {
				err := p.Wet(h, "remove existing k0s service file", func() error {
					return h.Configurer.DeleteFile(h, sp)
				})
				if err != nil {
					return err
				}
			}
		}

		log.Infof("%s: installing k0s worker", h)
		if Force {
			log.Warnf("%s: --force given, using k0s install with --force", h)
			h.InstallFlags.AddOrReplace("--force=true")
		}

		cmd, err := h.K0sInstallCommand()
		if err != nil {
			return err
		}
		err = p.Wet(h, fmt.Sprintf("install k0s worker with `%s`", strings.ReplaceAll(cmd, h.Configurer.K0sBinaryPath(), "k0s")), func() error {
			return h.Exec(cmd, exec.Sudo(h))
		})
		if err != nil {
			return err
		}

		h.Metadata.K0sInstalled = true

		if len(h.Environment) > 0 {
			err := p.Wet(h, "update service environment variables", func() error {
				log.Infof("%s: updating service environment", h)
				return h.Configurer.UpdateServiceEnvironment(h, h.K0sServiceName(), h.Environment)
			})
			if err != nil {
				return err
			}
		}

		if p.IsWet() {
			log.Infof("%s: starting service", h)
			if err := h.Configurer.StartService(h, h.K0sServiceName()); err != nil {
				return err
			}
		}

		if NoWait {
			log.Debugf("%s: not waiting because --no-wait given", h)
			h.Metadata.Ready = true
		} else {
			log.Infof("%s: waiting for node to become ready", h)

			if p.IsWet() {
				if err := retry.AdaptiveTimeout(ctx, retry.DefaultTimeout, node.KubeNodeReadyFunc(h)); err != nil {
					return err
				}
				h.Metadata.Ready = true
			}
		}

		h.Metadata.K0sRunningVersion = p.Config.Spec.K0s.Version

		return nil
	})
}
07070100000077000081A40000000000000000000000016842976900000CFE000000000000000000000000000000000000001C00000000k0sctl-0.25.1/phase/lock.gopackage phase

import (
	"context"
	"fmt"
	"os"
	gos "os"
	"sync"
	"time"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/k0sctl/pkg/retry"
	"github.com/k0sproject/rig/exec"
	log "github.com/sirupsen/logrus"
)

// Lock acquires an exclusive k0sctl lock on hosts
type Lock struct {
	GenericPhase
	cfs        []func()       // per-host ticker context cancel functions
	instanceID string         // "hostname-pid" identifier written into the lock file
	m          sync.Mutex     // guards cfs
	wg         sync.WaitGroup // waits for ticker goroutines to remove their lock files
}

// Prepare stores the config and derives a unique instance identifier from the
// local hostname and process id.
func (p *Lock) Prepare(c *v1beta1.Cluster) error {
	p.Config = c
	hostname, err := os.Hostname()
	if err != nil {
		hostname = "unknown"
	}
	p.instanceID = fmt.Sprintf("%s-%d", hostname, gos.Getpid())
	return nil
}

// Title returns the display name of the phase.
func (p *Lock) Title() string {
	const title = "Acquire exclusive host lock"
	return title
}

// Cancel releases the lock
func (p *Lock) Cancel() {
	p.m.Lock()
	defer p.m.Unlock()
	// cancel each per-host ticker context; the goroutines then delete their lock files
	for _, f := range p.cfs {
		f()
	}
	// wait for the goroutines to finish their cleanup.
	// NOTE(review): p.m stays held during Wait; safe as long as the ticker
	// goroutines never take p.m — confirm if that changes.
	p.wg.Wait()
}

// CleanUp calls Cancel to release the lock
func (p *Lock) CleanUp() { p.Cancel() }

// UnlockPhase returns an unlock phase for this lock phase
func (p *Lock) UnlockPhase() Phase {
	unlock := &Unlock{Cancel: p.Cancel}
	return unlock
}

// Run acquires the lock file on every host and then starts the keepalive
// tickers that refresh the lock timestamps.
func (p *Lock) Run(ctx context.Context) error {
	hosts := p.Config.Spec.Hosts
	if err := p.parallelDo(ctx, hosts, p.startLock); err != nil {
		return err
	}
	return hosts.ParallelEach(ctx, p.startTicker)
}

// startTicker launches a goroutine that refreshes the host's lock file
// timestamp every 10 seconds until its context is cancelled, at which point
// it removes the lock file.
func (p *Lock) startTicker(ctx context.Context, h *cluster.Host) error {
	p.wg.Add(1)
	lfp := h.Configurer.K0sctlLockFilePath(h)
	ticker := time.NewTicker(10 * time.Second)
	ctx, cancel := context.WithCancel(ctx)
	p.m.Lock()
	p.cfs = append(p.cfs, cancel)
	p.m.Unlock()

	go func() {
		// fixed: the ticker was never stopped, leaking its resources after
		// the goroutine exited
		defer ticker.Stop()
		defer p.wg.Done()
		log.Tracef("%s: started periodic update of lock file %s timestamp", h, lfp)
		for {
			select {
			case <-ticker.C:
				if err := h.Configurer.Touch(h, lfp, time.Now(), exec.Sudo(h), exec.HideCommand()); err != nil {
					log.Debugf("%s: failed to touch lock file: %s", h, err)
				}
			case <-ctx.Done():
				log.Tracef("%s: stopped lock cycle, removing file", h)
				if err := h.Configurer.DeleteFile(h, lfp); err != nil {
					log.Debugf("%s: failed to remove host lock file, k0sctl may have been previously aborted or crashed. the start of next invocation may be delayed until it expires: %s", h, err)
				}
				return
			}
		}
	}()

	return nil
}

// startLock attempts to acquire the lock on the host, retrying up to 10 times.
func (p *Lock) startLock(ctx context.Context, h *cluster.Host) error {
	attempt := func(_ context.Context) error {
		return p.tryLock(h)
	}
	return retry.Times(ctx, 10, attempt)
}

// tryLock attempts to claim the host by writing this instance's ID into the
// lock file. If the write fails, the existing lock is inspected: a lock held
// by another instance is honored for 30 seconds from its last modification,
// after which it is considered expired and removed so a retry can claim it.
func (p *Lock) tryLock(h *cluster.Host) error {
	lfp := h.Configurer.K0sctlLockFilePath(h)

	if err := h.Configurer.UpsertFile(h, lfp, p.instanceID); err != nil {
		stat, err := h.Configurer.Stat(h, lfp, exec.Sudo(h), exec.HideCommand())
		if err != nil {
			return fmt.Errorf("lock file disappeared: %w", err)
		}
		content, err := h.Configurer.ReadFile(h, lfp)
		if err != nil {
			// fixed: doubled space in the error message
			return fmt.Errorf("failed to read lock file: %w", err)
		}
		if content != p.instanceID {
			if time.Since(stat.ModTime()) < 30*time.Second {
				return fmt.Errorf("another instance of k0sctl is currently operating on the host, delete %s or wait 30 seconds for it to expire", lfp)
			}
			// lock is stale; remove it and signal the retry loop to try again
			_ = h.Configurer.DeleteFile(h, lfp)
			return fmt.Errorf("removed existing expired lock file, will retry")
		}
	}

	return nil
}
07070100000078000081A400000000000000000000000168429769000018BC000000000000000000000000000000000000001F00000000k0sctl-0.25.1/phase/manager.gopackage phase

import (
	"context"
	"fmt"
	"io"
	"os"
	"sync"

	"github.com/creasty/defaults"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/logrusorgru/aurora"
	log "github.com/sirupsen/logrus"
)

// NoWait is used by various phases to decide if node ready state should be waited for or not
var NoWait bool

// Force is used by various phases to attempt a forced installation
var Force bool

// Colorize is an instance of "aurora", used to colorize the output. It is
// constructed with colors disabled here; NOTE(review): presumably re-assigned
// elsewhere when terminal color support is detected — confirm.
var Colorize = aurora.NewAurora(false)

// Phase represents a runnable phase which can be added to Manager.
// Title is used both for logging and for locating phases by name.
type Phase interface {
	Run(context.Context) error
	Title() string
}

// Phases is an ordered list of phases with helpers for lookup, insertion,
// removal and replacement by title.
type Phases []Phase

// Index returns the index of the first occurrence matching the given phase title or -1 if not found
func (p Phases) Index(title string) int {
	for i := range p {
		if p[i].Title() == title {
			return i
		}
	}
	return -1
}

// Remove removes the first occurrence of a phase with the given title
func (p *Phases) Remove(title string) {
	if i := p.Index(title); i >= 0 {
		*p = append((*p)[:i], (*p)[i+1:]...)
	}
}

// InsertAfter inserts a phase after the first occurrence of a phase with the given title
func (p *Phases) InsertAfter(title string, phase Phase) {
	i := p.Index(title)
	if i < 0 {
		return
	}
	out := make(Phases, 0, len(*p)+1)
	out = append(out, (*p)[:i+1]...)
	out = append(out, phase)
	out = append(out, (*p)[i+1:]...)
	*p = out
}

// InsertBefore inserts a phase before the first occurrence of a phase with the given title
func (p *Phases) InsertBefore(title string, phase Phase) {
	i := p.Index(title)
	if i < 0 {
		return
	}
	out := make(Phases, 0, len(*p)+1)
	out = append(out, (*p)[:i]...)
	out = append(out, phase)
	out = append(out, (*p)[i:]...)
	*p = out
}

// Replace replaces the first occurrence of a phase with the given title
func (p *Phases) Replace(title string, phase Phase) {
	if i := p.Index(title); i >= 0 {
		(*p)[i] = phase
	}
}

// withconfig is implemented by phases that need the cluster config before running.
type withconfig interface {
	Title() string
	Prepare(*v1beta1.Cluster) error
}

// conditional is implemented by phases that can opt out of running.
type conditional interface {
	ShouldRun() bool
}

// beforehook receives the phase title as an argument because of reasons.
type beforehook interface {
	Before(string) error
}

// afterhook is implemented by phases that want to observe the phase's result error.
type afterhook interface {
	After(error) error
}

// withcleanup is implemented by phases that can undo their changes after a failed run.
type withcleanup interface {
	CleanUp()
}

// withmanager is implemented by phases that need a reference back to the Manager.
type withmanager interface {
	SetManager(*Manager)
}

// withDryRun is implemented by phases that provide a dedicated dry-run code path.
type withDryRun interface {
	DryRun() error
}

// Manager executes phases to construct the cluster
type Manager struct {
	phases            Phases
	Config            *v1beta1.Cluster
	Concurrency       int // maximum number of hosts operated on concurrently
	ConcurrentUploads int // maximum number of concurrent uploads
	DryRun            bool
	Writer            io.Writer // destination for the dry-run report

	dryMessages map[string][]string // dry-run messages keyed by host string ("local" for nil host)
	dryMu       sync.Mutex          // guards dryMessages
}

// NewManager creates a new Manager
func NewManager(config *v1beta1.Cluster) (*Manager, error) {
	if config == nil {
		return nil, fmt.Errorf("config is nil")
	}
	m := &Manager{Config: config, Writer: os.Stdout}
	return m, nil
}

// AddPhase adds a Phase to Manager
func (m *Manager) AddPhase(p ...Phase) {
	m.phases = append(m.phases, p...)
}

// SetPhases sets the list of phases
func (m *Manager) SetPhases(p Phases) {
	m.phases = p
}

// errorfunc is a deferred action that may fail.
type errorfunc func() error

// DryMsg records a message for the dry-run report. A nil host is filed under
// the "local" key.
func (m *Manager) DryMsg(host fmt.Stringer, msg string) {
	m.dryMu.Lock()
	defer m.dryMu.Unlock()
	if m.dryMessages == nil {
		m.dryMessages = make(map[string][]string)
	}
	key := "local"
	if host != nil {
		key = host.String()
	}
	m.dryMessages[key] = append(m.dryMessages[key], msg)
}

// Wet runs the first given function when not in dry-run mode. The second function will be
// run when in dry-mode and the message will be displayed. Any error returned from the
// functions will be returned and will halt the operation.
func (m *Manager) Wet(host fmt.Stringer, msg string, funcs ...errorfunc) error {
	if !m.DryRun {
		if len(funcs) > 0 && funcs[0] != nil {
			return funcs[0]()
		}
		return nil
	}

	m.DryMsg(host, msg)

	// m.DryRun is guaranteed true here; the previous extra check was redundant
	if len(funcs) == 2 && funcs[1] != nil {
		return funcs[1]()
	}

	return nil
}

// Run executes all the added Phases in order. In dry-run mode the collected
// messages are reported at the end; on failure, CleanUp is invoked for each
// phase that already ran.
func (m *Manager) Run(ctx context.Context) error {
	var ran []Phase
	var result error

	if m.Config == nil {
		return fmt.Errorf("cannot run phases: config is nil")
	}

	log.Debug("setting defaults")
	if err := defaults.Set(m.Config); err != nil {
		return fmt.Errorf("failed to set defaults: %w", err)
	}
	log.Debug("final configuration:")
	log.Print(m.Config.String())

	defer func() {
		if m.DryRun {
			if len(m.dryMessages) == 0 {
				fmt.Fprintln(m.Writer, Colorize.BrightGreen("dry-run: no cluster state altering actions would be performed"))
				return
			}

			fmt.Fprintln(m.Writer, Colorize.BrightRed("dry-run: cluster state altering actions would be performed:"))
			for host, msgs := range m.dryMessages {
				fmt.Fprintln(m.Writer, Colorize.BrightRed("dry-run:"), Colorize.Bold(fmt.Sprintf("* %s :", host)))
				for _, msg := range msgs {
					// fixed: this line used fmt.Println (stdout) while the rest of
					// the report goes to m.Writer
					fmt.Fprintln(m.Writer, Colorize.BrightRed("dry-run:"), Colorize.Red(" -"), msg)
				}
			}
			return
		}
		if result != nil {
			// run clean-up for every phase that got to run, in run order
			for _, p := range ran {
				if c, ok := p.(withcleanup); ok {
					log.Infof(Colorize.Red("* Running clean-up for phase: %s").String(), p.Title())
					c.CleanUp()
				}
			}
		}
	}()

	for _, p := range m.phases {
		title := p.Title()

		if err := ctx.Err(); err != nil {
			result = fmt.Errorf("context canceled before entering phase %q: %w", title, err)
			return result
		}

		if p, ok := p.(withmanager); ok {
			p.SetManager(m)
		}

		if p, ok := p.(withconfig); ok {
			log.Debugf("Preparing phase '%s'", p.Title())
			if err := p.Prepare(m.Config); err != nil {
				return err
			}
		}

		if p, ok := p.(conditional); ok {
			if !p.ShouldRun() {
				continue
			}
		}

		if p, ok := p.(beforehook); ok {
			if err := p.Before(title); err != nil {
				log.Debugf("before hook failed '%s'", err.Error())
				return err
			}
		}

		text := Colorize.Green("==> Running phase: %s").String()
		log.Infof(text, title)

		// a phase with a dedicated dry-run path short-circuits Run entirely
		if dp, ok := p.(withDryRun); ok && m.DryRun {
			if err := dp.DryRun(); err != nil {
				return err
			}
			continue
		}

		result = p.Run(ctx)
		ran = append(ran, p)

		if p, ok := p.(afterhook); ok {
			if err := p.After(result); err != nil {
				log.Debugf("after hook failed: '%s' (phase result: %s)", err.Error(), result)
				return err
			}
		}

		if result != nil {
			return result
		}
	}

	return nil
}
07070100000079000081A40000000000000000000000016842976900000D07000000000000000000000000000000000000002400000000k0sctl-0.25.1/phase/manager_test.gopackage phase

import (
	"context"
	"fmt"
	"testing"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/stretchr/testify/require"
)

// conditionalPhase is a test double implementing the conditional interface;
// ShouldRun always vetoes, so Run should never be invoked by the manager.
type conditionalPhase struct {
	shouldrunCalled bool
	runCalled       bool
}

// Title implements Phase.
func (p *conditionalPhase) Title() string {
	return "conditional phase"
}

// ShouldRun records the call and returns false to veto the phase.
func (p *conditionalPhase) ShouldRun() bool {
	p.shouldrunCalled = true
	return false
}

// Run records the call; the manager is expected to skip it.
func (p *conditionalPhase) Run(_ context.Context) error {
	p.runCalled = true
	return nil
}

// TestConditionalPhase verifies that the manager consults ShouldRun and skips
// the phase's Run when it returns false.
func TestConditionalPhase(t *testing.T) {
	m := Manager{Config: &v1beta1.Cluster{Spec: &cluster.Spec{}}}
	p := &conditionalPhase{}
	m.AddPhase(p)
	require.NoError(t, m.Run(context.Background()))
	require.False(t, p.runCalled, "run was not called")
	require.True(t, p.shouldrunCalled, "shouldrun was not called")
}

// configPhase is a test double implementing the withconfig interface; it
// records whether Prepare received a non-nil config.
type configPhase struct {
	receivedConfig bool
}

// Title implements Phase.
func (p *configPhase) Title() string {
	return "config phase"
}

// Prepare records whether a non-nil config was passed in.
func (p *configPhase) Prepare(c *v1beta1.Cluster) error {
	p.receivedConfig = c != nil
	return nil
}

// Run is a no-op.
func (p *configPhase) Run(_ context.Context) error {
	return nil
}

// TestConfigPhase verifies that the manager passes its config to phases that
// implement Prepare.
func TestConfigPhase(t *testing.T) {
	m := Manager{Config: &v1beta1.Cluster{Spec: &cluster.Spec{}}}
	p := &configPhase{}
	m.AddPhase(p)
	require.NoError(t, m.Run(context.Background()))
	require.True(t, p.receivedConfig, "config was not received")
}

// hookedPhase is a test double implementing the before/after hooks and
// CleanUp. Run delegates to fn when set, otherwise fails with "run failed".
type hookedPhase struct {
	fn            func() error // optional Run body; nil means Run returns an error
	beforeCalled  bool
	afterCalled   bool
	cleanupCalled bool
	runCalled     bool
	err           error // the phase result received by After
}

// Title implements Phase.
func (p *hookedPhase) Title() string {
	return "hooked phase"
}

// Before records that the before hook ran.
func (p *hookedPhase) Before(_ string) error {
	p.beforeCalled = true
	return nil
}

// After records the phase result it was given.
func (p *hookedPhase) After(err error) error {
	p.afterCalled = true
	p.err = err
	return nil
}

// CleanUp records that clean-up ran.
func (p *hookedPhase) CleanUp() {
	p.cleanupCalled = true
}

// Run invokes fn when set, otherwise returns an error.
func (p *hookedPhase) Run(_ context.Context) error {
	p.runCalled = true
	if p.fn != nil {
		return p.fn()
	}
	return fmt.Errorf("run failed")
}

// TestHookedPhase verifies that both hooks run and that the after hook
// receives the error returned by Run.
func TestHookedPhase(t *testing.T) {
	m := Manager{Config: &v1beta1.Cluster{Spec: &cluster.Spec{}}}
	p := &hookedPhase{}
	m.AddPhase(p)
	require.Error(t, m.Run(context.Background()))
	require.True(t, p.beforeCalled, "before hook was not called")
	require.True(t, p.afterCalled, "after hook was not called")
	require.EqualError(t, p.err, "run failed")
}

// TestContextCancel verifies that cancelling the context during the first
// phase stops the manager before the second phase starts, and that clean-up
// runs for the phase that already completed.
func TestContextCancel(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	m := Manager{Config: &v1beta1.Cluster{Spec: &cluster.Spec{}}}
	p1 := &hookedPhase{fn: func() error {
		cancel()
		return nil
	}}
	p2 := &hookedPhase{}
	m.AddPhase(p1, p2)
	require.Error(t, m.Run(ctx))
	require.Contains(t, ctx.Err().Error(), "cancel")

	require.True(t, p1.beforeCalled, "1st before hook was not called")
	require.True(t, p1.afterCalled, "1st after hook was not called")
	require.True(t, p1.runCalled, "1st run was not called")
	// clean-up runs for p1 because it completed before the cancellation was
	// noticed at the top of the next loop iteration
	require.True(t, p1.cleanupCalled, "1st cleanup was not called")

	require.False(t, p2.beforeCalled, "2nd before hook was called")
	require.False(t, p2.afterCalled, "2nd after hook was called")
	require.False(t, p2.runCalled, "2nd run was called")
	require.False(t, p2.cleanupCalled, "2nd cleanup was called")
}
0707010000007A000081A40000000000000000000000016842976900000C6D000000000000000000000000000000000000002500000000k0sctl-0.25.1/phase/prepare_hosts.gopackage phase

import (
	"context"
	"errors"
	"fmt"
	"strings"
	"time"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/k0sctl/pkg/retry"
	"github.com/k0sproject/rig"
	"github.com/k0sproject/rig/os"
	"github.com/k0sproject/version"
	log "github.com/sirupsen/logrus"
)

var iptablesEmbeddedSince = version.MustParse("v1.22.1+k0s.0")

// PrepareHosts installs required packages and so on on the hosts.
type PrepareHosts struct {
	GenericPhase
}

// Title for the phase
func (p *PrepareHosts) Title() string {
	return "Prepare hosts"
}

// Run prepares every host in parallel.
func (p *PrepareHosts) Run(ctx context.Context) error {
	hosts := p.Config.Spec.Hosts
	return p.parallelDo(ctx, hosts, p.prepareHost)
}

// prepare is implemented by configurers that need a host-specific preparation
// step before packages are installed.
type prepare interface {
	Prepare(os.Host) error
}

// updateEnvironment updates the environment variables on the host and reconnects to
// it if necessary. Non-SSH connections do not need the reconnect.
func (p *PrepareHosts) updateEnvironment(ctx context.Context, h *cluster.Host) error {
	if err := h.Configurer.UpdateEnvironment(h, h.Environment); err != nil {
		return err
	}
	if h.Connection.Protocol() != "SSH" {
		return nil
	}
	// XXX: this is a workaround. UpdateEnvironment on rig's os/linux.go writes
	// the environment to /etc/environment and then exports the same variables
	// using 'export' command. This is not enough for the environment to be
	// preserved across multiple ssh sessions. We need to write the environment
	// and then reopen the ssh session. Go's ssh client.Setenv() depends on ssh
	// server configuration (sshd only accepts LC_* variables by default).
	log.Infof("%s: reconnecting to apply new environment", h)
	h.Disconnect()
	// retry for up to 10 minutes; unrecoverable connection errors abort early
	return retry.AdaptiveTimeout(ctx, 10*time.Minute, func(_ context.Context) error {
		if err := h.Connect(); err != nil {
			if errors.Is(err, rig.ErrCantConnect) || strings.Contains(err.Error(), "host key mismatch") {
				return errors.Join(retry.ErrAbort, err)
			}
			return fmt.Errorf("failed to reconnect to %s: %w", h, err)
		}
		return nil
	})
}

// prepareHost runs the configurer's own Prepare hook, applies environment
// variables, installs any missing required packages and applies a container
// fix when the host is a container.
func (p *PrepareHosts) prepareHost(ctx context.Context, h *cluster.Host) error {
	if c, ok := h.Configurer.(prepare); ok {
		if err := c.Prepare(h); err != nil {
			return err
		}
	}

	if len(h.Environment) > 0 {
		log.Infof("%s: updating environment", h)
		if err := p.updateEnvironment(ctx, h); err != nil {
			// fixed typo: "failed to updated" -> "failed to update"
			return fmt.Errorf("failed to update environment: %w", err)
		}
	}

	var pkgs []string

	if h.NeedCurl() {
		pkgs = append(pkgs, "curl")
	}

	// iptables is only required for very old versions of k0s
	if p.Config.Spec.K0s.Version != nil && !p.Config.Spec.K0s.Version.GreaterThanOrEqual(iptablesEmbeddedSince) && h.NeedIPTables() { //nolint:staticcheck
		pkgs = append(pkgs, "iptables")
	}

	if h.NeedInetUtils() {
		pkgs = append(pkgs, "inetutils")
	}

	for _, pkg := range pkgs {
		err := p.Wet(h, fmt.Sprintf("install package %s", pkg), func() error {
			log.Infof("%s: installing package %s", h, pkg)
			return h.Configurer.InstallPackage(h, pkg)
		})
		if err != nil {
			return err
		}
	}

	if h.Configurer.IsContainer(h) {
		log.Infof("%s: is a container, applying a fix", h)
		if err := h.Configurer.FixContainer(h); err != nil {
			return err
		}
	}

	return nil
}
0707010000007B000081A40000000000000000000000016842976900000C44000000000000000000000000000000000000002100000000k0sctl-0.25.1/phase/reinstall.gopackage phase

import (
	"context"
	"fmt"
	"math"
	"strings"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/k0sctl/pkg/node"
	"github.com/k0sproject/k0sctl/pkg/retry"
	"github.com/k0sproject/rig/exec"
	log "github.com/sirupsen/logrus"
)

// Reinstall re-runs the k0s install command on hosts whose install flags have
// changed (see Prepare), to reconfigure the k0s service.
type Reinstall struct {
	GenericPhase
	hosts cluster.Hosts // hosts selected for reinstall in Prepare
}

// Title returns the display name of the phase.
func (p *Reinstall) Title() string {
	const title = "Reinstall"
	return title
}

// Prepare selects hosts that are running k0s, were not freshly installed or
// marked for reset, and whose install flags have changed.
func (p *Reinstall) Prepare(config *v1beta1.Cluster) error {
	p.Config = config
	needsReinstall := func(h *cluster.Host) bool {
		if h.Reset || h.Metadata.K0sInstalled {
			return false
		}
		return h.Metadata.K0sRunningVersion != nil && h.FlagsChanged()
	}
	p.hosts = p.Config.Spec.Hosts.Filter(needsReinstall)

	return nil
}

// ShouldRun is true when there are hosts that needs to be reinstalled
// NOTE(review): assumes Spec.K0s.Version is non-nil by this point — confirm
// that an earlier phase always resolves it.
func (p *Reinstall) ShouldRun() bool {
	return p.Config.Spec.K0s.Version.GreaterThanOrEqual(cluster.K0sForceFlagSince) && len(p.hosts) > 0
}

// Run reinstalls controllers one at a time (to preserve quorum), then the
// workers in parallel batches of roughly 10% of the worker count.
func (p *Reinstall) Run(ctx context.Context) error {
	if p.Config.Spec.K0s.Version.LessThan(cluster.K0sForceFlagSince) {
		log.Warnf("k0s version %s does not support install --force flag, installFlags won't be reconfigured", p.Config.Spec.K0s.Version)
		return nil
	}
	controllers := p.hosts.Controllers()
	if len(controllers) > 0 {
		log.Infof("Reinstalling %d controllers sequentially", len(controllers))
		err := controllers.Each(ctx, func(ctx context.Context, h *cluster.Host) error {
			return p.reinstall(ctx, h)
		})
		if err != nil {
			return err
		}
	}

	workers := p.hosts.Workers()
	if len(workers) == 0 {
		return nil
	}

	// batch size: 10% of the workers, minimum 1
	// fixed: was computed from len(p.hosts), which includes controllers
	concurrentReinstalls := int(math.Floor(float64(len(workers)) * 0.10))
	if concurrentReinstalls == 0 {
		concurrentReinstalls = 1
	}

	log.Infof("Reinstalling max %d workers in parallel", concurrentReinstalls)

	// fixed: previously batched over p.hosts, which would have reinstalled the
	// already-handled controllers a second time
	return workers.BatchedParallelEach(ctx, concurrentReinstalls, p.reinstall)
}

// reinstall runs "k0s install --force" on the host, then restarts the k0s
// service and waits for it to come back up.
func (p *Reinstall) reinstall(ctx context.Context, h *cluster.Host) error {
	// dynamic config is a controller-side flag; plain workers never get it
	if p.Config.Spec.K0s.DynamicConfig && h.Role != "worker" {
		h.InstallFlags.AddOrReplace("--enable-dynamic-config")
	}

	h.InstallFlags.AddOrReplace("--force=true")

	cmd, err := h.K0sInstallCommand()
	if err != nil {
		return err
	}
	log.Infof("%s: reinstalling k0s", h)
	// Fix: the dry-run message was missing the closing backtick around the
	// displayed command ("reinstall k0s using `%s").
	err = p.Wet(h, fmt.Sprintf("reinstall k0s using `%s`", strings.ReplaceAll(cmd, h.Configurer.K0sBinaryPath(), "k0s")), func() error {
		if err := h.Exec(cmd, exec.Sudo(h)); err != nil {
			return fmt.Errorf("failed to reinstall k0s: %w", err)
		}
		return nil
	})
	if err != nil {
		return err
	}

	err = p.Wet(h, "restart k0s service", func() error {
		if err := h.Configurer.RestartService(h, h.K0sServiceName()); err != nil {
			return fmt.Errorf("failed to restart k0s: %w", err)
		}
		log.Infof("%s: waiting for the k0s service to start", h)
		if err := retry.AdaptiveTimeout(ctx, retry.DefaultTimeout, node.ServiceRunningFunc(h, h.K0sServiceName())); err != nil {
			return fmt.Errorf("k0s did not restart: %w", err)
		}
		return nil
	})
	if err != nil {
		return fmt.Errorf("restart after reinstall: %w", err)
	}

	return nil
}
0707010000007C000081A40000000000000000000000016842976900001158000000000000000000000000000000000000002900000000k0sctl-0.25.1/phase/reset_controllers.gopackage phase

import (
	"bytes"
	"context"
	"fmt"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/k0sctl/pkg/node"
	"github.com/k0sproject/k0sctl/pkg/retry"
	"github.com/k0sproject/rig/exec"
	log "github.com/sirupsen/logrus"
)

// ResetControllers phase removes controllers marked for reset from the kubernetes and etcd clusters
// and resets k0s on the host
type ResetControllers struct {
	GenericPhase

	// NoDrain skips draining the node before reset.
	NoDrain  bool
	// NoDelete skips deleting the node object from the cluster.
	NoDelete bool
	// NoLeave skips leaving the etcd cluster.
	NoLeave  bool

	// hosts are the controllers marked for reset, selected in Prepare.
	hosts  cluster.Hosts
	// leader runs the cluster-level operations (taint, drain, delete).
	leader *cluster.Host
}

// Title returns the display name of the phase.
func (p *ResetControllers) Title() string {
	const title = "Reset controllers"
	return title
}

// Prepare resolves the leader and collects the controllers that have been
// marked for reset.
func (p *ResetControllers) Prepare(config *v1beta1.Cluster) error {
	p.Config = config
	p.leader = config.Spec.K0sLeader()

	var controllers cluster.Hosts = config.Spec.Hosts.Controllers()
	log.Debugf("%d controllers in total", len(controllers))
	p.hosts = controllers.Filter(func(h *cluster.Host) bool { return h.Reset })
	log.Debugf("ResetControllers phase prepared, %d controllers will be reset", len(p.hosts))
	return nil
}

// ShouldRun is true when at least one controller is marked for reset.
func (p *ResetControllers) ShouldRun() bool {
	if len(p.hosts) == 0 {
		return false
	}
	return true
}

// DryRun reports each node that would get reset without touching it.
func (p *ResetControllers) DryRun() error {
	for i := range p.hosts {
		p.DryMsg(p.hosts[i], "reset node")
	}
	return nil
}

// Run the phase. Each controller marked for reset is processed
// sequentially: optional evict taint, drain and node deletion, k0s
// service stop, etcd leave, "k0s reset" and finally cleanup of leftover
// configuration. Most failures are only logged as warnings so the reset
// proceeds on a best-effort basis.
func (p *ResetControllers) Run(ctx context.Context) error {
	for _, h := range p.hosts {
		// only schedulable (non-plain-controller) nodes get the evict taint
		if t := p.Config.Spec.Options.EvictTaint; t.Enabled && t.ControllerWorkers && h.Role != "controller" {
			log.Debugf("%s: add taint: %s", h, t.String())
			if err := p.leader.AddTaint(h, t.String()); err != nil {
				return fmt.Errorf("add taint: %w", err)
			}
		}
		if !p.NoDrain && h.Role != "controller" {
			log.Debugf("%s: draining node", h)
			// a stub host carrying only the hostname targets the node
			// object by name via the leader
			if err := p.leader.DrainNode(
				&cluster.Host{
					Metadata: cluster.HostMetadata{
						Hostname: h.Metadata.Hostname,
					},
				},
				p.Config.Spec.Options.Drain,
			); err != nil {
				log.Warnf("%s: failed to drain node: %s", h, err.Error())
			}
			// fix: moved inside the branch — previously "completed" was
			// logged even for hosts that were never drained
			log.Debugf("%s: draining node completed", h)
		}

		if !p.NoDelete && h.Role != "controller" {
			log.Debugf("%s: deleting node...", h)
			if err := p.leader.DeleteNode(&cluster.Host{
				Metadata: cluster.HostMetadata{
					Hostname: h.Metadata.Hostname,
				},
			}); err != nil {
				log.Warnf("%s: failed to delete node: %s", h, err.Error())
			}
		}

		if h.Configurer.ServiceIsRunning(h, h.K0sServiceName()) {
			log.Debugf("%s: stopping k0s...", h)
			if err := h.Configurer.StopService(h, h.K0sServiceName()); err != nil {
				log.Warnf("%s: failed to stop k0s: %s", h, err.Error())
			}
			log.Debugf("%s: waiting for k0s to stop", h)
			if err := retry.AdaptiveTimeout(ctx, retry.DefaultTimeout, node.ServiceStoppedFunc(h, h.K0sServiceName())); err != nil {
				log.Warnf("%s: failed to wait for k0s to stop: %v", h, err)
			}
			log.Debugf("%s: stopping k0s completed", h)
		}

		if !p.NoLeave {
			log.Debugf("%s: leaving etcd...", h)

			if err := h.Exec(h.Configurer.K0sCmdf("etcd leave --peer-address %s --datadir %s", h.PrivateAddress, h.K0sDataDir()), exec.Sudo(h)); err != nil {
				log.Warnf("%s: failed to leave etcd: %s", h, err.Error())
			}
			log.Debugf("%s: leaving etcd completed", h)
		}

		log.Debugf("%s: resetting k0s...", h)
		var stdoutbuf, stderrbuf bytes.Buffer
		cmd, err := h.ExecStreams(h.Configurer.K0sCmdf("reset --data-dir=%s", h.K0sDataDir()), nil, &stdoutbuf, &stderrbuf, exec.Sudo(h))
		if err != nil {
			return fmt.Errorf("failed to run k0s reset: %w", err)
		}
		// a non-zero reset exit is reported but does not abort the phase
		if err := cmd.Wait(); err != nil {
			log.Warnf("%s: k0s reset reported failure: %s %s", h, stderrbuf.String(), stdoutbuf.String())
		}
		log.Debugf("%s: resetting k0s completed", h)

		log.Debugf("%s: removing config...", h)
		if dErr := h.Configurer.DeleteFile(h, h.Configurer.K0sConfigPath()); dErr != nil {
			log.Warnf("%s: failed to remove existing configuration %s: %s", h, h.Configurer.K0sConfigPath(), dErr)
		}
		log.Debugf("%s: removing config completed", h)

		if len(h.Environment) > 0 {
			if err := h.Configurer.CleanupServiceEnvironment(h, h.K0sServiceName()); err != nil {
				log.Warnf("%s: failed to clean up service environment: %s", h, err.Error())
			}
		}

		log.Infof("%s: reset", h)
	}
	return nil
}
0707010000007D000081A40000000000000000000000016842976900000AD5000000000000000000000000000000000000002400000000k0sctl-0.25.1/phase/reset_leader.gopackage phase

import (
	"context"
	"fmt"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/k0sctl/pkg/node"
	"github.com/k0sproject/k0sctl/pkg/retry"
	"github.com/k0sproject/rig/exec"
	log "github.com/sirupsen/logrus"
)

// ResetLeader phase removes the leader from the cluster and thus destroys the cluster
type ResetLeader struct {
	GenericPhase
	// leader is the host to reset, resolved in Prepare.
	leader *cluster.Host
}

// Title returns the display name of the phase.
func (p *ResetLeader) Title() string {
	const title = "Reset leader"
	return title
}

// Prepare stores the config and resolves the leader host.
func (p *ResetLeader) Prepare(config *v1beta1.Cluster) error {
	p.Config = config
	p.leader = config.Spec.K0sLeader()
	return nil
}

// DryRun reports that the leader host would be reset.
func (p *ResetLeader) DryRun() error {
	h := p.leader
	p.DryMsg(h, "reset node")
	return nil
}

// Run the phase. The leader is optionally tainted, its k0s service is
// stopped, "k0s reset" is run and leftover configuration is removed.
// Failures after the taint step are logged as warnings so the reset
// proceeds on a best-effort basis.
func (p *ResetLeader) Run(ctx context.Context) error {
	// only schedulable (non-plain-controller) leaders get the evict taint
	if t := p.Config.Spec.Options.EvictTaint; t.Enabled && t.ControllerWorkers && p.leader.Role != "controller" {
		log.Debugf("%s: add taint %s", p.leader, t.String())
		if err := p.leader.AddTaint(p.leader, t.String()); err != nil {
			return fmt.Errorf("add taint: %w", err)
		}
	}

	if p.leader.Configurer.ServiceIsRunning(p.leader, p.leader.K0sServiceName()) {
		log.Debugf("%s: stopping k0s...", p.leader)
		if err := p.leader.Configurer.StopService(p.leader, p.leader.K0sServiceName()); err != nil {
			log.Warnf("%s: failed to stop k0s: %s", p.leader, err.Error())
		}
		log.Debugf("%s: waiting for k0s to stop", p.leader)
		if err := retry.AdaptiveTimeout(ctx, retry.DefaultTimeout, node.ServiceStoppedFunc(p.leader, p.leader.K0sServiceName())); err != nil {
			log.Warnf("%s: k0s service stop: %s", p.leader, err.Error())
		}
		log.Debugf("%s: stopping k0s completed", p.leader)
	}

	log.Debugf("%s: resetting k0s...", p.leader)
	out, err := p.leader.ExecOutput(p.leader.K0sResetCommand(), exec.Sudo(p.leader))
	if err != nil {
		// a failed reset is not fatal; output is kept for debugging
		log.Debugf("%s: k0s reset failed: %s", p.leader, out)
		log.Warnf("%s: k0s reported failure: %v", p.leader, err)
	}
	log.Debugf("%s: resetting k0s completed", p.leader)

	log.Debugf("%s: removing config...", p.leader)
	if dErr := p.leader.Configurer.DeleteFile(p.leader, p.leader.Configurer.K0sConfigPath()); dErr != nil {
		log.Warnf("%s: failed to remove existing configuration %s: %s", p.leader, p.leader.Configurer.K0sConfigPath(), dErr)
	}
	log.Debugf("%s: removing config completed", p.leader)

	// remove the environment override file if custom env vars were configured
	if len(p.leader.Environment) > 0 {
		if err := p.leader.Configurer.CleanupServiceEnvironment(p.leader, p.leader.K0sServiceName()); err != nil {
			log.Warnf("%s: failed to clean up service environment: %s", p.leader, err.Error())
		}
	}

	log.Infof("%s: reset", p.leader)

	return nil
}
0707010000007E000081A40000000000000000000000016842976900000F96000000000000000000000000000000000000002500000000k0sctl-0.25.1/phase/reset_workers.gopackage phase

import (
	"bytes"
	"context"
	"fmt"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/k0sctl/pkg/node"
	"github.com/k0sproject/k0sctl/pkg/retry"
	"github.com/k0sproject/rig/exec"
	log "github.com/sirupsen/logrus"
)

// ResetWorkers phase removes workers marked for reset from the kubernetes cluster
// and resets k0s on the host
type ResetWorkers struct {
	GenericPhase

	// NoDrain skips draining the node before reset.
	NoDrain  bool
	// NoDelete skips deleting the node object from the cluster.
	NoDelete bool

	// hosts are the workers marked for reset, selected in Prepare.
	hosts  cluster.Hosts
	// leader runs the cluster-level operations (taint, drain, delete).
	leader *cluster.Host
}

// Title returns the display name of the phase.
func (p *ResetWorkers) Title() string {
	const title = "Reset workers"
	return title
}

// Prepare resolves the leader and collects the workers that have been
// marked for reset.
func (p *ResetWorkers) Prepare(config *v1beta1.Cluster) error {
	p.Config = config
	p.leader = config.Spec.K0sLeader()

	var workers cluster.Hosts = config.Spec.Hosts.Workers()
	log.Debugf("%d workers in total", len(workers))
	p.hosts = workers.Filter(func(h *cluster.Host) bool { return h.Reset })
	log.Debugf("ResetWorkers phase prepared, %d workers will be reset", len(p.hosts))
	return nil
}

// ShouldRun is true when at least one worker is marked for reset.
func (p *ResetWorkers) ShouldRun() bool {
	if len(p.hosts) == 0 {
		return false
	}
	return true
}

// DryRun reports each node that would be reset without touching it.
func (p *ResetWorkers) DryRun() error {
	for i := range p.hosts {
		p.DryMsg(p.hosts[i], "node would be reset")
	}
	return nil
}

// Run the phase. Workers marked for reset are processed in parallel:
// optional evict taint, drain and node deletion, k0s service stop,
// "k0s reset" and cleanup of leftover configuration. Most failures are
// logged as warnings so the reset proceeds on a best-effort basis.
func (p *ResetWorkers) Run(ctx context.Context) error {
	return p.parallelDo(ctx, p.hosts, func(_ context.Context, h *cluster.Host) error {
		if t := p.Config.Spec.Options.EvictTaint; t.Enabled {
			log.Debugf("%s: add taint: %s", h, t.String())
			if err := p.leader.AddTaint(h, t.String()); err != nil {
				return fmt.Errorf("add taint: %w", err)
			}
		}
		if !p.NoDrain {
			log.Debugf("%s: draining node", h)
			// a stub host carrying only the hostname targets the node
			// object by name via the leader
			if err := p.leader.DrainNode(
				&cluster.Host{
					Metadata: cluster.HostMetadata{
						Hostname: h.Metadata.Hostname,
					},
				},
				p.Config.Spec.Options.Drain,
			); err != nil {
				log.Warnf("%s: failed to drain node: %s", h, err.Error())
			}
			// fix: moved inside the branch — previously "completed" was
			// logged even when draining was skipped
			log.Debugf("%s: draining node completed", h)
		}

		if !p.NoDelete {
			// fix: moved inside the branch so it is not emitted with --no-delete
			log.Debugf("%s: deleting node...", h)
			if err := p.leader.DeleteNode(&cluster.Host{
				Metadata: cluster.HostMetadata{
					Hostname: h.Metadata.Hostname,
				},
			}); err != nil {
				log.Warnf("%s: failed to delete node: %s", h, err.Error())
			}
			// fix: this previously repeated "deleting node" instead of "completed"
			log.Debugf("%s: deleting node completed", h)
		}

		if h.Configurer.ServiceIsRunning(h, h.K0sServiceName()) {
			log.Debugf("%s: stopping k0s...", h)
			if err := h.Configurer.StopService(h, h.K0sServiceName()); err != nil {
				log.Warnf("%s: failed to stop k0s: %s", h, err.Error())
			}
			log.Debugf("%s: waiting for k0s to stop", h)
			if err := retry.AdaptiveTimeout(ctx, retry.DefaultTimeout, node.ServiceStoppedFunc(h, h.K0sServiceName())); err != nil {
				log.Warnf("%s: failed to wait for k0s to stop: %s", h, err.Error())
			}
			log.Debugf("%s: stopping k0s completed", h)
		}

		log.Debugf("%s: resetting k0s...", h)
		var stdoutbuf, stderrbuf bytes.Buffer
		cmd, err := h.ExecStreams(h.K0sResetCommand(), nil, &stdoutbuf, &stderrbuf, exec.Sudo(h))
		if err != nil {
			return fmt.Errorf("failed to run k0s reset: %w", err)
		}
		// a non-zero reset exit is reported but does not abort the phase
		if err := cmd.Wait(); err != nil {
			log.Warnf("%s: k0s reset reported failure: %s %s", h, stderrbuf.String(), stdoutbuf.String())
		}
		log.Debugf("%s: resetting k0s completed", h)

		log.Debugf("%s: removing config...", h)
		if dErr := h.Configurer.DeleteFile(h, h.Configurer.K0sConfigPath()); dErr != nil {
			log.Warnf("%s: failed to remove existing configuration %s: %s", h, h.Configurer.K0sConfigPath(), dErr)
		}
		log.Debugf("%s: removing config completed", h)

		if len(h.Environment) > 0 {
			if err := h.Configurer.CleanupServiceEnvironment(h, h.K0sServiceName()); err != nil {
				log.Warnf("%s: failed to clean up service environment: %s", h, err.Error())
			}
		}

		log.Infof("%s: reset", h)
		return nil
	})
}
0707010000007F000081A400000000000000000000000168429769000008D3000000000000000000000000000000000000001F00000000k0sctl-0.25.1/phase/restore.gopackage phase

import (
	"bytes"
	"context"
	"fmt"
	"path"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/rig/exec"
	log "github.com/sirupsen/logrus"
)

// Restore phase uploads a backup archive to the leader and runs
// "k0s restore" to restore cluster state from it.
type Restore struct {
	GenericPhase

	// RestoreFrom is the local path to the backup archive; empty disables the phase.
	RestoreFrom string
	// leader is the target host, resolved in Prepare when RestoreFrom is set.
	leader      *cluster.Host
}

// Title returns the display name of the phase.
func (p *Restore) Title() string {
	const title = "Restore cluster state"
	return title
}

// ShouldRun is true when there is a path to a backup file, the leader has
// no k0s running and the leader is not marked for reset.
// NOTE(review): p.leader is only assigned in Prepare when RestoreFrom is
// non-empty; the short-circuit on RestoreFrom keeps this nil-safe — do not
// reorder the conditions.
func (p *Restore) ShouldRun() bool {
	return p.RestoreFrom != "" && p.leader.Metadata.K0sRunningVersion == nil && !p.leader.Reset
}

// Prepare validates the k0s version supports restore and resolves the
// leader host. It is a no-op when no backup path was given.
func (p *Restore) Prepare(config *v1beta1.Cluster) error {
	p.Config = config

	if p.RestoreFrom == "" {
		return nil
	}

	// backupSinceVersion is defined in backup.go
	if p.Config.Spec.K0s.Version.LessThan(backupSinceVersion) {
		return fmt.Errorf("the version of k0s on the host does not support restoring backups")
	}

	leader := p.Config.Spec.K0sLeader()
	p.leader = leader

	log.Tracef("restore leader: %s", leader)
	log.Tracef("restore leader state: %+v", leader.Metadata)
	return nil
}

// Run the phase: uploads the local backup archive into a temp dir on the
// leader, runs "k0s restore" against it, and removes the uploaded file and
// temp dir afterwards.
func (p *Restore) Run(_ context.Context) error {
	// Push the backup file to controller
	h := p.leader
	tmpDir, err := h.Configurer.TempDir(h)
	if err != nil {
		return err
	}
	dstFile := path.Join(tmpDir, "k0s_backup.tar.gz")
	// 0600: restrict access to the uploaded archive
	if err := h.Upload(p.RestoreFrom, dstFile, 0o600, exec.LogError(true)); err != nil {
		return err
	}

	// best-effort cleanup of the uploaded archive and its temp dir
	defer func() {
		if err := h.Configurer.DeleteFile(h, dstFile); err != nil {
			log.Warnf("%s: failed to remove backup file %s: %s", h, dstFile, err)
		}

		if err := h.Configurer.DeleteDir(h, tmpDir, exec.Sudo(h)); err != nil {
			log.Warnf("%s: failed to remove backup temp dir %s: %s", h, tmpDir, err)
		}
	}()

	// Run restore
	log.Infof("%s: restoring cluster state", h)
	var stdout, stderr bytes.Buffer
	cmd, err := h.ExecStreams(h.K0sRestoreCommand(dstFile), nil, &stdout, &stderr, exec.Sudo(h))
	if err != nil {
		return fmt.Errorf("run restore: %w", err)
	}

	if err := cmd.Wait(); err != nil {
		log.Debugf("%s: restore stdout: %s", h, stdout.String())
		log.Errorf("%s: restore failed: %s", h, stderr.String())
		return fmt.Errorf("restore failed: %w", err)
	}

	return nil
}
07070100000080000081A400000000000000000000000168429769000005DC000000000000000000000000000000000000002000000000k0sctl-0.25.1/phase/runhooks.gopackage phase

import (
	"context"
	"fmt"

	"golang.org/x/text/cases"
	"golang.org/x/text/language"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
)

// compile-time check that RunHooks satisfies the Phase interface
var _ Phase = &RunHooks{}

// RunHooks phase runs a set of hooks configured for the host
type RunHooks struct {
	GenericPhase
	// Action and Stage select which configured hooks to run.
	Action string
	Stage  string
	// hosts are the hosts with hooks for Action+Stage, selected in Prepare.
	hosts  cluster.Hosts
}

// Title builds a title-cased phase name from the stage and action.
func (p *RunHooks) Title() string {
	c := cases.Title(language.AmericanEnglish)
	return fmt.Sprintf("Run %s %s Hooks", c.String(p.Stage), c.String(p.Action))
}

// Prepare digs out the hosts that have hooks for the configured action and stage.
func (p *RunHooks) Prepare(config *v1beta1.Cluster) error {
	hasHooks := func(h *cluster.Host) bool {
		return len(h.Hooks.ForActionAndStage(p.Action, p.Stage)) != 0
	}
	p.hosts = config.Spec.Hosts.Filter(hasHooks)
	return nil
}

// ShouldRun is true when at least one host has hooks for this action and stage.
func (p *RunHooks) ShouldRun() bool {
	return len(p.hosts) != 0
}

// Run executes the hooks on all matching hosts in parallel.
func (p *RunHooks) Run(ctx context.Context) error {
	hosts := p.hosts
	return hosts.ParallelEach(ctx, p.runHooksForHost)
}

// runHooksForHost runs each configured hook command on the host in order,
// stopping at the first failure.
func (p *RunHooks) runHooksForHost(_ context.Context, h *cluster.Host) error {
	for _, step := range h.Hooks.ForActionAndStage(p.Action, p.Stage) {
		cmd := step
		if err := p.Wet(h, fmt.Sprintf("run hook: `%s`", cmd), func() error {
			return h.Exec(cmd)
		}); err != nil {
			return err
		}
	}
	return nil
}
07070100000081000081A4000000000000000000000001684297690000027D000000000000000000000000000000000000001E00000000k0sctl-0.25.1/phase/unlock.gopackage phase

import (
	"context"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	log "github.com/sirupsen/logrus"
)

// Unlock releases the exclusive k0sctl host lock by invoking Cancel when
// the phase runs. (The lock itself is presumably acquired by a separate
// locking phase that sets Cancel — confirm against that phase.)
type Unlock struct {
	GenericPhase
	// Cancel releases the lock; Prepare installs a fatal placeholder if unset.
	Cancel func()
}

// Prepare stores the config and installs a loud placeholder when no
// cancel function has been wired in.
func (p *Unlock) Prepare(c *v1beta1.Cluster) error {
	p.Config = c
	if p.Cancel != nil {
		return nil
	}
	p.Cancel = func() {
		log.Fatalf("cancel function not defined")
	}
	return nil
}

// Title returns the display name of the phase.
func (p *Unlock) Title() string {
	const title = "Release exclusive host lock"
	return title
}

// Run releases the lock by invoking the cancel function.
func (p *Unlock) Run(_ context.Context) error {
	cancel := p.Cancel
	cancel()
	return nil
}
07070100000082000081A40000000000000000000000016842976900001692000000000000000000000000000000000000002B00000000k0sctl-0.25.1/phase/upgrade_controllers.gopackage phase

import (
	"context"
	"fmt"
	"slices"
	"time"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/k0sctl/pkg/node"
	"github.com/k0sproject/k0sctl/pkg/retry"
	"github.com/k0sproject/rig/exec"
	log "github.com/sirupsen/logrus"
)

// UpgradeControllers upgrades the controllers one-by-one
type UpgradeControllers struct {
	GenericPhase

	// hosts are the controllers that need an upgrade, selected in Prepare.
	hosts cluster.Hosts
}

// Title returns the display name of the phase.
func (p *UpgradeControllers) Title() string {
	const title = "Upgrade controllers"
	return title
}

// Prepare selects the controllers that have an uploaded binary tempfile,
// are not being reset, and need an upgrade.
func (p *UpgradeControllers) Prepare(config *v1beta1.Cluster) error {
	log.Debugf("UpgradeControllers phase prep starting")
	p.Config = config
	var controllers cluster.Hosts = p.Config.Spec.Hosts.Controllers()
	log.Debugf("%d controllers in total", len(controllers))
	needsUpgrade := func(h *cluster.Host) bool {
		if h.Reset || h.Metadata.K0sBinaryTempFile == "" {
			return false
		}
		return h.Metadata.NeedsUpgrade
	}
	p.hosts = controllers.Filter(needsUpgrade)
	log.Debugf("UpgradeControllers phase prepared, %d controllers needs upgrade", len(p.hosts))
	return nil
}

// ShouldRun is true when at least one controller needs an upgrade.
func (p *UpgradeControllers) ShouldRun() bool {
	return len(p.hosts) != 0
}

// CleanUp removes the service environment override files on hosts that
// had custom environment variables configured.
func (p *UpgradeControllers) CleanUp() {
	for _, h := range p.hosts {
		if len(h.Environment) == 0 {
			continue
		}
		if err := h.Configurer.CleanupServiceEnvironment(h, h.K0sServiceName()); err != nil {
			log.Warnf("%s: failed to clean up service environment: %s", h, err.Error())
		}
	}
}

// Run the phase. Controllers are upgraded strictly one at a time: the
// uploaded binary tempfile is verified, an optional evict taint is applied
// to controller+worker nodes, the service is stopped, the binary swapped,
// the service reinstalled with --force and restarted, and finally the
// apiserver readiness endpoint is polled before moving to the next host.
func (p *UpgradeControllers) Run(ctx context.Context) error {
	for _, h := range p.hosts {
		if !h.Configurer.FileExist(h, h.Metadata.K0sBinaryTempFile) {
			return fmt.Errorf("k0s binary tempfile not found on host")
		}

		log.Infof("%s: starting upgrade", h)

		// only schedulable (controller+worker) nodes get the evict taint
		if t := p.Config.Spec.Options.EvictTaint; t.Enabled && t.ControllerWorkers && h.Role != "controller" {
			leader := p.Config.Spec.K0sLeader()
			err := p.Wet(leader, "apply taint to node", func() error {
				log.Warnf("%s: add taint %s on %s", leader, t.String(), h)
				if err := leader.AddTaint(h, t.String()); err != nil {
					return fmt.Errorf("add taint: %w", err)
				}
				// poll until the taint is visible on the node object
				log.Debugf("%s: wait for taint to be applied", h)
				err := retry.AdaptiveTimeout(ctx, retry.DefaultTimeout, func(_ context.Context) error {
					taints, err := leader.Taints(h)
					if err != nil {
						return fmt.Errorf("get taints: %w", err)
					}
					if !slices.Contains(taints, t.String()) {
						return fmt.Errorf("taint %s not found", t.String())
					}
					return nil
				})
				return err
			})
			if err != nil {
				return fmt.Errorf("apply taint: %w", err)
			}
		}

		log.Debugf("%s: stop service", h)
		err := p.Wet(h, "stop k0s service", func() error {
			if err := h.Configurer.StopService(h, h.K0sServiceName()); err != nil {
				return err
			}
			if err := retry.AdaptiveTimeout(ctx, retry.DefaultTimeout, node.ServiceStoppedFunc(h, h.K0sServiceName())); err != nil {
				return fmt.Errorf("wait for k0s service stop: %w", err)
			}
			return nil
		})
		if err != nil {
			return err
		}

		log.Debugf("%s: update binary", h)
		err = p.Wet(h, "replace k0s binary", func() error {
			return h.UpdateK0sBinary(h.Metadata.K0sBinaryTempFile, p.Config.Spec.K0s.Version)
		})
		if err != nil {
			return err
		}

		if len(h.Environment) > 0 {
			log.Infof("%s: updating service environment", h)
			err := p.Wet(h, "update service environment", func() error {
				return h.Configurer.UpdateServiceEnvironment(h, h.K0sServiceName(), h.Environment)
			})
			if err != nil {
				return err
			}
		}

		// reinstall the service definition so changed install flags take effect
		err = p.Wet(h, "reinstall k0s service", func() error {
			if p.Config.Spec.K0s.DynamicConfig {
				h.InstallFlags.AddOrReplace("--enable-dynamic-config")
			}

			h.InstallFlags.AddOrReplace("--force")

			cmd, err := h.K0sInstallCommand()
			if err != nil {
				return err
			}
			if err := h.Exec(cmd, exec.Sudo(h)); err != nil {
				return fmt.Errorf("failed to reinstall k0s: %w", err)
			}
			return nil
		})
		if err != nil {
			return err
		}
		h.Metadata.K0sInstalled = true

		log.Debugf("%s: restart service", h)
		err = p.Wet(h, "start k0s service with the new binary", func() error {
			if err := h.Configurer.StartService(h, h.K0sServiceName()); err != nil {
				return err
			}
			log.Infof("%s: waiting for the k0s service to start", h)
			if err := retry.AdaptiveTimeout(ctx, retry.DefaultTimeout, node.ServiceRunningFunc(h, h.K0sServiceName())); err != nil {
				return fmt.Errorf("k0s service start: %w", err)
			}
			return nil
		})
		if err != nil {
			return err
		}

		// wet runs only: poll the apiserver readiness endpoint before
		// moving on to the next controller
		if p.IsWet() {
			err := retry.AdaptiveTimeout(ctx, 30*time.Second, func(_ context.Context) error {
				out, err := h.ExecOutput(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "get --raw='/readyz?verbose=true'"), exec.Sudo(h))
				if err != nil {
					return fmt.Errorf("readiness endpoint reports %q: %w", out, err)
				}
				return nil
			})
			if err != nil {
				return fmt.Errorf("controller did not reach ready state: %w", err)
			}
		}

		// taint removal failure is only a warning; the upgrade itself succeeded
		if t := p.Config.Spec.Options.EvictTaint; t.Enabled && t.ControllerWorkers && h.Role != "controller" {
			leader := p.Config.Spec.K0sLeader()
			err := p.Wet(leader, "remove taint from node", func() error {
				log.Infof("%s: remove taint %s on %s", leader, t.String(), h)
				if err := leader.RemoveTaint(h, t.String()); err != nil {
					return fmt.Errorf("remove taint: %w", err)
				}
				return nil
			})
			if err != nil {
				log.Warnf("%s: failed to remove taint %s on %s: %s", leader, t.String(), h, err.Error())
			}
		}

		h.Metadata.K0sRunningVersion = p.Config.Spec.K0s.Version
	}

	return nil
}
07070100000083000081A4000000000000000000000001684297690000199A000000000000000000000000000000000000002700000000k0sctl-0.25.1/phase/upgrade_workers.gopackage phase

import (
	"context"
	"fmt"
	"math"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/k0sctl/pkg/node"
	"github.com/k0sproject/k0sctl/pkg/retry"
	"github.com/k0sproject/rig/exec"
	log "github.com/sirupsen/logrus"
)

// UpgradeWorkers upgrades workers in batches
type UpgradeWorkers struct {
	GenericPhase

	// NoDrain skips cordoning and draining nodes before the upgrade.
	NoDrain bool

	// hosts are the workers that need an upgrade, selected in Prepare.
	hosts  cluster.Hosts
	// leader runs the cluster-level operations (cordon, drain, taints).
	leader *cluster.Host
}

// Title returns the display name of the phase.
func (p *UpgradeWorkers) Title() string {
	const title = "Upgrade workers"
	return title
}

// Prepare selects the workers that need an upgrade and verifies the
// uploaded binary tempfile still exists on each of them.
func (p *UpgradeWorkers) Prepare(config *v1beta1.Cluster) error {
	p.Config = config
	p.leader = p.Config.Spec.K0sLeader()
	var workers cluster.Hosts = p.Config.Spec.Hosts.Workers()
	log.Debugf("%d workers in total", len(workers))
	needsUpgrade := func(h *cluster.Host) bool {
		if h.Metadata.K0sBinaryTempFile == "" {
			return false
		}
		return !h.Reset && h.Metadata.NeedsUpgrade
	}
	p.hosts = workers.Filter(needsUpgrade)
	checkTempFile := func(_ context.Context, h *cluster.Host) error {
		if !h.Configurer.FileExist(h, h.Metadata.K0sBinaryTempFile) {
			return fmt.Errorf("k0s binary tempfile not found on host")
		}
		return nil
	}
	if err := p.parallelDo(context.Background(), p.hosts, checkTempFile); err != nil {
		return err
	}
	log.Debugf("UpgradeWorkers phase prepared, %d workers needs upgrade", len(p.hosts))

	return nil
}

// ShouldRun is true when at least one worker needs an upgrade.
func (p *UpgradeWorkers) ShouldRun() bool {
	return len(p.hosts) != 0
}

// CleanUp removes service environment override files and uncordons the
// nodes again. It does nothing in dry-run mode.
func (p *UpgradeWorkers) CleanUp() {
	if !p.IsWet() {
		return
	}
	cleanHost := func(_ context.Context, h *cluster.Host) error {
		if len(h.Environment) != 0 {
			if err := h.Configurer.CleanupServiceEnvironment(h, h.K0sServiceName()); err != nil {
				log.Warnf("%s: failed to clean up service environment: %s", h, err.Error())
			}
		}
		_ = p.leader.UncordonNode(h)
		return nil
	}
	_ = p.parallelDo(context.Background(), p.hosts, cleanHost)
}

// Run the phase. Workers are upgraded in parallel batches sized by the
// configured worker disruption percentage (at least one at a time) and
// capped by the global concurrency limit.
func (p *UpgradeWorkers) Run(ctx context.Context) error {
	// Fix: the percentage must be converted to float64 *before* dividing by
	// 100 — the previous integer division (percent/100) truncated any value
	// below 100 to zero, collapsing the batch size to the minimum of 1.
	concurrentUpgrades := int(math.Floor(float64(len(p.hosts)) * float64(p.Config.Spec.Options.Concurrency.WorkerDisruptionPercent) / 100.0))
	if concurrentUpgrades == 0 {
		concurrentUpgrades = 1
	}
	concurrentUpgrades = min(concurrentUpgrades, p.Config.Spec.Options.Concurrency.Limit)

	log.Infof("Upgrading max %d workers in parallel", concurrentUpgrades)
	return p.hosts.BatchedParallelEach(ctx, concurrentUpgrades,
		p.start,
		p.cordonWorker,
		p.drainWorker,
		p.upgradeWorker,
		p.uncordonWorker,
		p.finish,
	)
}

// cordonWorker marks the node unschedulable unless draining is disabled
// or the run is a dry-run.
func (p *UpgradeWorkers) cordonWorker(_ context.Context, h *cluster.Host) error {
	switch {
	case p.NoDrain:
		log.Debugf("%s: not cordoning because --no-drain given", h)
	case !p.IsWet():
		p.DryMsg(h, "cordon node")
	default:
		log.Debugf("%s: cordon", h)
		if err := p.leader.CordonNode(h); err != nil {
			return fmt.Errorf("cordon node: %w", err)
		}
	}
	return nil
}

// uncordonWorker makes the node schedulable again and removes the evict
// taint when one is configured.
func (p *UpgradeWorkers) uncordonWorker(_ context.Context, h *cluster.Host) error {
	taint := p.Config.Spec.Options.EvictTaint
	if !p.IsWet() {
		p.DryMsg(h, "uncordon node")
		if taint.Enabled {
			p.DryMsgf(h, "remove taint %s", taint.String())
		}
		return nil
	}
	log.Debugf("%s: uncordon", h)
	if err := p.leader.UncordonNode(h); err != nil {
		return fmt.Errorf("uncordon node: %w", err)
	}
	if taint.Enabled {
		log.Debugf("%s: remove taint: %s", h, taint.String())
		if err := p.leader.RemoveTaint(h, taint.String()); err != nil {
			return fmt.Errorf("remove taint: %w", err)
		}
	}
	return nil
}

// drainWorker applies the evict taint when configured and drains the node,
// unless draining is disabled or the run is a dry-run.
func (p *UpgradeWorkers) drainWorker(_ context.Context, h *cluster.Host) error {
	if p.NoDrain {
		log.Debugf("%s: not draining because --no-drain given", h)
		return nil
	}
	taint := p.Config.Spec.Options.EvictTaint
	if taint.Enabled {
		log.Debugf("%s: add taint: %s", h, taint.String())
		addTaint := func() error {
			if err := p.leader.AddTaint(h, taint.String()); err != nil {
				return fmt.Errorf("add taint: %w", err)
			}
			return nil
		}
		if err := p.Wet(h, "add taint "+taint.String(), addTaint); err != nil {
			return err
		}
	}
	if !p.IsWet() {
		p.DryMsg(h, "drain node")
		return nil
	}
	log.Debugf("%s: drain", h)
	if err := p.leader.DrainNode(h, p.Config.Spec.Options.Drain); err != nil {
		return fmt.Errorf("drain node: %w", err)
	}
	return nil
}

// start announces that the upgrade of a worker is beginning.
func (p *UpgradeWorkers) start(_ context.Context, h *cluster.Host) error {
	log.Infof("%s: starting upgrade", h)
	return nil
}

// finish announces that the upgrade of a worker has completed.
func (p *UpgradeWorkers) finish(_ context.Context, h *cluster.Host) error {
	log.Infof("%s: upgrade finished", h)
	return nil
}

// upgradeWorker performs the in-place upgrade of a single worker: stop the
// k0s service, swap in the uploaded binary, update the service environment
// if configured, reinstall the service with --force, and restart it —
// optionally waiting for the kube node to become Ready again.
func (p *UpgradeWorkers) upgradeWorker(ctx context.Context, h *cluster.Host) error {
	log.Debugf("%s: stop service", h)
	err := p.Wet(h, "stop k0s service", func() error {
		if err := h.Configurer.StopService(h, h.K0sServiceName()); err != nil {
			return err
		}

		if err := retry.AdaptiveTimeout(ctx, retry.DefaultTimeout, node.ServiceStoppedFunc(h, h.K0sServiceName())); err != nil {
			return err
		}

		return nil
	})
	if err != nil {
		return err
	}

	log.Debugf("%s: update binary", h)
	err = p.Wet(h, "replace k0s binary", func() error {
		return h.UpdateK0sBinary(h.Metadata.K0sBinaryTempFile, p.Config.Spec.K0s.Version)
	})
	if err != nil {
		return err
	}

	if len(h.Environment) > 0 {
		log.Infof("%s: updating service environment", h)
		err := p.Wet(h, "update service environment", func() error {
			return h.Configurer.UpdateServiceEnvironment(h, h.K0sServiceName(), h.Environment)
		})
		if err != nil {
			return err
		}
	}

	// reinstall the service definition so changed install flags take effect
	err = p.Wet(h, "reinstall k0s service", func() error {
		h.InstallFlags.AddOrReplace("--force")

		cmd, err := h.K0sInstallCommand()
		if err != nil {
			return err
		}
		if err := h.Exec(cmd, exec.Sudo(h)); err != nil {
			return fmt.Errorf("failed to reinstall k0s: %w", err)
		}
		return nil
	})
	if err != nil {
		return err
	}
	h.Metadata.K0sInstalled = true

	log.Debugf("%s: restart service", h)
	err = p.Wet(h, "restart k0s service", func() error {
		if err := h.Configurer.StartService(h, h.K0sServiceName()); err != nil {
			return err
		}
		// NoWait is a package-level flag set from the --no-wait CLI option
		if NoWait {
			log.Debugf("%s: not waiting because --no-wait given", h)
		} else {
			log.Infof("%s: waiting for node to become ready again", h)
			if err := retry.AdaptiveTimeout(ctx, retry.DefaultTimeout, node.KubeNodeReadyFunc(h)); err != nil {
				return fmt.Errorf("node did not become ready: %w", err)
			}
		}
		return nil
	})
	if err != nil {
		return err
	}

	h.Metadata.Ready = true
	return nil
}
07070100000084000081A40000000000000000000000016842976900000A40000000000000000000000000000000000000002200000000k0sctl-0.25.1/phase/upload_k0s.gopackage phase

import (
	"context"
	"fmt"
	"os"
	"strconv"
	"time"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/rig/exec"
	log "github.com/sirupsen/logrus"
)

// UploadK0s uploads k0s binaries from localhost to target
type UploadK0s struct {
	GenericPhase
	// hosts are the hosts that need the binary uploaded, selected in Prepare.
	hosts cluster.Hosts
}

// Title returns the display name of the phase.
func (p *UploadK0s) Title() string {
	const title = "Upload k0s binaries to hosts"
	return title
}

// Prepare selects the hosts that have a local binary configured and either
// run a different k0s version or have a changed binary file.
func (p *UploadK0s) Prepare(config *v1beta1.Cluster) error {
	p.Config = config
	needsUpload := func(h *cluster.Host) bool {
		// nothing to upload, or the host is going to be reset anyway
		if h.UploadBinaryPath == "" || h.Reset {
			return false
		}

		if !p.Config.Spec.K0s.Version.Equal(h.Metadata.K0sBinaryVersion) {
			log.Debugf("%s: k0s version on host is '%s'", h, h.Metadata.K0sBinaryVersion)
			return true
		}

		// re-upload when the local file differs from the one on the host
		return h.FileChanged(h.UploadBinaryPath, h.Configurer.K0sBinaryPath())
	}
	p.hosts = p.Config.Spec.Hosts.Filter(needsUpload)
	return nil
}

// ShouldRun is true when at least one host needs the binary uploaded.
func (p *UploadK0s) ShouldRun() bool {
	return len(p.hosts) != 0
}

// Run uploads the k0s binary to all selected hosts in parallel.
func (p *UploadK0s) Run(ctx context.Context) error {
	hosts := p.hosts
	return p.parallelDoUpload(ctx, hosts, p.uploadBinary)
}

// uploadBinary uploads the local k0s binary to a unique temporary path on the
// host and records the path in h.Metadata.K0sBinaryTempFile for the install
// step. The temp file receives the local file's modification time (so change
// detection keeps working) and is made executable.
func (p *UploadK0s) uploadBinary(_ context.Context, h *cluster.Host) error {
	// FormatInt avoids truncating the 64-bit nanosecond timestamp through a
	// platform-sized int on 32-bit builds
	tmp := h.Configurer.K0sBinaryPath() + ".tmp." + strconv.FormatInt(time.Now().UnixNano(), 10)

	stat, err := os.Stat(h.UploadBinaryPath)
	if err != nil {
		return fmt.Errorf("stat %s: %w", h.UploadBinaryPath, err)
	}

	log.Infof("%s: uploading k0s binary from %s to %s", h, h.UploadBinaryPath, tmp)
	if err := h.Upload(h.UploadBinaryPath, tmp, 0o600, exec.Sudo(h), exec.LogError(true)); err != nil {
		return fmt.Errorf("upload k0s binary: %w", err)
	}

	if err := h.Configurer.Touch(h, tmp, stat.ModTime(), exec.Sudo(h)); err != nil {
		return fmt.Errorf("failed to touch %s: %w", tmp, err)
	}

	// chmod failure is non-fatal here; a later step will surface the problem
	if err := h.Execf(`chmod +x "%s"`, tmp, exec.Sudo(h)); err != nil {
		log.Warnf("%s: failed to chmod k0s temp binary: %v", h, err.Error())
	}

	h.Metadata.K0sBinaryTempFile = tmp

	return nil
}

// CleanUp removes any leftover k0s binary temp files from the hosts.
func (p *UploadK0s) CleanUp() {
	_ = p.parallelDo(context.Background(), p.hosts, func(_ context.Context, h *cluster.Host) error {
		tmpfile := h.Metadata.K0sBinaryTempFile
		if tmpfile == "" {
			return nil
		}
		// best effort deletion; errors are deliberately ignored during cleanup
		_ = h.Configurer.DeleteFile(h, tmpfile)
		return nil
	})
}
07070100000085000081A400000000000000000000000168429769000015AE000000000000000000000000000000000000002300000000k0sctl-0.25.1/phase/uploadfiles.gopackage phase

import (
	"context"
	"fmt"
	"os"
	"path"

	"al.essio.dev/pkg/shellescape"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/rig/exec"

	log "github.com/sirupsen/logrus"
)

// UploadFiles implements a phase which upload files to hosts
type UploadFiles struct {
	GenericPhase

	// hosts is the subset of configured hosts that have files to upload and
	// are not marked for reset, populated by Prepare.
	hosts cluster.Hosts
}

// Title returns the printable name of the phase.
func (p *UploadFiles) Title() string {
	return "Upload files to hosts"
}

// Prepare stores the config and selects hosts that have files defined and
// are not about to be reset.
func (p *UploadFiles) Prepare(config *v1beta1.Cluster) error {
	p.Config = config
	p.hosts = p.Config.Spec.Hosts.Filter(func(h *cluster.Host) bool {
		if h.Reset {
			return false
		}
		return len(h.Files) > 0
	})

	return nil
}

// ShouldRun is true when there are hosts with files to upload
func (p *UploadFiles) ShouldRun() bool {
	return len(p.hosts) > 0
}

// Run uploads files to the hosts selected in Prepare. Using the pre-filtered
// p.hosts (instead of all of Spec.Hosts) ensures hosts marked for reset do
// not receive uploads.
func (p *UploadFiles) Run(ctx context.Context) error {
	return p.parallelDoUpload(ctx, p.hosts, p.uploadFiles)
}

// uploadFiles processes each file definition of the host, dispatching to the
// URL download or the local file upload implementation. Aborts early when the
// context gets canceled.
func (p *UploadFiles) uploadFiles(ctx context.Context, h *cluster.Host) error {
	for _, f := range h.Files {
		if err := ctx.Err(); err != nil {
			return fmt.Errorf("upload canceled: %w", err)
		}
		upload := p.uploadFile
		if f.IsURL() {
			upload = p.uploadURL
		}
		if err := upload(h, f); err != nil {
			return err
		}
	}
	return nil
}

// ensureDir makes sure directory dir exists on the host with the given
// permission mode and owner. An already existing directory is left untouched.
// An empty perm defaults to 0755; an empty owner skips the chown.
func (p *UploadFiles) ensureDir(h *cluster.Host, dir, perm, owner string) error {
	log.Debugf("%s: ensuring directory %s", h, dir)
	if h.Configurer.FileExist(h, dir) {
		return nil
	}

	err := p.Wet(h, fmt.Sprintf("create a directory for uploading: `mkdir -p \"%s\"`", dir), func() error {
		return h.Configurer.MkDir(h, dir, exec.Sudo(h))
	})
	if err != nil {
		return fmt.Errorf("failed to create directory %s: %w", dir, err)
	}

	if perm == "" {
		perm = "0755"
	}

	err = p.Wet(h, fmt.Sprintf("set permissions for directory %s to %s", dir, perm), func() error {
		return h.Configurer.Chmod(h, dir, perm, exec.Sudo(h))
	})
	if err != nil {
		return fmt.Errorf("failed to set permissions for directory %s: %w", dir, err)
	}

	if owner != "" {
		err = p.Wet(h, fmt.Sprintf("set owner for directory %s to %s", dir, owner), func() error {
			// shellescape.Quote for consistency with the other chown calls in
			// this file and to handle special characters safely
			return h.Execf(`chown %s %s`, shellescape.Quote(owner), shellescape.Quote(dir), exec.Sudo(h))
		})
		if err != nil {
			return err
		}
	}

	return nil
}

// uploadFile uploads one or more local source files to the host, creating the
// destination directory and applying permissions, ownership and the local
// file's modification timestamp to each uploaded file.
func (p *UploadFiles) uploadFile(h *cluster.Host, f *cluster.UploadFile) error {
	log.Infof("%s: uploading %s", h, f)
	numfiles := len(f.Sources)

	for i, s := range f.Sources {
		dest := f.DestinationFile
		if dest == "" {
			dest = path.Join(f.DestinationDir, s.Path)
		}

		src := path.Join(f.Base, s.Path)
		if numfiles > 1 {
			log.Infof("%s: uploading file %s => %s (%d of %d)", h, src, dest, i+1, numfiles)
		}

		owner := f.Owner()

		if err := p.ensureDir(h, path.Dir(dest), f.DirPermString, owner); err != nil {
			return err
		}

		// stat once; the result is needed both for the upload mode and for
		// restoring the modification time after upload
		stat, err := os.Stat(src)
		if err != nil {
			// wrap with %w so callers can inspect the underlying fs error
			return fmt.Errorf("failed to stat %s: %w", src, err)
		}

		if h.FileChanged(src, dest) {
			err := p.Wet(h, fmt.Sprintf("upload file %s => %s", src, dest), func() error {
				return h.Upload(src, dest, stat.Mode(), exec.Sudo(h), exec.LogError(true))
			})
			if err != nil {
				return err
			}
		} else {
			log.Infof("%s: file already exists and hasn't been changed, skipping upload", h)
		}

		if owner != "" {
			err := p.Wet(h, fmt.Sprintf("set owner for %s to %s", dest, owner), func() error {
				log.Debugf("%s: setting owner %s for %s", h, owner, dest)
				return h.Execf(`chown %s %s`, shellescape.Quote(owner), shellescape.Quote(dest), exec.Sudo(h))
			})
			if err != nil {
				return err
			}
		}
		err = p.Wet(h, fmt.Sprintf("set permissions for %s to %s", dest, s.PermMode), func() error {
			log.Debugf("%s: setting permissions %s for %s", h, s.PermMode, dest)
			return h.Configurer.Chmod(h, dest, s.PermMode, exec.Sudo(h))
		})
		if err != nil {
			return err
		}
		err = p.Wet(h, fmt.Sprintf("set timestamp for %s to %s", dest, stat.ModTime()), func() error {
			log.Debugf("%s: touching %s", h, dest)
			return h.Configurer.Touch(h, dest, stat.ModTime(), exec.Sudo(h))
		})
		if err != nil {
			return fmt.Errorf("failed to touch %s: %w", dest, err)
		}
	}

	return nil
}

// uploadURL makes the host itself download the source URL to the destination
// file, then applies permissions and ownership when those are configured.
func (p *UploadFiles) uploadURL(h *cluster.Host, f *cluster.UploadFile) error {
	log.Infof("%s: downloading %s to host %s", h, f, f.DestinationFile)
	owner := f.Owner()

	if err := p.ensureDir(h, path.Dir(f.DestinationFile), f.DirPermString, owner); err != nil {
		return err
	}

	// url may contain tokens such as the k0s version that need expansion
	expandedURL := h.ExpandTokens(f.Source, p.Config.Spec.K0s.Version)
	if err := p.Wet(h, fmt.Sprintf("download file %s => %s", expandedURL, f.DestinationFile), func() error {
		return h.Configurer.DownloadURL(h, expandedURL, f.DestinationFile, exec.Sudo(h))
	}); err != nil {
		return err
	}

	if perm := f.PermString; perm != "" {
		if err := p.Wet(h, fmt.Sprintf("set permissions for %s to %s", f.DestinationFile, perm), func() error {
			return h.Configurer.Chmod(h, f.DestinationFile, perm, exec.Sudo(h))
		}); err != nil {
			return err
		}
	}

	if owner != "" {
		if err := p.Wet(h, fmt.Sprintf("set owner for %s to %s", f.DestinationFile, owner), func() error {
			log.Debugf("%s: setting owner %s for %s", h, owner, f.DestinationFile)
			return h.Execf(`chown %s %s`, shellescape.Quote(owner), shellescape.Quote(f.DestinationFile), exec.Sudo(h))
		}); err != nil {
			return err
		}
	}

	return nil
}
07070100000086000081A40000000000000000000000016842976900000BD5000000000000000000000000000000000000002D00000000k0sctl-0.25.1/phase/validate_etcd_members.gopackage phase

import (
	"context"
	"fmt"
	"slices"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	log "github.com/sirupsen/logrus"
)

// ValidateEtcdMembers checks for existing etcd members with the same IP as a new controller
type ValidateEtcdMembers struct {
	GenericPhase
	// hosts holds the controllers that have no k0s running yet (i.e. new
	// controllers), populated by Prepare.
	hosts cluster.Hosts
}

// Title returns the printable name of the phase.
func (p *ValidateEtcdMembers) Title() string {
	return "Validate etcd members"
}

// Prepare stores the config and collects the controllers that do not have
// k0s running yet.
func (p *ValidateEtcdMembers) Prepare(config *v1beta1.Cluster) error {
	p.Config = config
	isNewController := func(h *cluster.Host) bool {
		return h.Metadata.K0sRunningVersion == nil // only check new controllers
	}
	p.hosts = p.Config.Spec.Hosts.Controllers().Filter(isNewController)

	return nil
}

// ShouldRun is true when there are new controllers to check and the cluster
// actually runs k0s managed etcd. Fresh clusters, single-node clusters and
// non-etcd storage backends skip the phase.
func (p *ValidateEtcdMembers) ShouldRun() bool {
	if p.Config.Spec.K0sLeader().Metadata.K0sRunningVersion == nil {
		log.Debugf("%s: leader has no k0s running, assuming a fresh cluster", p.Config.Spec.K0sLeader())
		return false
	}

	if p.Config.Spec.K0sLeader().Role == "single" {
		log.Debugf("%s: leader is a single node, assuming no etcd", p.Config.Spec.K0sLeader())
		return false
	}

	if s := p.Config.StorageType(); s != "etcd" {
		log.Debugf("%s: storage type is %q, not k0s managed etcd", p.Config.Spec.K0sLeader(), s)
		// no etcd, so there are no members to validate
		return false
	}

	return len(p.hosts) > 0
}

// Run executes the phase by validating controller replacements against the
// known etcd member list.
func (p *ValidateEtcdMembers) Run(_ context.Context) error {
	return p.validateControllerSwap()
}

// validateControllerSwap guards against re-using an existing etcd member's
// address for a freshly installed controller (a replaced host). With --force
// the stale member is removed via "k0s etcd leave" on the leader; otherwise
// an error tells the operator how to resolve the situation.
func (p *ValidateEtcdMembers) validateControllerSwap() error {
	if len(p.Config.Metadata.EtcdMembers) > len(p.Config.Spec.Hosts.Controllers()) {
		log.Warnf("there are more etcd members in the cluster than controllers listed in the configuration")
	}

	for _, h := range p.hosts {
		log.Debugf("%s: host is new, checking if etcd members list already contains %s", h, h.PrivateAddress)
		if slices.Contains(p.Config.Metadata.EtcdMembers, h.PrivateAddress) {
			if Force {
				log.Infof("%s: force used, running 'k0s etcd leave' for the host", h)
				leader := p.Config.Spec.K0sLeader()
				leaveCommand := leader.Configurer.K0sCmdf("etcd leave --peer-address %s", h.PrivateAddress)
				err := p.Wet(h, fmt.Sprintf("remove host from etcd using %v", leaveCommand), func() error {
					return leader.Exec(leaveCommand)
				})
				if err != nil {
					return fmt.Errorf("controller %s is listed as an existing etcd member but k0s is not found installed on it, the host may have been replaced. attempted etcd leave for the address %s but it failed: %w", h, h.PrivateAddress, err)
				}
				continue
			}
			// the suggested command is backtick-quoted in full so it can be
			// copy-pasted from the error message
			return fmt.Errorf("controller %s is listed as an existing etcd member but k0s is not found installed on it, the host may have been replaced. check the host and use `k0s etcd leave --peer-address %s` on a controller or re-run apply with --force", h, h.PrivateAddress)
		}
		log.Debugf("%s: no match, assuming its safe to install", h)
	}

	return nil
}
07070100000087000081A40000000000000000000000016842976900000769000000000000000000000000000000000000002600000000k0sctl-0.25.1/phase/validate_facts.gopackage phase

import (
	"context"
	"fmt"

	log "github.com/sirupsen/logrus"
)

// ValidateFacts validates the gathered facts, guarding against downgrades
// and accidental upgrades with a defaulted k0s version.
type ValidateFacts struct {
	GenericPhase
	// SkipDowngradeCheck disables the downgrade validation when true.
	SkipDowngradeCheck bool
}

// Title returns the printable name of the phase.
func (p *ValidateFacts) Title() string {
	return "Validate facts"
}

// Run performs the downgrade and defaulted-version validations in order.
func (p *ValidateFacts) Run(_ context.Context) error {
	checks := []func() error{
		p.validateDowngrade,
		p.validateDefaultVersion,
	}
	for _, check := range checks {
		if err := check(); err != nil {
			return err
		}
	}
	return nil
}

// validateDowngrade returns an error when the configured k0s version is lower
// than the version already running on the leader, unless the check has been
// explicitly skipped or either version is unknown.
func (p *ValidateFacts) validateDowngrade() error {
	if p.SkipDowngradeCheck {
		return nil
	}

	running := p.Config.Spec.K0sLeader().Metadata.K0sRunningVersion
	target := p.Config.Spec.K0s.Version
	if running == nil || target == nil {
		return nil
	}

	if running.GreaterThan(target) {
		return fmt.Errorf("can't perform a downgrade: %s > %s", running, target)
	}

	return nil
}

// validateDefaultVersion prevents an accidental upgrade when the k0s version
// in the configuration was automatically defaulted rather than set by the
// user: the defaulted version is rolled back to the running one and upgrade
// flags on the hosts are cleared.
func (p *ValidateFacts) validateDefaultVersion() error {
	// only relevant when the version was defaulted
	if !p.Config.Spec.K0s.Metadata.VersionDefaulted {
		return nil
	}

	running := p.Config.Spec.K0sLeader().Metadata.K0sRunningVersion
	// fresh installs of the latest version are fine
	if running == nil {
		return nil
	}

	if p.Config.Spec.K0s.Version.GreaterThan(running) {
		log.Warnf("spec.k0s.version was automatically defaulted to %s but the cluster is running %s", p.Config.Spec.K0s.Version, running)
		log.Warnf("to perform an upgrade, set the k0s version in the configuration explicitly")
		// pin the config back to the running version and cancel any upgrades
		p.Config.Spec.K0s.Version = running
		for _, h := range p.Config.Spec.Hosts {
			h.Metadata.NeedsUpgrade = false
		}
	}

	return nil
}
07070100000088000081A400000000000000000000000168429769000015A4000000000000000000000000000000000000002600000000k0sctl-0.25.1/phase/validate_hosts.gopackage phase

import (
	"context"
	"fmt"
	"io/fs"
	"path/filepath"
	"sort"
	"sync"
	"time"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	log "github.com/sirupsen/logrus"
)

// ValidateHosts validates the hosts: uniqueness of identifiers, privilege
// escalation, stale temp files and clock skew.
type ValidateHosts struct {
	GenericPhase
	// hncount counts occurrences of each hostname across the hosts.
	hncount          map[string]int
	// machineidcount counts machine id occurrences; left nil (check disabled)
	// for k0s versions at or above uniqueMachineIDSince.
	machineidcount   map[string]int
	// privateaddrcount counts private address occurrences.
	privateaddrcount map[string]int
}

// Title returns the printable name of the phase.
func (p *ValidateHosts) Title() string {
	return "Validate hosts"
}

// Run the phase: count duplicate hostnames, machine ids and private
// addresses, refuse to continue when every controller is marked for reset,
// then execute the per-host validations in parallel and finally compare
// clock skew across the hosts.
func (p *ValidateHosts) Run(ctx context.Context) error {
	p.hncount = make(map[string]int, len(p.Config.Spec.Hosts))
	// the machine id map is only created (and thus the uniqueness check only
	// performed) for k0s versions below uniqueMachineIDSince
	if p.Config.Spec.K0s.Version.LessThan(uniqueMachineIDSince) {
		p.machineidcount = make(map[string]int, len(p.Config.Spec.Hosts))
	}
	p.privateaddrcount = make(map[string]int, len(p.Config.Spec.Hosts))

	controllerCount := len(p.Config.Spec.Hosts.Controllers())
	var resetControllerCount int
	for _, h := range p.Config.Spec.Hosts {
		p.hncount[h.Metadata.Hostname]++
		if p.machineidcount != nil {
			p.machineidcount[h.Metadata.MachineID]++
		}
		if h.PrivateAddress != "" {
			p.privateaddrcount[h.PrivateAddress]++
		}
		if h.IsController() && h.Reset {
			resetControllerCount++
		}
	}

	// resetting every controller would destroy the cluster's control plane
	if resetControllerCount >= controllerCount {
		return fmt.Errorf("all controllers are marked to be reset - this will break the cluster. use `k0sctl reset` instead if that is intentional")
	}

	err := p.parallelDo(
		ctx,
		p.Config.Spec.Hosts,
		p.warnK0sBinaryPath,
		p.validateUniqueHostname,
		p.validateUniqueMachineID,
		p.validateUniquePrivateAddress,
		p.validateSudo,
		p.cleanUpOldK0sTmpFiles,
	)
	if err != nil {
		return err
	}
	// clock skew needs results from all hosts, so it runs after the others
	return p.validateClockSkew(ctx)
}

// warnK0sBinaryPath emits a warning for hosts that use a custom k0s binary
// path, since version checking is disabled for them. Never returns an error.
func (p *ValidateHosts) warnK0sBinaryPath(_ context.Context, h *cluster.Host) error {
	if h.K0sBinaryPath == "" {
		return nil
	}
	log.Warnf("%s: k0s binary path is set to %q, version checking for the host is disabled. The k0s version for other hosts is %s.", h, h.K0sBinaryPath, p.Config.Spec.K0s.Version)
	return nil
}

// validateUniqueHostname fails when another host reports the same hostname.
func (p *ValidateHosts) validateUniqueHostname(_ context.Context, h *cluster.Host) error {
	if count := p.hncount[h.Metadata.Hostname]; count > 1 {
		return fmt.Errorf("hostname is not unique: %s", h.Metadata.Hostname)
	}

	return nil
}

// validateUniquePrivateAddress fails when another host shares the same
// private address.
func (p *ValidateHosts) validateUniquePrivateAddress(_ context.Context, h *cluster.Host) error {
	if count := p.privateaddrcount[h.PrivateAddress]; count > 1 {
		return fmt.Errorf("privateAddress %q is not unique: %s", h.PrivateAddress, h.Metadata.Hostname)
	}

	return nil
}

// validateUniqueMachineID fails when another host shares the same machine id.
// Reading from a nil map yields zero, so this is a no-op when the check has
// been disabled for the configured k0s version.
func (p *ValidateHosts) validateUniqueMachineID(_ context.Context, h *cluster.Host) error {
	if count := p.machineidcount[h.Metadata.MachineID]; count > 1 {
		return fmt.Errorf("machine id %s is not unique: %s", h.Metadata.MachineID, h.Metadata.Hostname)
	}

	return nil
}

// validateSudo verifies that privilege escalation works on the host.
func (p *ValidateHosts) validateSudo(_ context.Context, h *cluster.Host) error {
	return h.Configurer.CheckPrivilege(h)
}

// cleanUpOlderThan is the age threshold after which a leftover k0s upload
// temp file is considered abandoned and deleted.
const cleanUpOlderThan = 30 * time.Minute

// clean up any k0s.tmp.* files from K0sBinaryPath that are older than 30 minutes and warn if there are any that are newer than that
// NOTE(review): the walk root is a glob pattern ("<dir>/k0s.tmp.*");
// presumably the host's SudoFsys resolves it — confirm against the rig fs
// implementation. All failures are logged but never propagated.
func (p *ValidateHosts) cleanUpOldK0sTmpFiles(_ context.Context, h *cluster.Host) error {
	err := fs.WalkDir(h.SudoFsys(), filepath.Join(filepath.Dir(h.Configurer.K0sBinaryPath()), "k0s.tmp.*"), func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			log.Warnf("failed to walk k0s.tmp.* files in %s: %v", h.Configurer.K0sBinaryPath(), err)
			return nil
		}
		log.Debugf("%s: found k0s binary upload temporary file %s", h, path)
		info, err := d.Info()
		if err != nil {
			log.Warnf("%s: failed to get info for %s: %v", h, path, err)
			return nil
		}
		// old leftovers are deleted; newer ones may belong to a concurrent
		// run, so those only produce a warning below
		if time.Since(info.ModTime()) > cleanUpOlderThan {
			log.Warnf("%s: cleaning up old k0s binary upload temporary file %s", h, path)
			if err := h.Configurer.DeleteFile(h, path); err != nil {
				log.Warnf("%s: failed to delete %s: %v", h, path, err)
			}
			return nil
		}
		log.Warnf("%s: found k0s binary upload temporary file %s that is newer than %s", h, path, cleanUpOlderThan)
		return nil
	})
	if err != nil {
		log.Warnf("failed to walk k0s.tmp.* files in %s: %v", h.Configurer.K0sBinaryPath(), err)
	}
	// cleanup is best effort; validation continues regardless
	return nil
}

// maxSkew is the maximum tolerated deviation of a host clock from the median
// skew observed across all hosts.
const maxSkew = 30 * time.Second

// validateClockSkew collects each host's clock offset relative to local time
// and fails when any host deviates from the median offset by more than
// maxSkew. Comparing against the median keeps the check tolerant of a skewed
// local clock.
func (p *ValidateHosts) validateClockSkew(ctx context.Context) error {
	log.Infof("validating clock skew")
	hostCount := len(p.Config.Spec.Hosts)
	if hostCount == 0 {
		// nothing to compare; also guards the median indexing below against
		// an out-of-range panic on an empty slice
		return nil
	}
	skews := make(map[*cluster.Host]time.Duration, hostCount)
	var skewValues []time.Duration
	var mu sync.Mutex

	// Collect skews relative to local time
	err := p.parallelDo(ctx, p.Config.Spec.Hosts, func(_ context.Context, h *cluster.Host) error {
		remote, err := h.Configurer.SystemTime(h)
		if err != nil {
			return fmt.Errorf("failed to get time from %s: %w", h, err)
		}
		skew := time.Now().UTC().Sub(remote).Round(time.Second)
		mu.Lock()
		skews[h] = skew
		skewValues = append(skewValues, skew)
		mu.Unlock()
		return nil
	})
	if err != nil {
		return err
	}

	// Sort skews to find the median
	sort.Slice(skewValues, func(i, j int) bool { return skewValues[i] < skewValues[j] })
	median := skewValues[len(skewValues)/2]

	// Check if any skew exceeds the maxSkew relative to the median
	var foundExceeding int
	for h, skew := range skews {
		deviation := (skew - median).Abs()
		if deviation > maxSkew {
			log.Errorf("%s: clock skew of %.0f seconds exceeds the maximum of %.0f seconds", h, deviation.Seconds(), maxSkew.Seconds())
			foundExceeding++
		}
	}

	if foundExceeding > 0 {
		return fmt.Errorf("clock skew exceeds the maximum on %d hosts", foundExceeding)
	}

	return nil
}
07070100000089000081A40000000000000000000000016842976900000518000000000000000000000000000000000000002B00000000k0sctl-0.25.1/phase/validate_hosts_test.gopackage phase

import (
	"context"
	"testing"
	"time"

	cfg "github.com/k0sproject/k0sctl/configurer"
	"github.com/k0sproject/k0sctl/configurer/linux"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/rig/os"
	"github.com/stretchr/testify/require"
)

// mockconfigurer provides a SystemTime stub with a configurable offset so
// clock skew validation can be tested without real hosts.
type mockconfigurer struct {
	cfg.Linux
	linux.Ubuntu
	// skew is added to the current local time in SystemTime
	skew time.Duration
}

// SystemTime returns the local time shifted by the configured skew.
func (c *mockconfigurer) SystemTime(_ os.Host) (time.Time, error) {
	return time.Now().Add(c.skew), nil
}

// TestValidateClockSkew exercises validateClockSkew with mocked host clocks.
// Note: the failure subtest mutates the shared config, so it must run after
// the success subtest.
func TestValidateClockSkew(t *testing.T) {
	hosts := []*cluster.Host{
		{
			// 10 seconds behind local time - within tolerance
			Configurer: &mockconfigurer{skew: -10 * time.Second},
		},
		{
			// 10 seconds ahead - within tolerance
			Configurer: &mockconfigurer{skew: 10 * time.Second},
		},
		{
			// 1 nanosecond ahead - effectively in sync
			Configurer: &mockconfigurer{skew: 1},
		},
	}

	config := &v1beta1.Cluster{
		Spec: &cluster.Spec{
			Hosts: hosts,
		},
	}

	p := &ValidateHosts{
		GenericPhase: GenericPhase{
			Config:  config,
			manager: &Manager{},
		},
	}

	t.Run("clock skew success", func(t *testing.T) {
		require.NoError(t, p.validateClockSkew(context.Background()))
	})
	t.Run("clock skew failure", func(t *testing.T) {
		// replace the third host's clock with one far outside tolerance
		p.Config.Spec.Hosts[2].Configurer = &mockconfigurer{skew: time.Minute}
		require.Error(t, p.validateClockSkew(context.Background()))
	})
}
0707010000008A000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000001200000000k0sctl-0.25.1/pkg0707010000008B000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000001700000000k0sctl-0.25.1/pkg/apis0707010000008C000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000002C00000000k0sctl-0.25.1/pkg/apis/k0sctl.k0sproject.io0707010000008D000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000003400000000k0sctl-0.25.1/pkg/apis/k0sctl.k0sproject.io/v1beta10707010000008E000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000003C00000000k0sctl-0.25.1/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster0707010000008F000081A40000000000000000000000016842976900000B8E000000000000000000000000000000000000003F00000000k0sctl-0.25.1/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster.gopackage v1beta1

import (
	"bytes"
	"fmt"

	"github.com/creasty/defaults"
	"github.com/jellydator/validation"
	"gopkg.in/yaml.v2"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
)

// APIVersion is the current api version
const APIVersion = "k0sctl.k0sproject.io/v1beta1"

// ClusterMetadata defines cluster metadata
type ClusterMetadata struct {
	Name        string            `yaml:"name" validate:"required" default:"k0s-cluster"`
	User        string            `yaml:"user" default:"admin"`
	// Kubeconfig is populated at runtime and never serialized (yaml:"-");
	// presumably holds the cluster kubeconfig content - confirm with callers.
	Kubeconfig  string            `yaml:"-"`
	// EtcdMembers is a runtime-only list of known etcd member addresses.
	EtcdMembers []string          `yaml:"-"`
	// Manifests is a runtime-only map of generated manifest contents by name.
	Manifests   map[string][]byte `yaml:"-"`
}

// Cluster describes launchpad.yaml configuration
type Cluster struct {
	APIVersion string           `yaml:"apiVersion"`
	Kind       string           `yaml:"kind"`
	Metadata   *ClusterMetadata `yaml:"metadata"`
	Spec       *cluster.Spec    `yaml:"spec"`
}

// UnmarshalYAML sets in some sane defaults when unmarshaling the data from yaml
func (c *Cluster) UnmarshalYAML(unmarshal func(interface{}) error) error {
	// pre-populate nested pointers so decoding has somewhere to write
	c.Metadata = &ClusterMetadata{}
	c.Spec = &cluster.Spec{}

	// decode through a type alias to avoid invoking this UnmarshalYAML
	// recursively
	type clusterConfig Cluster
	yc := (*clusterConfig)(c)

	if err := unmarshal(yc); err != nil {
		return err
	}

	if err := defaults.Set(c); err != nil {
		return fmt.Errorf("failed to set defaults: %w", err)
	}

	return nil
}

// String renders the config as a YAML string. On encoding failure a YAML
// comment describing the error is returned instead.
func (c *Cluster) String() string {
	var buf bytes.Buffer
	enc := yaml.NewEncoder(&buf)
	if err := enc.Encode(c); err != nil {
		// fixed typo in the emitted message ("enconding" -> "encoding")
		return "# error encoding cluster config: " + err.Error()
	}
	return buf.String()
}

// SetDefaults initializes default values
func (c *Cluster) SetDefaults() {
	// make sure the nested pointers exist before applying defaults to them
	if c.Metadata == nil {
		c.Metadata = &ClusterMetadata{}
	}
	if c.Spec == nil {
		c.Spec = &cluster.Spec{}
	}
	// best effort; errors from the defaults library are deliberately ignored
	_ = defaults.Set(c.Metadata)
	_ = defaults.Set(c.Spec)
	// only fill apiVersion/kind when they are still unset
	if defaults.CanUpdate(c.APIVersion) {
		c.APIVersion = APIVersion
	}
	if defaults.CanUpdate(c.Kind) {
		c.Kind = "Cluster"
	}
}

// Validate performs a configuration sanity check
func (c *Cluster) Validate() error {
	// NOTE(review): this mutates package-global state in the validation
	// library so error messages use the yaml field names - confirm no other
	// code depends on a different ErrorTag
	validation.ErrorTag = "yaml"
	return validation.ValidateStruct(c,
		validation.Field(&c.APIVersion, validation.Required, validation.In(APIVersion).Error("must equal "+APIVersion)),
		validation.Field(&c.Kind, validation.Required, validation.In("cluster", "Cluster").Error("must equal Cluster")),
		validation.Field(&c.Spec),
	)
}

// StorageType returns the k0s storage type: an explicit value from the k0s
// config when present, "kine" for single-node clusters, "etcd" otherwise.
func (c *Cluster) StorageType() string {
	spec := c.Spec
	if spec == nil {
		// no spec at all; should never happen, fall back to etcd
		return "etcd"
	}

	if k0s := spec.K0s; k0s != nil {
		// an explicitly configured storage type wins
		if t := k0s.Config.DigString("spec", "storage", "type"); t != "" {
			return t
		}
	}

	if leader := spec.K0sLeader(); leader != nil && leader.Role == "single" {
		// single node clusters default to kine
		return "kine"
	}

	return "etcd"
}
07070100000090000081A40000000000000000000000016842976900001461000000000000000000000000000000000000004500000000k0sctl-0.25.1/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/flags.gopackage cluster

import (
	"fmt"
	"strconv"
	"strings"

	"al.essio.dev/pkg/shellescape"
	"github.com/k0sproject/k0sctl/internal/shell"
)

// Flags is a slice of strings with added functions to ease manipulating lists of command-line flags.
// Each element holds one complete flag, for example "--san=10.0.0.1" or "--debug".
type Flags []string

// Add appends the flag unconditionally, unquoting it first when possible.
func (f *Flags) Add(s string) {
	unquoted, err := shell.Unquote(s)
	if err == nil {
		s = unquoted
	}
	*f = append(*f, s)
}

// AddWithValue adds a key=value flag, unquoting the value when possible.
func (f *Flags) AddWithValue(key, value string) {
	if nv, err := shell.Unquote(value); err == nil {
		value = nv
	}
	*f = append(*f, key+"="+value)
}

// AddUnlessExist adds a flag unless one with the same key prefix exists.
func (f *Flags) AddUnlessExist(s string) {
	if ns, err := shell.Unquote(s); err == nil {
		s = ns
	}
	if !f.Include(s) {
		f.Add(s)
	}
}

// AddOrReplace replaces an existing flag with the same key prefix, or
// appends a new one when there is no match.
func (f *Flags) AddOrReplace(s string) {
	if ns, err := shell.Unquote(s); err == nil {
		s = ns
	}
	if idx := f.Index(s); idx >= 0 {
		(*f)[idx] = s
		return
	}
	f.Add(s)
}

// Include returns true when a flag with a matching key prefix exists.
func (f Flags) Include(s string) bool {
	return f.Index(s) >= 0
}

// Index returns the position of a flag whose key matches s, or -1 when no
// flag matches. The key is the part before the first "=" or space.
func (f Flags) Index(s string) int {
	if ns, err := shell.Unquote(s); err == nil {
		s = ns
	}
	key := s
	if sepidx := strings.IndexAny(s, "= "); sepidx >= 0 {
		key = s[:sepidx]
	}
	for i, candidate := range f {
		// exact match, or same key followed by a value separator
		if candidate == s || strings.HasPrefix(candidate, key+"=") || strings.HasPrefix(candidate, key+" ") {
			return i
		}
	}
	return -1
}

// Get returns the full flag including its value, such as "--san=10.0.0.1",
// or "" when no matching flag is found.
func (f Flags) Get(s string) string {
	if idx := f.Index(s); idx >= 0 {
		return f[idx]
	}
	return ""
}

// GetValue returns the value part of a flag, such as "10.0.0.1" for a flag
// like "--san=10.0.0.1". Flags without a value yield "".
func (f Flags) GetValue(s string) string {
	flag := f.Get(s)
	if flag == "" {
		return ""
	}
	if unquoted, err := shell.Unquote(flag); err == nil {
		flag = unquoted
	}

	sepidx := strings.IndexAny(flag, "= ")
	if sepidx < 0 {
		// the flag carries no value part
		return ""
	}
	return flag[sepidx+1:]
}

// GetBoolean returns the boolean value of a flag such as "--force=true".
// A flag that is not present yields false. A flag present without a value
// yields true. Otherwise the value is parsed with strconv.ParseBool and an
// error is returned for unparseable values.
func (f Flags) GetBoolean(s string) (bool, error) {
	idx := f.Index(s)
	if idx < 0 {
		return false, nil
	}

	fl := f.GetValue(s)
	if fl == "" {
		return true, nil
	}

	return strconv.ParseBool(fl)
}

// Delete removes the first flag with a matching key from the list; a miss is
// a no-op.
func (f *Flags) Delete(s string) {
	idx := f.Index(s)
	if idx < 0 {
		return
	}
	list := *f
	*f = append(list[:idx], list[idx+1:]...)
}

// Merge adds the flags of b into this list, skipping keys that already exist.
func (f *Flags) Merge(b Flags) {
	for _, other := range b {
		f.AddUnlessExist(other)
	}
}

// MergeOverwrite adds the flags of b into this list, replacing any existing
// flags with the same key.
func (f *Flags) MergeOverwrite(b Flags) {
	for _, other := range b {
		f.AddOrReplace(other)
	}
}

// MergeAdd appends the flags of b into this list unconditionally, allowing
// duplicates.
func (f *Flags) MergeAdd(b Flags) {
	for _, other := range b {
		f.Add(other)
	}
}

// Join renders the flags as a single space-separated, shell-quoted string.
func (f *Flags) Join() string {
	parts := make([]string, 0, len(*f))
	f.Each(func(k, v string) {
		if v == "" && k != "" {
			// valueless flag: just the quoted key
			parts = append(parts, shellescape.Quote(k))
			return
		}
		parts = append(parts, fmt.Sprintf("%s=%s", k, shellescape.Quote(v)))
	})
	return strings.Join(parts, " ")
}

// Each calls fn with the key and (unquoted) value of every flag. Completely
// empty entries are skipped; valueless flags are reported with an empty
// value.
func (f Flags) Each(fn func(string, string)) {
	for _, flag := range f {
		sepidx := strings.IndexAny(flag, "= ")
		if sepidx < 0 {
			// a flag without a value; ignore empty entries entirely
			if flag != "" {
				fn(flag, "")
			}
			continue
		}
		key := flag[:sepidx]
		value := flag[sepidx+1:]
		if unq, err := shell.Unquote(value); err == nil {
			value = unq
		}
		fn(key, value)
	}
}

// Map returns the flags as a map of key to (possibly empty) value.
func (f Flags) Map() map[string]string {
	res := make(map[string]string, len(f))
	f.Each(func(k, v string) {
		res[k] = v
	})
	return res
}

// Equals compares the flags with another Flags and returns true if they have the same flags and values, ignoring order
// NOTE(review): when the same key occurs multiple times, GetValue resolves
// only the first occurrence on both sides — confirm duplicate keys are not
// expected here.
func (f Flags) Equals(b Flags) bool {
	if len(f) != len(b) {
		return false
	}
	for _, flag := range f {
		if !b.Include(flag) {
			return false
		}
		ourValue := f.GetValue(flag)
		theirValue := b.GetValue(flag)
		if ourValue != theirValue {
			return false
		}
	}
	return true
}

// NewFlags shell-splits and parses a string into Flags, returning an error
// when unquoting or splitting fails.
func NewFlags(s string) (Flags, error) {
	var flags Flags
	unquoted, unquoteErr := shell.Unquote(s)
	if unquoteErr != nil {
		return flags, fmt.Errorf("failed to unquote flags %q: %w", s, unquoteErr)
	}
	parts, splitErr := shell.Split(unquoted)
	if splitErr != nil {
		return flags, fmt.Errorf("failed to split flags %q: %w", s, splitErr)
	}
	for _, part := range parts {
		flags.Add(part)
	}
	return flags, nil
}
07070100000091000081A40000000000000000000000016842976900001044000000000000000000000000000000000000004A00000000k0sctl-0.25.1/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/flags_test.gopackage cluster

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// TestFlags exercises Index, Include, Delete, AddOrReplace, AddUnlessExist
// and value extraction on a flag list.
func TestFlags(t *testing.T) {
	flags := Flags{"--admin-username=foofoo", "--san foo", "--ucp-insecure-tls"}
	require.Equal(t, "--ucp-insecure-tls", flags[2])
	require.Equal(t, 0, flags.Index("--admin-username"))
	require.Equal(t, 1, flags.Index("--san"))
	require.Equal(t, 2, flags.Index("--ucp-insecure-tls"))
	require.True(t, flags.Include("--san"))

	// deleting shifts later indexes down
	flags.Delete("--san")
	require.Equal(t, 1, flags.Index("--ucp-insecure-tls"))
	require.False(t, flags.Include("--san"))

	flags.AddOrReplace("--san 10.0.0.1")
	require.Equal(t, 2, flags.Index("--san"))
	require.Equal(t, "--san 10.0.0.1", flags.Get("--san"))
	require.Equal(t, "10.0.0.1", flags.GetValue("--san"))
	require.Equal(t, "foofoo", flags.GetValue("--admin-username"))

	require.Len(t, flags, 3)
	flags.AddOrReplace("--admin-password=barbar")
	require.Equal(t, 3, flags.Index("--admin-password"))
	require.Equal(t, "barbar", flags.GetValue("--admin-password"))

	// AddUnlessExist must not overwrite the existing value
	require.Len(t, flags, 4)
	flags.AddUnlessExist("--admin-password=borbor")
	require.Len(t, flags, 4)
	require.Equal(t, "barbar", flags.GetValue("--admin-password"))

	flags.AddUnlessExist("--help")
	require.Len(t, flags, 5)
	require.True(t, flags.Include("--help"))
}

// TestFlagsWithQuotes verifies that quoted flag values are unquoted on access.
func TestFlagsWithQuotes(t *testing.T) {
	flags := Flags{"--admin-username \"foofoo\"", "--admin-password=\"foobar\""}
	require.Equal(t, "foofoo", flags.GetValue("--admin-username"))
	require.Equal(t, "foobar", flags.GetValue("--admin-password"))
}

// TestString verifies that Join renders the flags space-separated.
func TestString(t *testing.T) {
	flags := Flags{"--help", "--setting=false"}
	require.Equal(t, "--help --setting=false", flags.Join())
}

func TestGetBoolean(t *testing.T) {
	t.Run("Valid flags", func(t *testing.T) {
		testsValid := []struct {
			flag   string
			expect bool
		}{
			{"--flag", true},
			{"--flag=true", true},
			{"--flag=false", false},
			{"--flag=1", true},
			{"--flag=TRUE", true},
		}
		for _, test := range testsValid {
			flags := Flags{test.flag}
			result, err := flags.GetBoolean(test.flag)
			require.NoError(t, err)
			require.Equal(t, test.expect, result)

			flags = Flags{"--unrelated-flag1", "--unrelated-flag2=foo", test.flag}
			result, err = flags.GetBoolean(test.flag)
			require.NoError(t, err)
			require.Equal(t, test.expect, result)
		}
	})

	t.Run("Invalid flags", func(t *testing.T) {
		testsInvalid := []string{
			"--flag=foo",
			"--flag=2",
			"--flag=TrUe",
			"--flag=-4",
			"--flag=FalSe",
		}
		for _, test := range testsInvalid {
			flags := Flags{test}
			_, err := flags.GetBoolean(test)
			require.Error(t, err)

			flags = Flags{"--unrelated-flag1", "--unrelated-flag2=foo", test}
			_, err = flags.GetBoolean(test)
			require.Error(t, err)
		}
	})

	t.Run("Unknown flags", func(t *testing.T) {
		flags := Flags{"--flag1=1", "--flag2"}
		result, err := flags.GetBoolean("--flag3")
		require.NoError(t, err)
		require.Equal(t, result, false)
	})
}

func TestEach(t *testing.T) {
	flags := Flags{"--flag1", "--flag2=foo", "--flag3=bar"}
	var countF, countV int
	flags.Each(func(flag string, value string) {
		countF++
		if value != "" {
			countV++
		}
	})
	require.Equal(t, 3, countF)
	require.Equal(t, 2, countV)
}

// TestMap verifies that Map converts flags into a flag->value map,
// with empty values for flags without one.
func TestMap(t *testing.T) {
	flags := Flags{"--flag1", "--flag2=foo", "--flag3=bar"}
	m := flags.Map()
	require.Len(t, m, 3)
	for flag, expected := range map[string]string{"--flag1": "", "--flag2": "foo", "--flag3": "bar"} {
		require.Equal(t, expected, m[flag])
	}
}

// TestEquals verifies flag set comparison for identical, shorter and
// differing flag lists.
func TestEquals(t *testing.T) {
	base := Flags{"--flag1", "--flag2=foo", "--flag3=bar"}
	require.True(t, base.Equals(Flags{"--flag1", "--flag2=foo", "--flag3=bar"}))

	// a missing flag makes the sets unequal
	require.False(t, base.Equals(Flags{"--flag1", "--flag2=foo"}))

	// a different flag name or value makes the sets unequal
	require.False(t, base.Equals(Flags{"-f", "--flag2=foo", "--flag3=baz"}))
}

// TestNewFlags verifies parsing of a flag string into a Flags value.
func TestNewFlags(t *testing.T) {
	t.Run("basic", func(t *testing.T) {
		flags, err := NewFlags("--hello=world --bar=baz")
		require.NoError(t, err)
		require.Equal(t, "world", flags.GetValue("--hello"))
		require.Equal(t, "baz", flags.GetValue("--bar"))
	})
	t.Run("empty", func(t *testing.T) {
		// an empty string must parse without error
		if _, err := NewFlags(""); err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
	})
}
07070100000092000081A4000000000000000000000001684297690000015B000000000000000000000000000000000000004400000000k0sctl-0.25.1/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/hook.gopackage cluster

// Hooks maps an action to a stage to a list of commands, for example
// hooks["apply"]["before"] = ["ls -al", "rm foo.txt"].
type Hooks map[string]map[string][]string

// ForActionAndStage returns the commands hooked to the given action and
// stage, or nil when none are defined.
func (h Hooks) ForActionAndStage(action, stage string) []string {
	if stages, ok := h[action]; ok && len(stages) > 0 {
		return stages[stage]
	}
	return nil
}
07070100000093000081A40000000000000000000000016842976900005215000000000000000000000000000000000000004400000000k0sctl-0.25.1/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/host.gopackage cluster

import (
	"fmt"
	"net/url"
	gos "os"
	gopath "path"
	"slices"
	"strings"
	"time"

	"al.essio.dev/pkg/shellescape"
	"github.com/creasty/defaults"
	"github.com/go-playground/validator/v10"
	"github.com/jellydator/validation"
	"github.com/jellydator/validation/is"
	"github.com/k0sproject/rig"
	"github.com/k0sproject/rig/exec"
	"github.com/k0sproject/rig/os"
	"github.com/k0sproject/rig/os/registry"
	"github.com/k0sproject/version"
	log "github.com/sirupsen/logrus"
)

// K0sForceFlagSince is the first k0s version whose "install" accepts the
// --force flag; K0sInstallFlags drops --force for older binaries.
var K0sForceFlagSince = version.MustParse("v1.27.4+k0s.0")

// Host contains all the needed details to work with hosts
type Host struct {
	rig.Connection `yaml:",inline"`

	// user-facing configuration fields, populated from the cluster yaml
	Role             string            `yaml:"role"`
	Reset            bool              `yaml:"reset,omitempty"`
	PrivateInterface string            `yaml:"privateInterface,omitempty"`
	PrivateAddress   string            `yaml:"privateAddress,omitempty"`
	DataDir          string            `yaml:"dataDir,omitempty"`
	KubeletRootDir   string            `yaml:"kubeletRootDir,omitempty"`
	Environment      map[string]string `yaml:"environment,flow,omitempty"`
	UploadBinary     bool              `yaml:"uploadBinary,omitempty"`
	K0sBinaryPath    string            `yaml:"k0sBinaryPath,omitempty"`
	K0sDownloadURL   string            `yaml:"k0sDownloadURL,omitempty"`
	InstallFlags     Flags             `yaml:"installFlags,omitempty"`
	Files            []*UploadFile     `yaml:"files,omitempty"`
	OSIDOverride     string            `yaml:"os,omitempty"`
	HostnameOverride string            `yaml:"hostname,omitempty"`
	NoTaints         bool              `yaml:"noTaints,omitempty"`
	Hooks            Hooks             `yaml:"hooks,omitempty"`

	// runtime-only state, never serialized back to yaml
	UploadBinaryPath string       `yaml:"-"`
	Metadata         HostMetadata `yaml:"-"`
	Configurer       configurer   `yaml:"-"`
}

// SetDefaults normalizes the host: it applies connection defaults, derives
// the role from role-implying install flags and moves --data-dir /
// --kubelet-root-dir flags into the corresponding host fields.
func (h *Host) SetDefaults() {
	// an explicit "os:" override short-circuits OS detection
	if h.OSIDOverride != "" {
		h.OSVersion = &rig.OSVersion{ID: h.OSIDOverride}
	}

	_ = defaults.Set(h.Connection)

	// --single and --enable-worker imply roles; the flags win over the yaml role
	if h.InstallFlags.Get("--single") != "" && h.InstallFlags.GetValue("--single") != "false" && h.Role != "single" {
		log.Debugf("%s: changed role from '%s' to 'single' because of --single installFlag", h, h.Role)
		h.Role = "single"
	}
	if h.InstallFlags.Get("--enable-worker") != "" && h.InstallFlags.GetValue("--enable-worker") != "false" && h.Role != "controller+worker" {
		log.Debugf("%s: changed role from '%s' to 'controller+worker' because of --enable-worker installFlag", h, h.Role)
		h.Role = "controller+worker"
	}

	if h.InstallFlags.Get("--no-taints") != "" && h.InstallFlags.GetValue("--no-taints") != "false" {
		h.NoTaints = true
	}

	// move directory flags into the dedicated fields so they are managed in one place
	if dd := h.InstallFlags.GetValue("--data-dir"); dd != "" {
		if h.DataDir != "" {
			log.Debugf("%s: changed dataDir from '%s' to '%s' because of --data-dir installFlag", h, h.DataDir, dd)
		}
		h.InstallFlags.Delete("--data-dir")
		h.DataDir = dd
	}

	if krd := h.InstallFlags.GetValue("--kubelet-root-dir"); krd != "" {
		if h.KubeletRootDir != "" {
			// fix: log the previous kubeletRootDir, not the dataDir
			log.Debugf("%s: changed kubeletRootDir from '%s' to '%s' because of --kubelet-root-dir installFlag", h, h.KubeletRootDir, krd)
		}
		h.InstallFlags.Delete("--kubelet-root-dir")
		h.KubeletRootDir = krd
	}
}

// validateBalancedQuotes checks that a string contains an even number of
// unescaped single and double quotes; it returns an error for non-string
// input or unbalanced quoting.
func validateBalancedQuotes(val any) error {
	s, ok := val.(string)
	if !ok {
		return fmt.Errorf("invalid type")
	}

	var single, double int
	for i := 0; i < len(s); i++ {
		// a character directly preceded by a backslash counts as escaped
		if i > 0 && s[i-1] == '\\' {
			continue
		}
		switch s[i] {
		case '\'':
			single++
		case '"':
			double++
		}
	}

	if single%2 != 0 || double%2 != 0 {
		return fmt.Errorf("unbalanced quotes in %s", s)
	}

	return nil
}

// Validate checks the host configuration for errors: first the rig
// connection struct tags, then the k0sctl-specific field rules.
func (h *Host) Validate() error {
	// For rig validation
	v := validator.New()
	if err := v.Struct(h); err != nil {
		return err
	}

	return validation.ValidateStruct(h,
		validation.Field(&h.Role, validation.In("controller", "worker", "controller+worker", "single").Error("unknown role "+h.Role)),
		validation.Field(&h.PrivateAddress, is.IP),
		validation.Field(&h.Files),
		// noTaints only makes sense when a worker runs on a controller node
		validation.Field(&h.NoTaints, validation.When(h.Role != "controller+worker", validation.NotIn(true).Error("noTaints can only be true for controller+worker role"))),
		// each install flag must have balanced quoting to survive shell splitting
		validation.Field(&h.InstallFlags, validation.Each(validation.By(validateBalancedQuotes))),
	)
}

// configurer is the OS-specific operation set that a host configurer
// (see configurer/) must implement; ResolveConfigurer picks a matching
// implementation from the rig OS registry.
type configurer interface {
	Kind() string
	CheckPrivilege(os.Host) error
	// service management
	StartService(os.Host, string) error
	StopService(os.Host, string) error
	RestartService(os.Host, string) error
	ServiceIsRunning(os.Host, string) bool
	Arch(os.Host) (string, error)
	// k0s specific helpers
	K0sCmdf(string, ...interface{}) string
	K0sBinaryPath() string
	K0sBinaryVersion(os.Host) (*version.Version, error)
	K0sConfigPath() string
	DataDirDefaultPath() string
	K0sJoinTokenPath() string
	// file and environment operations on the remote host
	WriteFile(os.Host, string, string, string) error
	UpdateEnvironment(os.Host, map[string]string) error
	DaemonReload(os.Host) error
	ReplaceK0sTokenPath(os.Host, string) error
	ServiceScriptPath(os.Host, string) (string, error)
	ReadFile(os.Host, string) (string, error)
	FileExist(os.Host, string) bool
	Chmod(os.Host, string, string, ...exec.Option) error
	DownloadK0s(os.Host, string, *version.Version, string, ...exec.Option) error
	DownloadURL(os.Host, string, string, ...exec.Option) error
	InstallPackage(os.Host, ...string) error
	FileContains(os.Host, string, string) bool
	MoveFile(os.Host, string, string) error
	MkDir(os.Host, string, ...exec.Option) error
	DeleteFile(os.Host, string) error
	CommandExist(os.Host, string) bool
	Hostname(os.Host) string
	KubectlCmdf(os.Host, string, string, ...interface{}) string
	KubeconfigPath(os.Host, string) string
	IsContainer(os.Host) bool
	FixContainer(os.Host) error
	HTTPStatus(os.Host, string) (int, error)
	PrivateInterface(os.Host) (string, error)
	PrivateAddress(os.Host, string, string) (string, error)
	TempDir(os.Host) (string, error)
	TempFile(os.Host) (string, error)
	UpdateServiceEnvironment(os.Host, string, map[string]string) error
	CleanupServiceEnvironment(os.Host, string) error
	Stat(os.Host, string, ...exec.Option) (*os.FileInfo, error)
	Touch(os.Host, string, time.Time, ...exec.Option) error
	DeleteDir(os.Host, string, ...exec.Option) error
	K0sctlLockFilePath(os.Host) string
	UpsertFile(os.Host, string, string) error
	MachineID(os.Host) (string, error)
	SetPath(string, string)
	SystemTime(os.Host) (time.Time, error)
}

// HostMetadata resolved metadata for host
type HostMetadata struct {
	K0sBinaryVersion  *version.Version // version of the k0s binary found on the host
	K0sBinaryTempFile string           // temp path of an uploaded k0s binary awaiting install
	K0sRunningVersion *version.Version // version reported by the running k0s service
	K0sInstalled      bool
	K0sExistingConfig string // k0s config currently on the host
	K0sNewConfig      string // k0s config about to be applied
	K0sTokenData      TokenData
	K0sStatusArgs     Flags // install args reported by "k0s status", compared in FlagsChanged
	Arch              string
	IsK0sLeader       bool // true for the controller that initializes the cluster
	Hostname          string
	Ready             bool
	NeedsUpgrade      bool
	MachineID         string
	DryRunFakeLeader  bool
}

// UnmarshalYAML sets in some sane defaults when unmarshaling the data from yaml
func (h *Host) UnmarshalYAML(unmarshal func(interface{}) error) error {
	// the alias type drops this method from the method set, preventing
	// infinite recursion when unmarshal is called below
	type host Host
	yh := (*host)(h)

	// pre-initialize so an absent "environment" key yields an empty map, not nil
	yh.Environment = make(map[string]string)

	if err := unmarshal(yh); err != nil {
		return err
	}

	if h.SSH != nil && h.SSH.HostKey != "" {
		log.Warnf("%s: host.ssh.hostKey is deprecated, use a ssh known hosts file instead", h)
	}

	return defaults.Set(h)
}

// Address returns the connection address for the host, defaulting to
// localhost when the connection does not provide one.
func (h *Host) Address() string {
	addr := h.Connection.Address()
	if addr == "" {
		return "127.0.0.1"
	}
	return addr
}

// Protocol returns the name of the protocol used to communicate with the
// host ("ssh", "winrm", "local" or "nil" when none is configured).
func (h *Host) Protocol() string {
	switch {
	case h.SSH != nil:
		return "ssh"
	case h.WinRM != nil:
		return "winrm"
	case h.Localhost != nil:
		return "local"
	default:
		return "nil"
	}
}

// ResolveConfigurer assigns a rig-style configurer to the Host (see configurer/)
func (h *Host) ResolveConfigurer() error {
	builder, err := registry.GetOSModuleBuilder(*h.OSVersion)
	if err != nil {
		return err
	}

	c, ok := builder().(configurer)
	if !ok {
		return fmt.Errorf("unsupported OS")
	}

	h.Configurer = c
	return nil
}

// K0sJoinTokenPath returns the token file path from install flags or configurer
func (h *Host) K0sJoinTokenPath() string {
	path := h.InstallFlags.GetValue("--token-file")
	if path == "" {
		// no explicit flag, fall back to the OS default
		path = h.Configurer.K0sJoinTokenPath()
	}
	return path
}

// K0sConfigPath returns the config file path from install flags or configurer
func (h *Host) K0sConfigPath() string {
	// the long flag takes precedence over the short one
	for _, flag := range []string{"--config", "-c"} {
		if path := h.InstallFlags.GetValue(flag); path != "" {
			return path
		}
	}
	return h.Configurer.K0sConfigPath()
}

// K0sRole returns the role name to pass to "k0s install"; both
// controller+worker and single install as a controller.
func (h *Host) K0sRole() string {
	if h.Role == "controller+worker" || h.Role == "single" {
		return "controller"
	}
	return h.Role
}

// K0sInstallFlags returns the full set of flags for "k0s install": the
// host's installFlags augmented with role, data dir, token, config and
// kubelet related flags derived from the host configuration.
func (h *Host) K0sInstallFlags() (Flags, error) {
	flags := Flags(h.InstallFlags)

	// the data dir is always passed explicitly
	flags.AddOrReplace(fmt.Sprintf("--data-dir=%s", shellescape.Quote(h.K0sDataDir())))

	if h.KubeletRootDir != "" {
		flags.AddOrReplace(fmt.Sprintf("--kubelet-root-dir=%s", shellescape.Quote(h.KubeletRootDir)))
	}

	// the combined roles are expressed to k0s as controller flags
	switch h.Role {
	case "controller+worker":
		flags.AddUnlessExist("--enable-worker=true")
		if h.NoTaints {
			flags.AddUnlessExist("--no-taints=true")
		}
	case "single":
		flags.AddUnlessExist("--single=true")
	}

	// every host except the cluster-initializing leader joins via a token
	if !h.Metadata.IsK0sLeader {
		flags.AddUnlessExist(fmt.Sprintf(`--token-file=%s`, shellescape.Quote(h.K0sJoinTokenPath())))
	}

	if h.IsController() {
		flags.AddUnlessExist(fmt.Sprintf(`--config=%s`, shellescape.Quote(h.K0sConfigPath())))
	}

	if strings.HasSuffix(h.Role, "worker") {
		// --kubelet-extra-args holds a nested flag list; parse it so
		// additions merge instead of clobbering user-supplied values
		var extra Flags
		if old := flags.GetValue("--kubelet-extra-args"); old != "" {
			ex, err := NewFlags(old)
			if err != nil {
				return flags, fmt.Errorf("failed to split kubelet-extra-args: %w", err)
			}
			extra = ex
		}
		// set worker's private address to --node-ip in --kubelet-extra-args if cloud provider isn't enabled
		enableCloudProvider, err := h.InstallFlags.GetBoolean("--enable-cloud-provider")
		if err != nil {
			return flags, fmt.Errorf("--enable-cloud-provider flag is set to invalid value: %s. (%v)", h.InstallFlags.GetValue("--enable-cloud-provider"), err)
		}
		if !enableCloudProvider && h.PrivateAddress != "" {
			extra.AddUnlessExist("--node-ip=" + h.PrivateAddress)
		}

		if h.HostnameOverride != "" {
			extra.AddOrReplace("--hostname-override=" + h.HostnameOverride)
		}
		if extra != nil {
			flags.AddOrReplace(fmt.Sprintf("--kubelet-extra-args=%s", shellescape.Quote(extra.Join())))
		}
	}

	// drop --force for k0s binaries that predate support for it
	if flags.Include("--force") && h.Metadata.K0sBinaryVersion != nil && h.Metadata.K0sBinaryVersion.LessThan(K0sForceFlagSince) {
		log.Warnf("%s: k0s version %s does not support the --force flag, ignoring it", h, h.Metadata.K0sBinaryVersion)
		flags.Delete("--force")
	}

	return flags, nil
}

// K0sInstallCommand returns a full command that will install k0s service with necessary flags
func (h *Host) K0sInstallCommand() (string, error) {
	flags, err := h.K0sInstallFlags()
	if err != nil {
		return "", err
	}
	cmd := h.Configurer.K0sCmdf("install %s %s", h.K0sRole(), flags.Join())
	return cmd, nil
}

// K0sResetCommand returns a full command that will reset k0s
func (h *Host) K0sResetCommand() string {
	args := Flags{fmt.Sprintf("--data-dir=%s", shellescape.Quote(h.K0sDataDir()))}
	if h.KubeletRootDir != "" {
		args.Add(fmt.Sprintf("--kubelet-root-dir=%s", shellescape.Quote(h.KubeletRootDir)))
	}
	return h.Configurer.K0sCmdf("reset %s", args.Join())
}

// K0sBackupCommand returns a full command to be used as run k0s backup
// NOTE(review): the data dir is not shell-quoted here, unlike in the
// install/reset commands — confirm data dirs never contain spaces.
func (h *Host) K0sBackupCommand(targetDir string) string {
	return h.Configurer.K0sCmdf("backup --save-path %s --data-dir %s", shellescape.Quote(targetDir), h.K0sDataDir())
}

// K0sRestoreCommand returns a full command to restore cluster state from a backup
func (h *Host) K0sRestoreCommand(backupfile string) string {
	return h.Configurer.K0sCmdf("restore --data-dir=%s %s", h.K0sDataDir(), shellescape.Quote(backupfile))
}

// IsController returns true for the controller, controller+worker and
// single roles.
func (h *Host) IsController() bool {
	switch h.Role {
	case "controller", "controller+worker", "single":
		return true
	}
	return false
}

// K0sServiceName returns the correct k0s service name for the host's role.
func (h *Host) K0sServiceName() string {
	// controller, controller+worker and single all run the controller service
	if h.IsController() {
		return "k0scontroller"
	}
	return "k0sworker"
}

// k0sBinaryPathDir returns the directory portion of the configurer's k0s binary path.
func (h *Host) k0sBinaryPathDir() string {
	return gopath.Dir(h.Configurer.K0sBinaryPath())
}

// InstallK0sBinary installs the k0s binary from the provided file path to K0sBinaryPath
func (h *Host) InstallK0sBinary(path string) error {
	if !h.Configurer.FileExist(h, path) {
		return fmt.Errorf("k0s binary tempfile not found")
	}

	// ensure the target directory exists, root-owned with 0755 permissions
	dir := h.k0sBinaryPathDir()
	if err := h.Execf(`install -m 0755 -o root -g root -d "%s"`, dir, exec.Sudo(h)); err != nil {
		return fmt.Errorf("create k0s binary dir: %w", err)
	}

	// install(1) copies the file and sets mode/ownership in one step
	if err := h.Execf(`install -m 0750 -o root -g root "%s" "%s"`, path, h.Configurer.K0sBinaryPath(), exec.Sudo(h)); err != nil {
		return fmt.Errorf("install k0s binary: %w", err)
	}

	// failing to clean up the tempfile is not fatal, only logged
	if err := h.Configurer.DeleteFile(h, path); err != nil {
		log.Warnf("%s: failed to delete k0s binary tempfile: %s", h, err)
	}

	return nil
}

// UpdateK0sBinary updates the binary on the host from the provided file path
// and verifies the installed binary reports the expected version.
// Note: the version parameter shadows the version package inside this function.
func (h *Host) UpdateK0sBinary(path string, version *version.Version) error {
	if err := h.InstallK0sBinary(path); err != nil {
		return fmt.Errorf("update k0s binary: %w", err)
	}

	updatedVersion, err := h.Configurer.K0sBinaryVersion(h)
	if err != nil {
		return fmt.Errorf("failed to get updated k0s binary version: %w", err)
	}
	// verify the installed version matches the expected version, unless a custom k0sbinarypath is used
	if h.K0sBinaryPath == "" && !version.Equal(updatedVersion) {
		return fmt.Errorf("updated k0s binary version is %s not %s", updatedVersion, version)
	}

	h.Metadata.K0sBinaryVersion = version

	return nil
}

// K0sDataDir returns the data dir for the host either from host.DataDir or the default from configurer's DataDirDefaultPath
func (h *Host) K0sDataDir() string {
	if dir := h.DataDir; dir != "" {
		return dir
	}
	return h.Configurer.DataDirDefaultPath()
}

// DrainNode drains the given node via kubectl run on this host.
func (h *Host) DrainNode(node *Host, options DrainOption) error {
	return h.Exec(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "drain %s %s", options.ToKubectlArgs(), node.Metadata.Hostname), exec.Sudo(h))
}

// CordonNode marks the node unschedulable
func (h *Host) CordonNode(node *Host) error {
	return h.Exec(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "cordon %s", node.Metadata.Hostname), exec.Sudo(h))
}

// UncordonNode marks the node schedulable
func (h *Host) UncordonNode(node *Host) error {
	return h.Exec(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "uncordon %s", node.Metadata.Hostname), exec.Sudo(h))
}

// DeleteNode deletes the given node from kubernetes
func (h *Host) DeleteNode(node *Host) error {
	return h.Exec(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "delete node %s", node.Metadata.Hostname), exec.Sudo(h))
}

// Taints returns all taints added to the node.
// The jsonpath template prints one "key=value:effect" entry per line,
// which is then split into a slice.
func (h *Host) Taints(node *Host) ([]string, error) {
	output, err := h.ExecOutput(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), `get node %s -o jsonpath='{range .spec.taints[*]}{.key}={.value}:{.effect}{"\n"}{end}'`, node.Metadata.Hostname), exec.Sudo(h))
	if err != nil {
		return nil, fmt.Errorf("failed to get node taints: %w", err)
	}
	return strings.Split(strings.TrimSpace(output), "\n"), nil
}

// AddTaint adds a taint to the node.
func (h *Host) AddTaint(node *Host, taint string) error {
	return h.Exec(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "taint nodes --overwrite %s %s", node.Metadata.Hostname, shellescape.Quote(taint)), exec.Sudo(h))
}

// RemoveTaint removes a taint from the node.
func (h *Host) RemoveTaint(node *Host, taint string) error {
	tainted, err := h.Taints(node)
	if err != nil {
		return err
	}
	if !slices.Contains(tainted, taint) {
		// Removing a taint not on the node results in an error, so no action is taken
		return nil
	}
	// the trailing "-" tells kubectl to remove the taint
	return h.Exec(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "taint nodes %s %s-", node.Metadata.Hostname, shellescape.Quote(taint)), exec.Sudo(h))
}

// CheckHTTPStatus performs a web request to the url and returns an error
// unless the response status code is one of the expected ones.
func (h *Host) CheckHTTPStatus(url string, expected ...int) error {
	status, err := h.Configurer.HTTPStatus(h, url)
	if err != nil {
		return err
	}

	if slices.Contains(expected, status) {
		return nil
	}

	// fix: the original formatted the []int slice with %d in a message
	// worded for a single code; %v renders the accepted set properly
	return fmt.Errorf("expected response code to be one of %v but received %d", expected, status)
}

// NeedCurl returns true when the curl package is needed on the host
func (h *Host) NeedCurl() bool {
	// Windows does not need any packages for web requests
	return h.Configurer.Kind() != "windows" && !h.Configurer.CommandExist(h, "curl")
}

// NeedIPTables returns true when the iptables package is needed on the host
//
// Deprecated: iptables is only required for k0s versions that are unsupported
// for a long time already (< v1.22.1+k0s.0).
func (h *Host) NeedIPTables() bool {
	// neither windows hosts nor controllers need iptables
	if h.Configurer.Kind() == "windows" || h.IsController() {
		return false
	}
	return !h.Configurer.CommandExist(h, "iptables")
}

// NeedInetUtils returns true when the inetutils package is needed on the host to run `hostname`.
func (h *Host) NeedInetUtils() bool {
	if h.Configurer.Kind() == "windows" {
		// Windows does not need inetutils
		return false
	}
	return !h.Configurer.CommandExist(h, "hostname")
}

// FileChanged returns true when a remote file has different size or mtime compared to local
// or if an error occurs
func (h *Host) FileChanged(lpath, rpath string) bool {
	lstat, err := gos.Stat(lpath)
	if err != nil {
		// treat stat failure as "changed" so the caller re-uploads
		log.Debugf("%s: local stat failed: %s", h, err)
		return true
	}
	rstat, err := h.Configurer.Stat(h, rpath, exec.Sudo(h))
	if err != nil {
		log.Debugf("%s: remote stat failed: %s", h, err)
		return true
	}

	if lstat.Size() != rstat.Size() {
		log.Debugf("%s: file sizes for %s differ (%d vs %d)", h, lpath, lstat.Size(), rstat.Size())
		return true
	}

	// mtimes are compared exactly; uploads are expected to preserve them
	if !lstat.ModTime().Equal(rstat.ModTime()) {
		log.Debugf("%s: file modtimes for %s differ (%s vs %s)", h, lpath, lstat.ModTime(), rstat.ModTime())
		return true
	}

	return false
}

// ExpandTokens expands percent-sign prefixed tokens in a string, mainly for the download URLs.
// The supported tokens are:
//
//   - %% - literal %
//   - %p - host architecture (arm, arm64, amd64)
//   - %v - k0s version (v1.21.0+k0s.0), URL query-escaped
//   - %x - k0s binary extension (.exe on Windows)
//
// Any unknown token is output as-is with the leading % included.
func (h *Host) ExpandTokens(input string, k0sVersion *version.Version) string {
	if input == "" {
		return ""
	}
	// single byte-wise pass; all token characters are ASCII so multibyte
	// UTF-8 sequences pass through untouched
	builder := strings.Builder{}
	var inPercent bool
	for i := 0; i < len(input); i++ {
		currCh := input[i]
		if inPercent {
			inPercent = false
			switch currCh {
			case '%':
				// Literal %.
				builder.WriteByte('%')
			case 'p':
				// Host architecture (arm, arm64, amd64).
				builder.WriteString(h.Metadata.Arch)
			case 'v':
				// K0s version (v1.21.0+k0s.0), escaped so "+" survives in a URL.
				builder.WriteString(url.QueryEscape(k0sVersion.String()))
			case 'x':
				// K0s binary extension (.exe on Windows).
				if h.IsConnected() && h.IsWindows() {
					builder.WriteString(".exe")
				}
			default:
				// Unknown token, just output it with the leading %.
				builder.WriteByte('%')
				builder.WriteByte(currCh)
			}
		} else if currCh == '%' {
			inPercent = true
		} else {
			builder.WriteByte(currCh)
		}
	}
	if inPercent {
		// Trailing %.
		builder.WriteByte('%')
	}
	return builder.String()
}

// FlagsChanged returns true when the flags have changed by comparing the host.Metadata.K0sStatusArgs to what host.InstallFlags would produce
func (h *Host) FlagsChanged() bool {
	our, err := h.K0sInstallFlags()
	if err != nil {
		log.Warnf("%s: could not get install flags: %s", h, err)
		our = Flags{}
	}
	// --kubelet-extra-args carries a nested flag list, so it is parsed and
	// compared as a flag set rather than as a raw (quoting-sensitive) string
	ex := our.GetValue("--kubelet-extra-args")
	ourExtra, err := NewFlags(ex)
	if err != nil {
		log.Warnf("%s: could not parse local --kubelet-extra-args value %q: %s", h, ex, err)
	}

	// work on a copy so the Delete calls below do not mutate host metadata
	var their Flags
	their = append(their, h.Metadata.K0sStatusArgs...)
	ex = their.GetValue("--kubelet-extra-args")
	theirExtra, err := NewFlags(ex)
	if err != nil {
		log.Warnf("%s: could not parse remote --kubelet-extra-args value %q: %s", h, ex, err)
	}

	if !ourExtra.Equals(theirExtra) {
		log.Debugf("%s: installFlags --kubelet-extra-args seem to have changed: %+v vs %+v", h, theirExtra.Map(), ourExtra.Map())
		return true
	}

	// remove flags that are dropped by k0s or are handled specially
	for _, f := range []string{"--force", "--kubelet-extra-args", "--env", "--data-dir", "--token-file", "--config"} {
		our.Delete(f)
		their.Delete(f)
	}

	if our.Equals(their) {
		log.Debugf("%s: installFlags have not changed", h)
		return false
	}

	log.Debugf("%s: installFlags seem to have changed. existing: %+v new: %+v", h, their.Map(), our.Map())
	return true
}
07070100000094000081A40000000000000000000000016842976900002089000000000000000000000000000000000000004900000000k0sctl-0.25.1/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/host_test.gopackage cluster

import (
	"fmt"
	"testing"

	cfg "github.com/k0sproject/k0sctl/configurer"
	"github.com/k0sproject/k0sctl/configurer/linux"
	"github.com/k0sproject/rig"
	"github.com/k0sproject/rig/exec"
	"github.com/k0sproject/rig/os"
	"github.com/k0sproject/version"
	"github.com/stretchr/testify/require"
)

// TestHostK0sServiceName verifies the role to service-name mapping.
func TestHostK0sServiceName(t *testing.T) {
	for role, expected := range map[string]string{
		"worker":            "k0sworker",
		"controller":        "k0scontroller",
		"controller+worker": "k0scontroller",
	} {
		h := Host{Role: role}
		require.Equal(t, expected, h.K0sServiceName())
	}
}

// mockconfigurer is a test configurer built on the real linux/ubuntu
// configurers, with file operations stubbed out as no-ops.
type mockconfigurer struct {
	cfg.Linux
	linux.Ubuntu
}

// Chmod is a no-op in tests.
func (c *mockconfigurer) Chmod(_ os.Host, _, _ string, _ ...exec.Option) error {
	return nil
}

// MkDir is a no-op in tests.
func (c *mockconfigurer) MkDir(_ os.Host, _ string, _ ...exec.Option) error {
	return nil
}

// K0sCmdf renders a "k0s ..." command line without any binary path resolution.
func (c *mockconfigurer) K0sCmdf(s string, args ...interface{}) string {
	return fmt.Sprintf("k0s %s", fmt.Sprintf(s, args...))
}

// TestK0sJoinTokenPath verifies that an explicit --token-file install flag
// takes precedence over the configurer's default token path.
func TestK0sJoinTokenPath(t *testing.T) {
	h := Host{Configurer: &mockconfigurer{}}
	h.Configurer.SetPath("K0sJoinTokenPath", "from-configurer")

	// no flag set: the configurer default is used
	require.Equal(t, "from-configurer", h.K0sJoinTokenPath())

	// flag set: the flag wins
	h.InstallFlags.Add("--token-file from-install-flags")
	require.Equal(t, "from-install-flags", h.K0sJoinTokenPath())
}

// TestK0sConfigPath verifies the config path precedence: --config flag,
// then -c flag, then the configurer default.
func TestK0sConfigPath(t *testing.T) {
	h := Host{Configurer: &mockconfigurer{}}
	h.Configurer.SetPath("K0sConfigPath", "from-configurer")

	require.Equal(t, "from-configurer", h.K0sConfigPath())

	h.InstallFlags.Add("--config from-install-long-flag")
	require.Equal(t, "from-install-long-flag", h.K0sConfigPath())

	h.InstallFlags.Delete("--config")
	h.InstallFlags.Add("-c from-install-short-flag")
	require.Equal(t, "from-install-short-flag", h.K0sConfigPath())
}

// TestK0sInstallCommand walks the generated "k0s install" command through
// every role; the assertions are order-dependent because the host is
// mutated between steps.
func TestK0sInstallCommand(t *testing.T) {
	h := Host{Role: "worker", DataDir: "/tmp/k0s", KubeletRootDir: "/tmp/kubelet", Connection: rig.Connection{Localhost: &rig.Localhost{Enabled: true}}}
	_ = h.Connect()
	h.Configurer = &mockconfigurer{}
	h.Configurer.SetPath("K0sConfigPath", "from-configurer")
	h.Configurer.SetPath("K0sJoinTokenPath", "from-configurer")

	cmd, err := h.K0sInstallCommand()
	require.NoError(t, err)
	require.Equal(t, `k0s install worker --data-dir=/tmp/k0s --kubelet-root-dir=/tmp/kubelet --token-file=from-configurer`, cmd)

	// a leader controller gets --config but no --token-file
	h.Role = "controller"
	h.Metadata.IsK0sLeader = true
	cmd, err = h.K0sInstallCommand()
	require.NoError(t, err)
	require.Equal(t, `k0s install controller --data-dir=/tmp/k0s --kubelet-root-dir=/tmp/kubelet --config=from-configurer`, cmd)

	// a non-leader controller gets both --token-file and --config
	h.Metadata.IsK0sLeader = false
	cmd, err = h.K0sInstallCommand()
	require.NoError(t, err)
	require.Equal(t, `k0s install controller --data-dir=/tmp/k0s --kubelet-root-dir=/tmp/kubelet --token-file=from-configurer --config=from-configurer`, cmd)

	// controller+worker installs as a controller with --enable-worker
	h.Role = "controller+worker"
	h.Metadata.IsK0sLeader = true
	cmd, err = h.K0sInstallCommand()
	require.NoError(t, err)
	require.Equal(t, `k0s install controller --data-dir=/tmp/k0s --kubelet-root-dir=/tmp/kubelet --enable-worker=true --config=from-configurer`, cmd)

	h.Metadata.IsK0sLeader = false
	cmd, err = h.K0sInstallCommand()
	require.NoError(t, err)
	require.Equal(t, `k0s install controller --data-dir=/tmp/k0s --kubelet-root-dir=/tmp/kubelet --enable-worker=true --token-file=from-configurer --config=from-configurer`, cmd)

	// a worker with a private address gets --node-ip via --kubelet-extra-args
	h.Role = "worker"
	h.PrivateAddress = "10.0.0.9"
	cmd, err = h.K0sInstallCommand()
	require.NoError(t, err)
	require.Equal(t, `k0s install worker --data-dir=/tmp/k0s --kubelet-root-dir=/tmp/kubelet --token-file=from-configurer --kubelet-extra-args=--node-ip=10.0.0.9`, cmd)

	// user-supplied --kubelet-extra-args are merged with the generated ones
	h.InstallFlags = []string{`--kubelet-extra-args="--foo bar"`}
	cmd, err = h.K0sInstallCommand()
	require.NoError(t, err)
	require.Equal(t, `k0s install worker --kubelet-extra-args='--foo bar --node-ip=10.0.0.9' --data-dir=/tmp/k0s --kubelet-root-dir=/tmp/kubelet --token-file=from-configurer`, cmd)

	// Verify that K0sInstallCommand does not modify InstallFlags
	require.Equal(t, `--kubelet-extra-args='--foo bar'`, h.InstallFlags.Join())

	// --node-ip is not injected when a cloud provider is enabled
	h.InstallFlags = []string{`--enable-cloud-provider=true`}
	cmd, err = h.K0sInstallCommand()
	require.NoError(t, err)
	require.Equal(t, `k0s install worker --enable-cloud-provider=true --data-dir=/tmp/k0s --kubelet-root-dir=/tmp/kubelet --token-file=from-configurer`, cmd)
}

// TestK0sResetCommand verifies the generated "k0s reset" command line.
func TestK0sResetCommand(t *testing.T) {
	h := Host{Role: "worker", DataDir: "/tmp/k0s", KubeletRootDir: "/tmp/kubelet", Connection: rig.Connection{Localhost: &rig.Localhost{Enabled: true}}}
	_ = h.Connect()
	h.Configurer = &mockconfigurer{}

	cmd := h.K0sResetCommand()
	require.Equal(t, `k0s reset --data-dir=/tmp/k0s --kubelet-root-dir=/tmp/kubelet`, cmd)
}

// TestValidation verifies that Host.Validate rejects installFlags with
// unbalanced quotes while accepting balanced ones.
func TestValidation(t *testing.T) {
	t.Run("installFlags", func(t *testing.T) {
		h := Host{
			Role:         "worker",
			InstallFlags: []string{"--foo"},
		}
		require.NoError(t, h.Validate())

		// balanced (empty) quote pairs are fine
		h.InstallFlags = []string{`--foo=""`, `--bar=''`}
		require.NoError(t, h.Validate())

		// a dangling double quote fails
		h.InstallFlags = []string{`--foo="`, "--bar"}
		require.ErrorContains(t, h.Validate(), "unbalanced quotes")

		// a dangling single quote fails
		h.InstallFlags = []string{"--bar='"}
		require.ErrorContains(t, h.Validate(), "unbalanced quotes")
	})
}

// TestBinaryPath verifies that k0sBinaryPathDir strips the binary name
// from the configurer's k0s binary path.
func TestBinaryPath(t *testing.T) {
	h := Host{Configurer: &mockconfigurer{}}
	h.Configurer.SetPath("K0sBinaryPath", "/foo/bar/k0s")
	require.Equal(t, "/foo/bar", h.k0sBinaryPathDir())
}

// TestExpandTokens verifies %v/%p/%x token expansion in download URLs,
// including URL-escaping of the "+" in the k0s version.
func TestExpandTokens(t *testing.T) {
	h := Host{Metadata: HostMetadata{Arch: "amd64"}}
	ver, err := version.NewVersion("v1.0.0+k0s.0")
	require.NoError(t, err)
	expanded := h.ExpandTokens("test%20expand/k0s-%v-%p%x", ver)
	require.Equal(t, "test%20expand/k0s-v1.0.0%2Bk0s.0-amd64", expanded)
}

// TestFlagsChanged verifies drift detection between generated install
// flags and the args reported by the running k0s, including quote
// normalization and nested --kubelet-extra-args comparison.
func TestFlagsChanged(t *testing.T) {
	cfg := &mockconfigurer{}
	cfg.SetPath("K0sConfigPath", "/tmp/foo.yaml")
	cfg.SetPath("K0sJoinTokenPath", "/tmp/token")
	t.Run("simple", func(t *testing.T) {
		h := Host{
			Configurer:     cfg,
			DataDir:        "/tmp/data",
			Role:           "controller",
			PrivateAddress: "10.0.0.1",
			InstallFlags:   []string{"--foo"},
			Metadata: HostMetadata{
				K0sStatusArgs: []string{"--foo", "--data-dir=/tmp/data", "--token-file=/tmp/token", "--config=/tmp/foo.yaml"},
			},
		}
		require.False(t, h.FlagsChanged())
		h.InstallFlags = []string{"--bar"}
		require.True(t, h.FlagsChanged())
	})
	t.Run("quoted values", func(t *testing.T) {
		// differently-quoted but equal values must not count as changed
		h := Host{
			Configurer:     cfg,
			DataDir:        "/tmp/data",
			Role:           "controller+worker",
			PrivateAddress: "10.0.0.1",
			InstallFlags:   []string{"--foo='bar'", "--bar=foo"},
			Metadata: HostMetadata{
				K0sStatusArgs: []string{"--foo=bar", `--bar="foo"`, "--enable-worker=true", "--data-dir=/tmp/data", "--token-file=/tmp/token", "--config=/tmp/foo.yaml", "--kubelet-extra-args=--node-ip=10.0.0.1"},
			},
		}
		newFlags, err := h.K0sInstallFlags()
		require.NoError(t, err)
		require.False(t, h.FlagsChanged(), "flags %+v should not be considered different from %+v", newFlags, h.Metadata.K0sStatusArgs)
		h.InstallFlags = []string{"--foo=bar", `--bar="foo"`}
		require.False(t, h.FlagsChanged())
		// an actually different value must be detected
		h.InstallFlags = []string{"--foo=baz", `--bar="foo"`}
		require.True(t, h.FlagsChanged())
	})
	t.Run("kubelet-extra-args and single", func(t *testing.T) {
		h := Host{
			Configurer:     cfg,
			DataDir:        "/tmp/data",
			Role:           "single",
			PrivateAddress: "10.0.0.1",
			InstallFlags:   []string{"--foo='bar'", `--kubelet-extra-args="--bar=foo --foo='bar'"`},
			Metadata: HostMetadata{
				K0sStatusArgs: []string{"--foo=bar", `--kubelet-extra-args="--bar=foo --foo='bar'"`, "--data-dir=/tmp/data", "--single=true", "--token-file=/tmp/token", "--config=/tmp/foo.yaml"},
			},
		}
		flags, err := h.K0sInstallFlags()
		require.NoError(t, err)
		require.Equal(t, `--foo=bar --kubelet-extra-args='--bar=foo --foo='"'"'bar'"'"'' --data-dir=/tmp/data --single=true --token-file=/tmp/token --config=/tmp/foo.yaml`, flags.Join())
		require.False(t, h.FlagsChanged())
		// changed nested kubelet-extra-args must be detected
		h.InstallFlags = []string{"--foo='baz'", `--kubelet-extra-args='--bar=baz --foo="bar"'`}
		flags, err = h.K0sInstallFlags()
		require.NoError(t, err)
		require.Equal(t, `--foo=baz --kubelet-extra-args='--bar=baz --foo="bar"' --data-dir=/tmp/data --single=true --token-file=/tmp/token --config=/tmp/foo.yaml`, flags.Join())
		require.True(t, h.FlagsChanged())
	})
}
07070100000095000081A40000000000000000000000016842976900000FA8000000000000000000000000000000000000004500000000k0sctl-0.25.1/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/hosts.gopackage cluster

import (
	"context"
	"fmt"
	"strings"
	"sync"
)

// Hosts is a collection of destination hosts for k0sctl operations.
type Hosts []*Host

// Validate checks the host collection: every host must validate, the
// 'single' role may only appear in a one-host cluster, hosts must be
// unique and at least one controller role is required.
func (hosts Hosts) Validate() error {
	if len(hosts) == 0 {
		return fmt.Errorf("at least one host required")
	}

	hostmap := make(map[string]struct{}, len(hosts))
	for idx, h := range hosts {
		// fix: previously per-host validation only ran for multi-host
		// configurations, so a single-host config was never validated
		if err := h.Validate(); err != nil {
			return fmt.Errorf("host #%d: %v", idx+1, err)
		}
		if len(hosts) > 1 {
			if h.Role == "single" {
				return fmt.Errorf("%d hosts defined but includes a host with role 'single': %s", len(hosts), h)
			}
			if _, ok := hostmap[h.String()]; ok {
				return fmt.Errorf("%s: is not unique", h)
			}
			hostmap[h.String()] = struct{}{}
		}
	}

	if len(hosts.Controllers()) < 1 {
		return fmt.Errorf("no hosts with a controller role defined")
	}

	return nil
}

// First returns the first host in the list, or nil when the list is empty.
func (hosts Hosts) First() *Host {
	for _, h := range hosts {
		return h
	}
	return nil
}

// Last returns the last host in the list, or nil when the list is empty.
func (hosts Hosts) Last() *Host {
	if len(hosts) == 0 {
		return nil
	}
	return hosts[len(hosts)-1]
}

// Find returns the first Host for which the given predicate returns true,
// or nil when no host matches.
func (hosts Hosts) Find(filter func(h *Host) bool) *Host {
	for _, host := range hosts {
		if filter(host) {
			return host
		}
	}
	return nil
}

// Filter returns the hosts for which the given predicate returns true.
func (hosts Hosts) Filter(filter func(h *Host) bool) Hosts {
	matched := make(Hosts, 0, len(hosts))
	for _, host := range hosts {
		if !filter(host) {
			continue
		}
		matched = append(matched, host)
	}
	return matched
}

// WithRole returns a filtered list of Hosts that have the given role.
func (hosts Hosts) WithRole(s string) Hosts {
	return hosts.Filter(func(h *Host) bool { return h.Role == s })
}

// Controllers returns the hosts that act as cluster controllers.
func (hosts Hosts) Controllers() Hosts {
	return hosts.Filter(func(h *Host) bool {
		return h.IsController()
	})
}

// Workers returns the hosts that have the "worker" role.
func (hosts Hosts) Workers() Hosts {
	return hosts.Filter(func(h *Host) bool { return h.Role == "worker" })
}

// Each runs each of the given functions sequentially on every Host. One
// function is completed for all hosts before the next one starts. Processing
// stops at the first error or when the context is done.
func (hosts Hosts) Each(ctx context.Context, filters ...func(context.Context, *Host) error) error {
	for _, fn := range filters {
		for _, host := range hosts {
			if err := ctx.Err(); err != nil {
				return fmt.Errorf("error from context: %w", err)
			}
			if err := fn(ctx, host); err != nil {
				return err
			}
		}
	}
	return nil
}

// ParallelEach runs each of the given functions on every Host concurrently.
// All hosts complete a function before the next one is started. All failures
// are collected and returned as a single combined error.
func (hosts Hosts) ParallelEach(ctx context.Context, filters ...func(context.Context, *Host) error) error {
	var (
		wg       sync.WaitGroup
		mu       sync.Mutex
		failures []string
	)

	// record appends a failure message under the mutex
	record := func(msg string) {
		mu.Lock()
		defer mu.Unlock()
		failures = append(failures, msg)
	}

	for _, fn := range filters {
		for _, host := range hosts {
			wg.Add(1)
			go func(h *Host) {
				defer wg.Done()
				if err := ctx.Err(); err != nil {
					record(fmt.Sprintf("error from context: %v", err))
					return
				}
				if err := fn(ctx, h); err != nil {
					record(fmt.Sprintf("%s: %s", h.String(), err.Error()))
				}
			}(host)
		}
		// wait for the current function to finish on all hosts before moving on
		wg.Wait()
	}

	if len(failures) > 0 {
		return fmt.Errorf("failed on %d hosts:\n - %s", len(failures), strings.Join(failures, "\n - "))
	}

	return nil
}

// BatchedParallelEach runs a function (or multiple functions chained) on every
// Host in parallel batches of at most batchSize hosts. A batchSize of less
// than one is treated as "all hosts in a single batch".
func (k0sHosts Hosts) BatchedParallelEach(ctx context.Context, batchSize int, filter ...func(context.Context, *Host) error) error {
	hosts := k0sHosts
	if batchSize < 1 {
		// guard: the unguarded "i += batchSize" below would otherwise spin
		// forever without making progress when batchSize is zero or negative
		batchSize = len(hosts)
	}
	for i := 0; i < len(hosts); i += batchSize {
		end := i + batchSize
		if end > len(hosts) {
			end = len(hosts)
		}
		if err := ctx.Err(); err != nil {
			return fmt.Errorf("error from context: %w", err)
		}
		if err := hosts[i:end].ParallelEach(ctx, filter...); err != nil {
			return err
		}
	}

	return nil
}
07070100000096000081A40000000000000000000000016842976900000460000000000000000000000000000000000000004A00000000k0sctl-0.25.1/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/hosts_test.gopackage cluster

import (
	"context"
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

// TestHostsEach covers the sequential Each helper: normal traversal,
// cancellation mid-run and error propagation.
func TestHostsEach(t *testing.T) {
	hosts := Hosts{
		&Host{Role: "controller"},
		&Host{Role: "worker"},
	}

	t.Run("success", func(t *testing.T) {
		var visited []string
		collect := func(_ context.Context, h *Host) error {
			visited = append(visited, h.Role)
			return nil
		}
		require.NoError(t, hosts.Each(context.Background(), collect))
		require.Len(t, visited, 2)
		require.ElementsMatch(t, []string{"controller", "worker"}, visited)
	})

	t.Run("context cancel", func(t *testing.T) {
		var calls int
		ctx, cancel := context.WithCancel(context.Background())

		err := hosts.Each(ctx, func(ctx context.Context, h *Host) error {
			calls++
			cancel()
			return nil
		})
		require.Error(t, err)
		require.ErrorContains(t, err, "cancel")
		// cancellation is noticed before the second host is visited
		require.Equal(t, 1, calls)
	})

	t.Run("error", func(t *testing.T) {
		err := hosts.Each(context.Background(), func(_ context.Context, _ *Host) error {
			return errors.New("test")
		})
		require.Error(t, err)
		require.ErrorContains(t, err, "test")
	})
}
07070100000097000081A40000000000000000000000016842976900001979000000000000000000000000000000000000004300000000k0sctl-0.25.1/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/k0s.gopackage cluster

import (
	"compress/gzip"
	"context"
	"encoding/base64"
	"fmt"
	"io"
	"strings"
	"time"

	"github.com/jellydator/validation"

	"al.essio.dev/pkg/shellescape"
	"github.com/creasty/defaults"
	"github.com/k0sproject/dig"
	"github.com/k0sproject/k0sctl/pkg/retry"
	"github.com/k0sproject/rig/exec"
	"github.com/k0sproject/version"
	"gopkg.in/yaml.v2"
)

// K0sMinVersion is the minimum supported k0s version
const K0sMinVersion = "0.11.0-rc1"

var (
	// k0sSupportedVersion is K0sMinVersion pre-parsed for comparisons.
	k0sSupportedVersion           = version.MustParse(K0sMinVersion)
	// k0sDynamicConfigSince is the first k0s version that supports dynamic config.
	k0sDynamicConfigSince         = version.MustParse("1.22.2+k0s.2")
	// k0sTokenCreateConfigFlagUntil is the last k0s version for which "token create"
	// is passed an explicit --config flag (see GenerateToken).
	k0sTokenCreateConfigFlagUntil = version.MustParse("v1.23.4-rc.1+k0s.0")
)

// K0s holds configuration for bootstrapping a k0s cluster
type K0s struct {
	Version        *version.Version `yaml:"version,omitempty"`       // desired k0s version; nil means unset
	VersionChannel string           `yaml:"versionChannel,omitempty"` // "stable" or "latest" when set (see Validate)
	DynamicConfig  bool             `yaml:"dynamicConfig,omitempty" default:"false"` // enable k0s dynamic configuration (requires k0s >= k0sDynamicConfigSince)
	Config         dig.Mapping      `yaml:"config,omitempty"`        // embedded k0s configuration document
	Metadata       K0sMetadata      `yaml:"-"`                        // runtime-gathered info, never serialized
}

// K0sMetadata contains gathered information about k0s cluster
type K0sMetadata struct {
	ClusterID        string // kube-system namespace uid, used as a cluster identifier (see GetClusterID)
	VersionDefaulted bool   // presumably true when no version was given and one was filled in automatically - set outside this file
}

// UnmarshalYAML implements yaml.Unmarshaler: it decodes the document into the
// struct and then applies the struct tag defaults.
func (k *K0s) UnmarshalYAML(unmarshal func(interface{}) error) error {
	// alias type drops the custom unmarshaler to avoid infinite recursion
	type k0s K0s
	if err := unmarshal((*k0s)(k)); err != nil {
		return err
	}

	return defaults.Set(k)
}

// MarshalYAML implements the yaml.Marshaler interface. A nil or effectively
// empty K0s section marshals to nothing instead of an empty mapping.
func (k *K0s) MarshalYAML() (interface{}, error) {
	if k == nil {
		return nil, nil
	}
	// alias type drops the custom marshaler to avoid infinite recursion
	type k0s K0s
	alias := (*k0s)(k)

	out, err := yaml.Marshal(alias)
	if err != nil {
		return nil, fmt.Errorf("marshal k0s: %w", err)
	}

	// when nothing would be emitted, omit the whole key
	if string(out) == "{}\n" {
		return nil, nil
	}

	return alias, nil
}

// SetDefaults normalizes the version field: a zero-valued version is treated
// as if no version was given at all.
func (k *K0s) SetDefaults() {
	if k.Version != nil && k.Version.IsZero() {
		k.Version = nil
	}
}

// validateVersion checks that the value is a *version.Version and, when one
// is set, that it is at least the minimum supported k0s version.
func validateVersion(value interface{}) error {
	v, ok := value.(*version.Version)
	switch {
	case !ok:
		return fmt.Errorf("not a version")
	case v == nil || v.IsZero():
		// unset version is acceptable
		return nil
	case v.LessThan(k0sSupportedVersion):
		return fmt.Errorf("minimum supported k0s version is %s", k0sSupportedVersion)
	default:
		return nil
	}
}

// Validate checks the k0s settings: the version must be supported, dynamic
// config may only be enabled on versions that support it, and the version
// channel, when set, must be "stable" or "latest".
func (k *K0s) Validate() error {
	return validation.ValidateStruct(k,
		validation.Field(&k.Version, validation.By(validateVersion)),
		validation.Field(&k.DynamicConfig, validation.By(k.validateMinDynamic())),
		validation.Field(&k.VersionChannel, validation.In("stable", "latest"), validation.When(k.VersionChannel != "")),
	)
}

// validateMinDynamic returns a validator that ensures dynamic config is only
// enabled when the configured k0s version supports it.
func (k *K0s) validateMinDynamic() func(interface{}) error {
	return func(value interface{}) error {
		enabled, ok := value.(bool)
		if !ok {
			return fmt.Errorf("not a boolean")
		}
		if !enabled {
			return nil
		}

		v := k.Version
		if v == nil || v.IsZero() {
			// no explicit version, nothing to compare against
			return nil
		}
		if v.LessThan(k0sDynamicConfigSince) {
			return fmt.Errorf("dynamic config only available since k0s version %s", k0sDynamicConfigSince)
		}

		return nil
	}
}

// NodeConfig returns a stripped-down copy of the k0s config containing only
// the api, network and storage sections of the spec plus identifying fields.
func (k *K0s) NodeConfig() dig.Mapping {
	return dig.Mapping{
		"apiVersion": k.Config.DigString("apiVersion"),
		"kind":       k.Config.DigString("kind"),
		// NOTE(review): the capitalized "Metadata" key looks inconsistent with
		// the lowercase "apiVersion"/"kind"/"spec" keys (the value is read from
		// lowercase "metadata") - confirm the consumer really expects "Metadata".
		"Metadata": dig.Mapping{
			"name": k.Config.DigMapping("metadata")["name"],
		},
		"spec": dig.Mapping{
			"api":     k.Config.DigMapping("spec", "api"),
			"network": k.Config.DigMapping("spec", "network"),
			"storage": k.Config.DigMapping("spec", "storage"),
		},
	}
}

// GenerateToken runs the k0s token create command on the host and returns the
// produced join token for the given role with the given expiry time. The
// command is retried with an adaptive timeout to tolerate transient failures.
func (k *K0s) GenerateToken(ctx context.Context, h *Host, role string, expiry time.Duration) (string, error) {
	var k0sFlags Flags
	k0sFlags.Add(fmt.Sprintf("--role %s", role))
	k0sFlags.Add(fmt.Sprintf("--expiry %s", expiry))

	k0sFlags.AddOrReplace(fmt.Sprintf("--data-dir=%s", h.K0sDataDir()))

	// older k0s versions (up to k0sTokenCreateConfigFlagUntil) need the config
	// file path passed explicitly
	if k.Version.LessThanOrEqual(k0sTokenCreateConfigFlagUntil) {
		k0sFlags.Add(fmt.Sprintf("--config %s", shellescape.Quote(h.K0sConfigPath())))
	}

	var token string
	err := retry.AdaptiveTimeout(ctx, retry.DefaultTimeout, func(_ context.Context) error {
		output, err := h.ExecOutput(h.Configurer.K0sCmdf("token create %s", k0sFlags.Join()), exec.HideOutput(), exec.Sudo(h))
		if err != nil {
			return fmt.Errorf("create token: %w", err)
		}
		token = output
		return nil
	})
	return token, err
}

// GetClusterID uses kubectl on the host to fetch the kube-system namespace
// uid, which serves as a unique identifier for the cluster.
func (k *K0s) GetClusterID(h *Host) (string, error) {
	return h.ExecOutput(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "get -n kube-system namespace kube-system -o template={{.metadata.uid}}"), exec.Sudo(h))
}

// TokenData is data collected from a decoded k0s token
type TokenData struct {
	ID         string // token id (the part of the embedded user token before the first ".")
	URL        string // kube api server url from the first cluster entry of the embedded kubeconfig
	Token      string // the original, still-encoded token string
	Kubeconfig []byte // the decoded kubeconfig carried inside the token
}

// ParseToken returns TokenData for a token string. A k0s join token is a
// base64 encoded, gzip compressed kubeconfig; the token id, the api server
// url and the raw kubeconfig are extracted from it.
func ParseToken(s string) (TokenData, error) {
	data := TokenData{Token: s}

	// validate the base64 payload up front to yield a clear error message.
	// (the decoded result itself is not needed - decoding is redone below in
	// streaming mode - so the original pre-allocated buffer was dead weight)
	if _, err := base64.StdEncoding.DecodeString(s); err != nil {
		return data, fmt.Errorf("failed to decode token: %w", err)
	}

	b64r := base64.NewDecoder(base64.StdEncoding, strings.NewReader(s))
	gzr, err := gzip.NewReader(b64r)
	if err != nil {
		return data, fmt.Errorf("failed to create a reader for token: %w", err)
	}
	defer gzr.Close()

	c, err := io.ReadAll(gzr)
	if err != nil {
		return data, fmt.Errorf("failed to uncompress token: %w", err)
	}
	data.Kubeconfig = c

	cfg := dig.Mapping{}
	if err := yaml.Unmarshal(c, &cfg); err != nil {
		return data, fmt.Errorf("failed to unmarshal token: %w", err)
	}

	// the token id is the prefix of the first user's bearer token
	users, ok := cfg.Dig("users").([]interface{})
	if !ok || len(users) < 1 {
		return data, fmt.Errorf("failed to find users in token")
	}

	user, ok := users[0].(dig.Mapping)
	if !ok {
		return data, fmt.Errorf("failed to find user in token")
	}

	token, ok := user.Dig("user", "token").(string)
	if !ok {
		return data, fmt.Errorf("failed to find user token in token")
	}

	idx := strings.IndexRune(token, '.')
	if idx < 0 {
		return data, fmt.Errorf("failed to find separator in token")
	}

	data.ID = token[0:idx]

	// the api server address comes from the first cluster entry
	clusters, ok := cfg.Dig("clusters").([]interface{})
	if !ok || len(clusters) < 1 {
		return data, fmt.Errorf("failed to find clusters in token")
	}
	cluster, ok := clusters[0].(dig.Mapping)
	if !ok {
		return data, fmt.Errorf("failed to find cluster in token")
	}
	url := cluster.DigString("cluster", "server")
	if url == "" {
		return data, fmt.Errorf("failed to find cluster url in token")
	}
	data.URL = url

	return data, nil
}
07070100000098000081A40000000000000000000000016842976900000B33000000000000000000000000000000000000004800000000k0sctl-0.25.1/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/k0s_test.gopackage cluster

import (
	"testing"

	"github.com/creasty/defaults"
	"github.com/k0sproject/version"
	"github.com/stretchr/testify/require"
	"gopkg.in/yaml.v2"
)

// TestParseToken decodes a captured real-world k0s join token and verifies
// that the token id and api server url are extracted from it.
func TestParseToken(t *testing.T) {
	token := "H4sIAAAAAAAC/2xVXY/iOBZ9r1/BH6geO4GeAWkfKiEmGGLKjn1N/BbidAFOgjuk+Frtf18V3SPtSvN2fc/ROdaVfc9L6Q9Q9+fDqZuNLvilaj7PQ92fZy+vo9/17GU0Go3OdX+p+9loPwz+PPvjD/xn8A3/+Q19C2bfx+Pwyanqfjj8OFTlUL+Wn8P+1B+G+6sth3I2WudoWOc4FspSeYjmAqjKlaEcESWeGBpih2muRCQSNucavEEkzBWNDGoApDV1t19W6uNSbJsyRzS1mPc7TVdiDknV0qNFQmjl1zvsaZmao3RECHVd8YZEFtlEgGW8ISmXBIQiY6km+wwbr5v9yoIvVHs71pL81CAio0yYpQ2DJMFSe1InWHEZMZHQveiqa/3hf2Eg+v/FpKJdnZifHCA2aKK5IwwSsbVzYnZgJkWLdUZ8IbfCZA5CE1hSKhxliZ2rkKRxw2hxZIlSEHMgwFWCckUTi8iTmyNy+ZqJUtktO2Y9C8Wpuk8DsTUT7ehnjt9uBTQ0T7yDB9nyw+A4Tlb5wt2NbHgB5LSJpwvR2Ytpp6oKm/lG2ZvUZoDERjs9vubzamxJcZEaX6vDwLKWFeUWIoOqi7z/hWx7c2q77DfcJ5BkQQFAyxYw6xix8BZILAar8Ha3GM7l420ssZ/UZE/rrQtUytSus4ssXGKOissKkdgiOskw1fowPKRqxnFLPy0hj1pPvV6IC0t4AOhGgZDlZjFdGYdXLBVZBozKrUccW6Ra2mQNm5sF9bsHXRVqv8lB7E3XmNyZjKHTSm7Jp82HyxoJDom56HY8zgFa6/xCoOtdIL8qF8t71rDUYBZAI247ZHnpiluZn+9WNu8GsvEusFuOpvNS20J/+GUN1aN2U2kfpFQouVaBj3PsW6VgXwXVeJfSd4DlLdN2JR+gqoAed8hEBcB7OXc4J3Dl2jLuSCQCL0pHo9jhiCU2ygCcSC3hh2moFEQWNTFvfaQS2snGLJXDMdfFWCiquBKRUh8XqZZXgZIbaJEYTLbcUQnBtLDkY8VbWuzmMAhH97ka1tWWKN1lvQFLICEb3tq+0vu+VNXEPqKvN/gQjkQSsejLv3BsUjTRNk8mpNbMF46d1Ju/SURPRWihBOJtS5eVwp9ZQhvIB8+UCo1ksSXg7IPcS2wNc35cphHKVKNE4rebbSR2ODpxd5uYAA/VfH+JW9Jt1GRv231eJ9mj1uao2+Z7pRrB2ulP4+xF5kOxDtUF3PLKJXmXCb4XgQmzuRFVmmGZnCaA/nrIBdCvuRduvMpVs8lcNi7UcDVhRG0A93JLYpP66yqYgJoLoZumlQ9x2xFD8znIkux77oacdWqSdZSVyjCWnkKmb+9WDz/Nh5+b9O1SIDIUHaC6bW5V4qFsYSnSRmUIloXCuV1MaE7IsQAxBkR5ndqASRZtFDVGm7VszHGzwEfhJqzUzTV2tMi1iG369dfsmjVvkxKKfhMPgjsccEUPLMmCTcJCsTDrfGHGdXsOJcBpo4ezQd7sQroC3EQrdLtVD+Z16lZCY58rEO8SrX7vZiId/+AIckiaRa5YBIl67uU1P/3rZTTqyraejRw6v1Snbqhvw6+U+FX/Som/I+PJ+mp8np+nz13d1MPr7nQazkNf+v9X++z7uhte/1Z6Nt2hs7NRfOp+HD5efF//qPu6q+rzbPTv/7x8qT7Nf4v8g/zT+HmF4eTqbjY6fD+E949vVzeZ7vHx8mM6uPCATi//DQAA//+MVAsnAgcAAA=="

	tokendata, err := ParseToken(token)
	require.NoError(t, err)
	require.Equal(t, "i6i3yg", tokendata.ID)
	require.Equal(t, "https://172.17.0.2:6443", tokendata.URL)
}

// TestUnmarshal verifies yaml decoding of the k0s section with and without an
// explicit version.
func TestUnmarshal(t *testing.T) {
	t.Run("version given", func(t *testing.T) {
		k0s := &K0s{}
		require.NoError(t, yaml.Unmarshal([]byte("version: 0.11.0-rc1\ndynamicConfig: false\n"), k0s))
		// the version string gets normalized with a "v" prefix
		require.Equal(t, "v0.11.0-rc1", k0s.Version.String())
		require.NoError(t, k0s.Validate())
	})

	t.Run("version not given", func(t *testing.T) {
		k0s := &K0s{}
		require.NoError(t, yaml.Unmarshal([]byte("dynamicConfig: false\n"), k0s))
		require.NoError(t, k0s.Validate())
	})
}

// TestVersionDefaulting verifies that applying defaults to a K0s with an
// explicit version keeps it valid.
func TestVersionDefaulting(t *testing.T) {
	t.Run("version given", func(t *testing.T) {
		k0s := K0s{Version: version.MustParse("v0.11.0-rc1")}
		require.NoError(t, defaults.Set(&k0s))
		require.NoError(t, k0s.Validate())
	})
}
07070100000099000081A40000000000000000000000016842976900001568000000000000000000000000000000000000004700000000k0sctl-0.25.1/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/options.gopackage cluster

import (
	"fmt"
	"strings"
	"time"

	"al.essio.dev/pkg/shellescape"
	"github.com/creasty/defaults"
	"github.com/jellydator/validation"
)

// Options for cluster operations.
type Options struct {
	Wait        WaitOption        `yaml:"wait"`        // whether to wait for operations to settle
	Drain       DrainOption       `yaml:"drain"`       // node draining behavior
	Concurrency ConcurrencyOption `yaml:"concurrency"` // parallelism limits
	EvictTaint  EvictTaintOption  `yaml:"evictTaint"`  // taint applied before disruptive operations
}

// UnmarshalYAML implements the yaml.Unmarshaler interface for Options. It
// decodes into an alias type (to bypass this method) and applies defaults.
func (o *Options) UnmarshalYAML(unmarshal func(interface{}) error) error {
	type options Options
	raw := options{}

	if err := unmarshal(&raw); err != nil {
		return err
	}
	if err := defaults.Set(&raw); err != nil {
		return fmt.Errorf("failed to set defaults for options: %w", err)
	}

	*o = Options(raw)
	return nil
}

// WaitOption controls the wait behavior for cluster operations.
type WaitOption struct {
	Enabled bool `yaml:"enabled" default:"true"` // waiting is on by default
}

// DrainOption controls the drain behavior for cluster operations. The fields
// map to "kubectl drain" flags (see ToKubectlArgs).
type DrainOption struct {
	Enabled                  bool          `yaml:"enabled" default:"true"`
	GracePeriod              time.Duration `yaml:"gracePeriod" default:"120s"`              // pod termination grace period
	Timeout                  time.Duration `yaml:"timeout" default:"300s"`                  // overall drain timeout
	Force                    bool          `yaml:"force" default:"true"`                    // drain even unmanaged pods
	IgnoreDaemonSets         bool          `yaml:"ignoreDaemonSets" default:"true"`
	DeleteEmptyDirData       bool          `yaml:"deleteEmptyDirData" default:"true"`
	PodSelector              string        `yaml:"podSelector" default:""`                  // label selector limiting which pods to drain
	SkipWaitForDeleteTimeout time.Duration `yaml:"skipWaitForDeleteTimeout" default:"0s"`
}

// ToKubectlArgs renders the drain options as a space separated list of
// "kubectl drain" command line arguments.
func (d *DrainOption) ToKubectlArgs() string {
	var args []string

	// appendIf adds the argument only when its condition holds
	appendIf := func(cond bool, arg string) {
		if cond {
			args = append(args, arg)
		}
	}

	appendIf(d.Force, "--force")
	appendIf(d.GracePeriod > 0, fmt.Sprintf("--grace-period=%d", int(d.GracePeriod.Seconds())))
	appendIf(d.Timeout > 0, fmt.Sprintf("--timeout=%s", d.Timeout))
	appendIf(d.PodSelector != "", fmt.Sprintf("--pod-selector=%s", shellescape.Quote(d.PodSelector)))
	appendIf(d.SkipWaitForDeleteTimeout > 0, fmt.Sprintf("--skip-wait-for-delete-timeout=%s", d.SkipWaitForDeleteTimeout))
	appendIf(d.DeleteEmptyDirData, "--delete-emptydir-data")
	appendIf(d.IgnoreDaemonSets, "--ignore-daemonsets")

	return strings.Join(args, " ")
}

// UnmarshalYAML implements the yaml.Unmarshaler interface for DrainOption. It
// decodes into an alias type (to bypass this method) and applies defaults.
func (d *DrainOption) UnmarshalYAML(unmarshal func(interface{}) error) error {
	type drainOption DrainOption
	raw := drainOption{}

	if err := unmarshal(&raw); err != nil {
		return err
	}
	if err := defaults.Set(&raw); err != nil {
		return fmt.Errorf("failed to set defaults for drain option: %w", err)
	}

	*d = DrainOption(raw)
	return nil
}

// ConcurrencyOption controls how many hosts are operated on at once.
type ConcurrencyOption struct {
	Limit                   int `yaml:"limit" default:"30"`                   // Max number of hosts to operate on at once
	WorkerDisruptionPercent int `yaml:"workerDisruptionPercent" default:"10"` // Max percentage of hosts to disrupt at once
	Uploads                 int `yaml:"uploads" default:"5"`                  // Max concurrent file uploads
}

// UnmarshalYAML implements the yaml.Unmarshaler interface for
// ConcurrencyOption. It decodes into an alias type (to bypass this method)
// and applies defaults.
func (c *ConcurrencyOption) UnmarshalYAML(unmarshal func(interface{}) error) error {
	type concurrencyOption ConcurrencyOption
	raw := concurrencyOption{}

	if err := unmarshal(&raw); err != nil {
		return err
	}
	if err := defaults.Set(&raw); err != nil {
		return fmt.Errorf("failed to set defaults for concurrency option: %w", err)
	}

	*c = ConcurrencyOption(raw)
	return nil
}

// EvictTaintOption controls whether and how a taint is applied to nodes
// before service-affecting operations like upgrade or reset.
type EvictTaintOption struct {
	Enabled           bool   `yaml:"enabled" default:"false"`
	Taint             string `yaml:"taint" default:"k0sctl.k0sproject.io/evict=true"` // key=value taint to apply
	Effect            string `yaml:"effect" default:"NoExecute"`                      // one of NoExecute, NoSchedule, PreferNoSchedule
	ControllerWorkers bool   `yaml:"controllerWorkers" default:"false"`               // also taint controller+worker nodes
}

// String returns the taint in "<taint>:<effect>" form, or an empty string
// when the option is nil or disabled.
func (e *EvictTaintOption) String() string {
	if e == nil || !e.Enabled {
		return ""
	}
	return fmt.Sprintf("%s:%s", e.Taint, e.Effect)
}

// UnmarshalYAML implements the yaml.Unmarshaler interface for
// EvictTaintOption. It decodes into an alias type (to bypass this method)
// and applies defaults.
func (e *EvictTaintOption) UnmarshalYAML(unmarshal func(interface{}) error) error {
	type evictTaintOption EvictTaintOption
	raw := evictTaintOption{}

	if err := unmarshal(&raw); err != nil {
		return err
	}
	if err := defaults.Set(&raw); err != nil {
		return fmt.Errorf("set defaults for evictTaint: %w", err)
	}

	*e = EvictTaintOption(raw)
	return nil
}

// Validate checks if the EvictTaintOption is valid: when enabled, the taint
// must be in "key=value" form and the effect must be one of the kubernetes
// taint effects.
func (e *EvictTaintOption) Validate() error {
	// a nil or disabled option is always valid
	if e == nil || !e.Enabled {
		return nil
	}

	return validation.ValidateStruct(e,
		validation.Field(&e.Taint,
			validation.Required,
			validation.By(func(value interface{}) error {
				s, _ := value.(string)
				if !strings.Contains(s, "=") {
					return fmt.Errorf("must be in the form key=value")
				}
				return nil
			}),
		),
		validation.Field(&e.Effect,
			validation.Required,
			validation.In("NoExecute", "NoSchedule", "PreferNoSchedule"),
		),
	)
}
0707010000009A000081A4000000000000000000000001684297690000122A000000000000000000000000000000000000004400000000k0sctl-0.25.1/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/spec.gopackage cluster

import (
	"fmt"
	"strings"

	"github.com/creasty/defaults"
	"github.com/jellydator/validation"
	"gopkg.in/yaml.v2"
)

// Spec defines cluster config spec section
type Spec struct {
	Hosts   Hosts   `yaml:"hosts,omitempty"` // destination hosts that make up the cluster
	K0s     *K0s    `yaml:"k0s,omitempty"`   // k0s version and configuration
	Options Options `yaml:"options"`         // operational options (wait/drain/concurrency/evictTaint)

	// k0sLeader caches the controller selected by K0sLeader()
	k0sLeader *Host
}

// UnmarshalYAML sets in some sane defaults when unmarshaling the data from
// yaml. The K0s section is pre-created so it exists even when absent from the
// document.
func (s *Spec) UnmarshalYAML(unmarshal func(interface{}) error) error {
	// alias type drops the custom unmarshaler to avoid infinite recursion
	type spec Spec
	alias := (*spec)(s)
	alias.K0s = &K0s{}

	if err := unmarshal(alias); err != nil {
		return err
	}

	return defaults.Set(s)
}

// MarshalYAML overrides default YAML marshaling to get rid of "k0s: null"
// when nothing is set in spec.k0s.
func (s *Spec) MarshalYAML() (interface{}, error) {
	// alias type drops the custom marshaler to avoid infinite recursion
	type spec Spec
	out := spec(*s)

	// isEmptyK0s also covers the nil case
	if isEmptyK0s(s.K0s) {
		out.K0s = nil
	}

	return out, nil
}

// isEmptyK0s reports whether the k0s section carries no information at all.
func isEmptyK0s(k *K0s) bool {
	switch {
	case k == nil:
		return true
	case k.Config != nil:
		return false
	case k.Version != nil:
		return false
	default:
		// Config is nil here, so it trivially has no entries
		return true
	}
}

// SetDefaults ensures the K0s section exists and has its defaults applied.
func (s *Spec) SetDefaults() {
	if s.K0s != nil {
		return
	}
	s.K0s = &K0s{}
	// best-effort: errors from defaulting a freshly created empty K0s are ignored
	_ = defaults.Set(s.K0s)
}

// K0sLeader returns a controller host that is selected to be a "leader",
// or an initial node, a node that creates join tokens for other controllers.
func (s *Spec) K0sLeader() *Host {
	if s.k0sLeader == nil {
		controllers := s.Hosts.Controllers()

		// Pick the first controller that reports to be running and persist the choice
		for _, h := range controllers {
			if !h.Reset && h.Metadata.K0sBinaryVersion != nil && h.Metadata.K0sRunningVersion != nil {
				s.k0sLeader = h
				break
			}
		}

		// Still nil?  Fall back to first "controller" host, do not persist selection.
		// Note: may return nil when there are no controllers at all.
		if s.k0sLeader == nil {
			return controllers.First()
		}
	}

	return s.k0sLeader
}

// Validate requires hosts to be present and delegates validation to the
// host list and the k0s section.
func (s *Spec) Validate() error {
	return validation.ValidateStruct(s,
		validation.Field(&s.Hosts, validation.Required),
		validation.Field(&s.Hosts),
		validation.Field(&s.K0s),
	)
}

// k0sCPLBConfig is a minimal projection of the k0s configuration used to
// extract keepalived control plane load balancing virtual server addresses.
type k0sCPLBConfig struct {
	Spec struct {
		Network struct {
			ControlPlaneLoadBalancing struct {
				Enabled    bool   `yaml:"enabled"`
				Type       string `yaml:"type"`
				Keepalived struct {
					VirtualServers []struct {
						IPAddress string `yaml:"ipAddress"`
					} `yaml:"virtualServers"`
				} `yaml:"keepalived"`
			} `yaml:"controlPlaneLoadBalancing"`
		} `yaml:"network"`
	} `yaml:"spec"`
}

// clusterExternalAddress returns the external address for the cluster's kube
// api. Precedence: spec.api.externalAddress from the k0s config, then the
// first keepalived CPLB virtual server address, then the leader's address.
// Returns an empty string when none can be determined.
func (s *Spec) clusterExternalAddress() string {
	if s.K0s != nil {
		if a := s.K0s.Config.DigString("spec", "api", "externalAddress"); a != "" {
			return a
		}

		// no explicit externalAddress - look for a CPLB virtual server address
		// by round-tripping the config through yaml into a typed projection
		if cfg, err := yaml.Marshal(s.K0s.Config); err == nil {
			k0scfg := k0sCPLBConfig{}
			if err := yaml.Unmarshal(cfg, &k0scfg); err == nil {
				cplb := k0scfg.Spec.Network.ControlPlaneLoadBalancing
				if cplb.Enabled && cplb.Type == "Keepalived" {
					for _, vs := range cplb.Keepalived.VirtualServers {
						if addr := vs.IPAddress; addr != "" {
							return addr
						}
					}
				}
			}
		}
	}

	if leader := s.K0sLeader(); leader != nil {
		return leader.Address()
	}

	return ""
}

// clusterInternalAddress returns the address cluster members should use to
// reach the leader, preferring its private address when one is configured.
// Returns an empty string when no leader can be determined.
func (s *Spec) clusterInternalAddress() string {
	leader := s.K0sLeader()
	if leader == nil {
		// K0sLeader may return nil when no controllers are defined; guard
		// against a nil dereference the same way clusterExternalAddress does
		return ""
	}
	if leader.PrivateAddress != "" {
		return leader.PrivateAddress
	}
	return leader.Address()
}

// defaultAPIPort is the kube api port used unless overridden in the k0s config.
const defaultAPIPort = 6443

// APIPort returns the kube api port number from spec.api.port of the k0s
// config, falling back to the default 6443.
func (s *Spec) APIPort() int {
	if s.K0s == nil {
		return defaultAPIPort
	}
	port, ok := s.K0s.Config.Dig("spec", "api", "port").(int)
	if !ok {
		return defaultAPIPort
	}
	return port
}

// KubeAPIURL returns an external url to the cluster's kube API
func (s *Spec) KubeAPIURL() string {
	host := formatIPV6(s.clusterExternalAddress())
	return fmt.Sprintf("https://%s:%d", host, s.APIPort())
}

// InternalKubeAPIURL returns a cluster internal url to the cluster's kube API
func (s *Spec) InternalKubeAPIURL() string {
	host := formatIPV6(s.clusterInternalAddress())
	return fmt.Sprintf("https://%s:%d", host, s.APIPort())
}

// NodeInternalKubeAPIURL returns a cluster internal url to the node's kube API.
// By default the API is reached via localhost; when spec.api.onlyBindToAddress
// is set (introduced in k0s 1.30) the API server only listens on the
// configured address, so the node's own address is used instead.
func (s *Spec) NodeInternalKubeAPIURL(h *Host) string {
	addr := "127.0.0.1"

	// guard against a nil K0s section - the other accessors in this file
	// (APIPort, clusterExternalAddress) check s.K0s before digging into it
	if s.K0s != nil {
		if onlyBindAddr, ok := s.K0s.Config.Dig("spec", "api", "onlyBindToAddress").(bool); ok && onlyBindAddr {
			if h.PrivateAddress != "" {
				addr = h.PrivateAddress
			} else {
				addr = h.Address()
			}
		}
	}

	return fmt.Sprintf("https://%s:%d", formatIPV6(addr), s.APIPort())
}

// formatIPV6 wraps the address in square brackets when it looks like an IPv6
// literal (contains a colon), so it can be embedded in a host:port url.
func formatIPV6(address string) string {
	if !strings.Contains(address, ":") {
		return address
	}
	return "[" + address + "]"
}
0707010000009B000081A400000000000000000000000168429769000006D5000000000000000000000000000000000000004900000000k0sctl-0.25.1/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/spec_test.gopackage cluster

import (
	"testing"

	"github.com/k0sproject/dig"
	"github.com/k0sproject/rig"
	"github.com/stretchr/testify/require"
	"gopkg.in/yaml.v2"
)

// TestKubeAPIURL covers the precedence of address sources for the kube api
// url: an explicit externalAddress+port, fallback to the leader's address,
// and a keepalived CPLB virtual server address.
func TestKubeAPIURL(t *testing.T) {
	t.Run("with external address and port", func(t *testing.T) {
		spec := &Spec{
			K0s: &K0s{
				Config: dig.Mapping(map[string]any{
					"spec": dig.Mapping(map[string]any{
						"api": dig.Mapping(map[string]any{
							"port":            6444,
							"externalAddress": "test.example.com",
						}),
					}),
				}),
			}, Hosts: Hosts{
				&Host{
					Role: "controller",
					Connection: rig.Connection{
						SSH: &rig.SSH{
							Address: "10.0.0.1",
						},
					},
				},
			},
		}
		require.Equal(t, "https://test.example.com:6444", spec.KubeAPIURL())
	})

	t.Run("without k0s config", func(t *testing.T) {
		// no config: the leader's public (ssh) address and default port are used
		spec := &Spec{
			Hosts: Hosts{
				&Host{
					Role:           "controller",
					PrivateAddress: "10.0.0.1",
					Connection: rig.Connection{
						SSH: &rig.SSH{
							Address: "192.168.0.1",
						},
					},
				},
			},
		}
		require.Equal(t, "https://192.168.0.1:6443", spec.KubeAPIURL())
	})

	t.Run("with CPLB", func(t *testing.T) {
		// the first keepalived virtual server address wins over host addresses
		specYaml := []byte(`
hosts:
  - role: controller
    ssh:
      address: 192.168.0.1
    privateAddress: 10.0.0.1
k0s:
  config:
    spec:
      network:
        controlPlaneLoadBalancing:
          enabled: true
          type: Keepalived
          keepalived:
            vrrpInstances:
            - virtualIPs: ["192.168.0.10/24"]
              authPass: CPLB
            virtualServers:
            - ipAddress: 192.168.0.10`)

		spec := &Spec{}
		err := yaml.Unmarshal(specYaml, spec)
		require.NoError(t, err)

		require.Equal(t, "https://192.168.0.10:6443", spec.KubeAPIURL())
	})
}
0707010000009C000081A4000000000000000000000001684297690000142A000000000000000000000000000000000000004A00000000k0sctl-0.25.1/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/uploadfile.gopackage cluster

import (
	"fmt"
	"os"
	"path"
	"strconv"
	"strings"

	"github.com/bmatcuk/doublestar/v4"
	"github.com/jellydator/validation"
	log "github.com/sirupsen/logrus"
)

// LocalFile describes a single local file resolved from an upload source,
// with its path relative to UploadFile.Base and a chmod-style permission.
type LocalFile struct {
	Path     string
	PermMode string
}

// UploadFile describes a file to be uploaded for the host
type UploadFile struct {
	Name            string       `yaml:"name,omitempty"` // optional display name for the bundle
	Source          string       `yaml:"src"`            // local path, glob pattern or URL
	DestinationDir  string       `yaml:"dstDir"`         // target directory (alternative to dst)
	DestinationFile string       `yaml:"dst"`            // target file path (alternative to dstDir)
	PermMode        interface{}  `yaml:"perm"`           // file mode as octal number or string
	DirPermMode     interface{}  `yaml:"dirPerm"`        // directory mode as octal number or string
	User            string       `yaml:"user"`
	Group           string       `yaml:"group"`
	PermString      string       `yaml:"-"` // PermMode normalized to an octal string
	DirPermString   string       `yaml:"-"` // DirPermMode normalized to an octal string
	Sources         []*LocalFile `yaml:"-"` // resolved local files, relative to Base
	Base            string       `yaml:"-"` // local base directory of the resolved sources
}

// Validate requires a source and at least one of dst / dstDir to be set.
func (u UploadFile) Validate() error {
	return validation.ValidateStruct(&u,
		validation.Field(&u.Source, validation.Required),
		validation.Field(&u.DestinationFile, validation.Required.When(u.DestinationDir == "").Error("dst or dstdir required")),
		validation.Field(&u.DestinationDir, validation.Required.When(u.DestinationFile == "").Error("dst or dstdir required")),
	)
}

// permToString converts a chmod-style permission given as a string or a
// number into an octal string (e.g. 493 / 0755 -> "0755"). The result is
// sanity-checked digit by digit; unsupported value types yield "" with no
// error. The original combined "case int, float64" branch re-asserted the
// value's type inside the case - the conversion is now done once per case.
func permToString(val interface{}) (string, error) {
	// numToOctal validates a numeric mode and renders it as "0NNN"
	numToOctal := func(num int) (string, error) {
		if num < 0 {
			return "", fmt.Errorf("invalid permission: %d: must be a positive value", num)
		}
		if num == 0 {
			return "", fmt.Errorf("invalid nil permission")
		}
		return fmt.Sprintf("%#o", num), nil
	}

	var s string
	var err error
	switch t := val.(type) {
	case int:
		s, err = numToOctal(t)
	case float64:
		// yaml decoders may hand numbers over as float64
		s, err = numToOctal(int(t))
	case string:
		s = t
	default:
		return "", nil
	}
	if err != nil {
		return s, err
	}

	for i, c := range s {
		n, err := strconv.Atoi(string(c))
		if err != nil {
			return s, fmt.Errorf("failed to parse permission %s: %w", s, err)
		}

		// These could catch some weird octal conversion mistakes
		if i == 1 && n < 4 {
			return s, fmt.Errorf("invalid permission %s: owner would have unconventional access", s)
		}
		if n > 7 {
			return s, fmt.Errorf("invalid permission %s: octal value can't have numbers over 7", s)
		}
	}

	return s, nil
}

// UnmarshalYAML sets in some sane defaults when unmarshaling the data from
// yaml: permissions are normalized into strings and local sources resolved.
func (u *UploadFile) UnmarshalYAML(unmarshal func(interface{}) error) error {
	// alias type drops the custom unmarshaler to avoid infinite recursion
	type uploadFile UploadFile
	if err := unmarshal((*uploadFile)(u)); err != nil {
		return err
	}

	perm, err := permToString(u.PermMode)
	if err != nil {
		return err
	}
	u.PermString = perm

	dirPerm, err := permToString(u.DirPermMode)
	if err != nil {
		return err
	}
	u.DirPermString = dirPerm

	return u.resolve()
}

// String returns the file bundle name, falling back to the source when no
// name is set.
func (u *UploadFile) String() string {
	if u.Name != "" {
		return u.Name
	}
	return u.Source
}

// Owner returns a chown compatible "user:group" string; the trailing colon is
// dropped when no group is set, and the result is empty when neither is set.
func (u *UploadFile) Owner() string {
	owner := u.User + ":" + u.Group
	return strings.TrimSuffix(owner, ":")
}

// isGlob reports whether the string contains any glob pattern characters.
func isGlob(s string) bool {
	for _, c := range s {
		if strings.ContainsRune("*%?[]{}", c) {
			return true
		}
	}
	return false
}

// resolve sets the destination and resolves any globs/local paths into
// u.Sources. URLs get a destination file derived from the source when needed,
// glob patterns and directories are expanded, and a plain file becomes a
// single-entry source list rooted at its directory.
func (u *UploadFile) resolve() error {
	if u.IsURL() {
		// for URLs only the destination needs figuring out; default to the
		// url's basename, inside DestinationDir when one was given
		if u.DestinationFile == "" {
			if u.DestinationDir != "" {
				u.DestinationFile = path.Join(u.DestinationDir, path.Base(u.Source))
			} else {
				u.DestinationFile = path.Base(u.Source)
			}
		}
		return nil
	}

	if isGlob(u.Source) {
		return u.glob(u.Source)
	}

	stat, err := os.Stat(u.Source)
	if err != nil {
		return fmt.Errorf("failed to stat local path for %s: %w", u, err)
	}

	if stat.IsDir() {
		// a directory source means "everything under it, recursively"
		log.Tracef("source %s is a directory, assuming %s/**/*", u.Source, u.Source)
		return u.glob(path.Join(u.Source, "**/*"))
	}

	// single file: inherit the local file's mode unless one was given
	perm := u.PermString
	if perm == "" {
		perm = fmt.Sprintf("%o", stat.Mode())
	}
	u.Base = path.Dir(u.Source)
	u.Sources = []*LocalFile{
		{Path: path.Base(u.Source), PermMode: perm},
	}

	return nil
}

// glob expands the given doublestar glob pattern into u.Sources (directories
// are skipped) with u.Base set to the non-pattern part of the path. It is an
// error when nothing matches, or when multiple files match but a single
// destination file was configured.
func (u *UploadFile) glob(src string) error {
	base, pattern := doublestar.SplitPattern(src)
	u.Base = base
	fsys := os.DirFS(base)
	sources, err := doublestar.Glob(fsys, pattern)
	if err != nil {
		return err
	}

	for _, s := range sources {
		// s is relative to base; abs is only needed for stat/logging
		abs := path.Join(base, s)
		log.Tracef("glob %s found: %s", abs, s)
		stat, err := os.Stat(abs)
		if err != nil {
			return fmt.Errorf("failed to stat file %s: %w", u, err)
		}

		if stat.IsDir() {
			log.Tracef("%s is a directory", abs)
			continue
		}

		// inherit each file's local mode unless an explicit perm was given
		perm := u.PermString
		if perm == "" {
			perm = fmt.Sprintf("%o", stat.Mode())
		}

		u.Sources = append(u.Sources, &LocalFile{Path: s, PermMode: perm})
	}

	if len(u.Sources) == 0 {
		return fmt.Errorf("no files found for %s", u)
	}

	if u.DestinationFile != "" && len(u.Sources) > 1 {
		return fmt.Errorf("found multiple files for %s but single file dst %s defined", u, u.DestinationFile)
	}

	return nil
}

// IsURL returns true if the source is a URL rather than a local path, i.e.
// when it contains a scheme separator such as "https://".
func (u *UploadFile) IsURL() bool {
	return strings.Contains(u.Source, "://")
}
0707010000009D000081A4000000000000000000000001684297690000040F000000000000000000000000000000000000004F00000000k0sctl-0.25.1/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/uploadfile_test.gopackage cluster

import (
	"testing"

	"github.com/stretchr/testify/require"
	"gopkg.in/yaml.v2"
)

// TestPermStringUnmarshalWithOctal verifies that a bare octal number ends up
// in PermString as the string "0755".
func TestPermStringUnmarshalWithOctal(t *testing.T) {
	var u UploadFile
	manifest := `
src: .
dstDir: .
perm: 0755
`
	require.NoError(t, yaml.Unmarshal([]byte(manifest), &u))
	require.Equal(t, "0755", u.PermString)
}

// TestPermStringUnmarshalWithString verifies that a quoted octal string is
// accepted as-is into PermString.
func TestPermStringUnmarshalWithString(t *testing.T) {
	var u UploadFile
	manifest := `
src: .
dstDir: .
perm: "0755"
`
	require.NoError(t, yaml.Unmarshal([]byte(manifest), &u))
	require.Equal(t, "0755", u.PermString)
}

// TestPermStringUnmarshalWithInvalidString verifies that a symbolic mode such
// as "u+rwx" is rejected during unmarshalling.
func TestPermStringUnmarshalWithInvalidString(t *testing.T) {
	var u UploadFile
	manifest := `
src: .
dstDir: .
perm: u+rwx
`
	require.Error(t, yaml.Unmarshal([]byte(manifest), &u))
}

// TestPermStringUnmarshalWithInvalidNumber verifies that a number containing a
// non-octal digit (8) is rejected during unmarshalling.
func TestPermStringUnmarshalWithInvalidNumber(t *testing.T) {
	var u UploadFile
	manifest := `
src: .
dstDir: .
perm: 0800
`
	require.Error(t, yaml.Unmarshal([]byte(manifest), &u))
}

// TestPermStringUnmarshalWithZero verifies that a zero permission mode is
// rejected during unmarshalling.
func TestPermStringUnmarshalWithZero(t *testing.T) {
	var u UploadFile
	manifest := `
src: .
dstDir: .
perm: 0
`
	require.Error(t, yaml.Unmarshal([]byte(manifest), &u))
}
0707010000009E000081A400000000000000000000000168429769000003D1000000000000000000000000000000000000004400000000k0sctl-0.25.1/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster_test.gopackage v1beta1

import (
	"testing"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/version"
	"github.com/stretchr/testify/require"
)

// TestAPIVersionValidation verifies that Validate rejects an unknown
// apiVersion with a specific message and accepts the canonical one.
func TestAPIVersionValidation(t *testing.T) {
	cfg := Cluster{
		Kind:       "cluster",
		APIVersion: "wrongversion",
	}
	require.EqualError(t, cfg.Validate(), "apiVersion: must equal k0sctl.k0sproject.io/v1beta1.")

	cfg.APIVersion = APIVersion
	require.NoError(t, cfg.Validate())
}

// TestK0sVersionValidation verifies that Validate rejects a k0s version below
// the minimum supported one and accepts the minimum itself.
func TestK0sVersionValidation(t *testing.T) {
	cfg := Cluster{
		APIVersion: APIVersion,
		Kind:       "cluster",
		Spec: &cluster.Spec{
			K0s:   &cluster.K0s{Version: version.MustParse("0.1.0")},
			Hosts: cluster.Hosts{&cluster.Host{Role: "controller"}},
		},
	}

	err := cfg.Validate()
	require.Error(t, err)
	require.Contains(t, err.Error(), "minimum supported k0s version")

	cfg.Spec.K0s.Version = version.MustParse(cluster.K0sMinVersion)
	require.NoError(t, cfg.Validate())
}
0707010000009F000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000001B00000000k0sctl-0.25.1/pkg/manifest070701000000A0000081A400000000000000000000000168429769000012FD000000000000000000000000000000000000002500000000k0sctl-0.25.1/pkg/manifest/reader.gopackage manifest

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"os"
	"path"
	"regexp"
	"strings"
	"time"

	"gopkg.in/yaml.v2"
)

// ResourceDefinition represents a single Kubernetes resource definition.
// Only the identifying fields needed by this package are decoded.
type ResourceDefinition struct {
	APIVersion string `yaml:"apiVersion"`
	Kind       string `yaml:"kind"`
	Metadata   struct {
		Name string `yaml:"name"`
	} `yaml:"metadata"`
	// Origin records where the definition came from (e.g. a file name when
	// read from an *os.File); it is never (un)marshalled.
	Origin string `yaml:"-"`
	// Raw holds the document's original bytes verbatim; never (un)marshalled.
	Raw    []byte `yaml:"-"`
}

// fnRe matches characters that are unsafe in file names: anything other than
// word characters, dashes and dots.
var fnRe = regexp.MustCompile(`[^\w\-\.]`)

// safeFn converts input into a filename-safe string by replacing unsafe
// characters with underscores and trimming leading/trailing dots and underscores.
func safeFn(input string) string {
	return strings.Trim(fnRe.ReplaceAllString(input, "_"), "._")
}

// Filename returns a filename compatible name of the resource definition.
func (rd *ResourceDefinition) Filename() string {
	origin := rd.Origin
	if strings.HasSuffix(origin, ".yaml") || strings.HasSuffix(origin, ".yml") {
		// The origin already looks like a yaml file name; reuse its base name.
		return path.Base(origin)
	}

	if name := rd.Metadata.Name; name != "" {
		return fmt.Sprintf("%s-%s.yaml", safeFn(rd.Kind), safeFn(name))
	}

	// No metadata.name: fall back to a timestamped name for uniqueness.
	return fmt.Sprintf("%s-%s-%d.yaml", safeFn(rd.APIVersion), safeFn(rd.Kind), time.Now().UnixNano())
}

// Reader returns a bytes.Reader over the raw resource definition.
func (rd *ResourceDefinition) Reader() *bytes.Reader {
	raw := rd.Raw
	return bytes.NewReader(raw)
}

// Bytes returns the raw resource definition bytes as stored during Parse.
func (rd *ResourceDefinition) Bytes() []byte {
	return rd.Raw
}

// Unmarshal strictly decodes the raw resource definition into the provided
// object, wrapping any error with the definition's origin.
func (rd *ResourceDefinition) Unmarshal(obj any) error {
	err := yaml.UnmarshalStrict(rd.Bytes(), obj)
	if err == nil {
		return nil
	}
	return fmt.Errorf("failed to unmarshal %s: %w", rd.Origin, err)
}

func yamlDocumentSplit(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}

	// Look for the document separator
	sepIndex := bytes.Index(data, []byte("\n---"))
	if sepIndex >= 0 {
		// Return everything up to the separator
		return sepIndex + len("\n---"), data[:sepIndex], nil
	}

	// If at EOF, return the remaining data
	if atEOF {
		return len(data), data, nil
	}

	// Request more data
	return 0, nil, nil
}

// Reader reads Kubernetes resource definitions from input streams.
type Reader struct {
	// IgnoreErrors makes Parse skip undecodable or incomplete documents
	// instead of returning an error.
	IgnoreErrors bool
	// manifests accumulates everything parsed so far, across Parse calls.
	manifests    []*ResourceDefinition
}

func name(r io.Reader) string {
	if n, ok := r.(*os.File); ok {
		return n.Name()
	}
	return "manifest"
}

// Parse parses Kubernetes resource definitions from the provided input stream. They are then available via the Resources() or GetResources(apiVersion, kind) methods.
func (r *Reader) Parse(input io.Reader) error {
	scanner := bufio.NewScanner(input)
	// Fix: raise the token size limit. A single document (CRDs especially)
	// can easily exceed bufio.Scanner's 64KiB default, which would make
	// scanning fail with "token too long".
	scanner.Buffer(make([]byte, 0, 64*1024), 16*1024*1024)
	scanner.Split(yamlDocumentSplit)

	for scanner.Scan() {
		rawChunk := scanner.Bytes()

		// Fix: skip chunks that contain nothing but whitespace, such as the
		// remainder after a trailing document separator. Previously only
		// zero-length chunks were skipped, so input ending in "---\n" caused
		// a spurious "missing apiVersion or kind" error.
		if len(bytes.TrimSpace(rawChunk)) == 0 {
			continue
		}

		rd := &ResourceDefinition{}
		if err := yaml.Unmarshal(rawChunk, rd); err != nil {
			if r.IgnoreErrors {
				continue
			}
			return fmt.Errorf("failed to decode resource %s: %w", name(input), err)
		}

		if rd.APIVersion == "" || rd.Kind == "" {
			if r.IgnoreErrors {
				continue
			}
			return fmt.Errorf("missing apiVersion or kind in resource %s", name(input))
		}

		// Store a copy of the raw chunk; scanner.Bytes() may be overwritten
		// by the next Scan.
		rd.Raw = append([]byte{}, rawChunk...)
		r.manifests = append(r.manifests, rd)
	}

	if err := scanner.Err(); err != nil {
		return fmt.Errorf("error reading input: %w", err)
	}

	return nil
}

// ParseString parses Kubernetes resource definitions from the provided string.
func (r *Reader) ParseString(input string) error {
	reader := strings.NewReader(input)
	return r.Parse(reader)
}

// ParseBytes parses Kubernetes resource definitions from the provided byte slice.
func (r *Reader) ParseBytes(input []byte) error {
	reader := bytes.NewReader(input)
	return r.Parse(reader)
}

// Resources returns all parsed Kubernetes resource definitions.
// Note: the internal slice is returned without copying, so callers share it.
func (r *Reader) Resources() []*ResourceDefinition {
	return r.manifests
}

// Len returns the number of Kubernetes resource definitions parsed so far
// across all Parse calls.
func (r *Reader) Len() int {
	return len(r.manifests)
}

// FilterResources returns the parsed resource definitions accepted by the
// given predicate.
func (r *Reader) FilterResources(filter func(rd *ResourceDefinition) bool) []*ResourceDefinition {
	var matched []*ResourceDefinition
	for _, manifest := range r.manifests {
		if !filter(manifest) {
			continue
		}
		matched = append(matched, manifest)
	}
	return matched
}

// GetResources returns all parsed Kubernetes resource definitions matching
// the provided apiVersion and kind, compared case-insensitively. An error is
// returned when nothing matches.
func (r *Reader) GetResources(apiVersion, kind string) ([]*ResourceDefinition, error) {
	matching := r.FilterResources(func(rd *ResourceDefinition) bool {
		if !strings.EqualFold(rd.APIVersion, apiVersion) {
			return false
		}
		return strings.EqualFold(rd.Kind, kind)
	})

	if len(matching) == 0 {
		return nil, fmt.Errorf("no resources found for apiVersion=%s, kind=%s", apiVersion, kind)
	}
	return matching, nil
}
070701000000A1000081A4000000000000000000000001684297690000100A000000000000000000000000000000000000002A00000000k0sctl-0.25.1/pkg/manifest/reader_test.gopackage manifest_test

import (
	"strings"
	"testing"

	"github.com/k0sproject/k0sctl/pkg/manifest"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestReader_ParseIgnoreErrors(t *testing.T) {
	input := `
apiVersion: v1
kind: Pod
metadata:
  name: pod1
---
invalid_yaml
---
apiVersion: v1
kind: Service
metadata:
  name: service1
`
	reader := strings.NewReader(input)
	r := &manifest.Reader{IgnoreErrors: true}

	err := r.Parse(reader)

	// Ensure no critical errors even with invalid YAML
	require.NoError(t, err, "Parse should not return an error with IgnoreErrors=true")

	// Assert that only valid manifests are parsed
	require.Equal(t, 2, r.Len(), "Expected 2 valid manifests to be parsed")

	// Validate the parsed manifests
	assert.Equal(t, "v1", r.Resources()[0].APIVersion, "Unexpected apiVersion for Pod")
	assert.Equal(t, "Pod", r.Resources()[0].Kind, "Unexpected kind for Pod")
	assert.Equal(t, "v1", r.Resources()[1].APIVersion, "Unexpected apiVersion for Service")
	assert.Equal(t, "Service", r.Resources()[1].Kind, "Unexpected kind for Service")
}

// TestReader_ParseMultipleReaders verifies that one Reader accumulates
// resources across multiple Parse calls and that each resource's Raw field
// retains the input document verbatim.
func TestReader_ParseMultipleReaders(t *testing.T) {
	input1 := `
apiVersion: v1
kind: Pod
metadata:
  name: pod1
`
	input2 := `
apiVersion: v1
kind: Service
metadata:
  name: service1
`
	r := &manifest.Reader{}

	// Parse first reader
	err := r.Parse(strings.NewReader(input1))
	require.NoError(t, err, "Parse should not return an error for input1")

	// Parse second reader
	err = r.Parse(strings.NewReader(input2))
	require.NoError(t, err, "Parse should not return an error for input2")

	// Assert that both manifests are parsed
	require.Equal(t, 2, r.Len(), "Expected 2 manifests to be parsed")

	// Validate the parsed manifests
	pod := r.Resources()[0]
	assert.Equal(t, "v1", pod.APIVersion, "Unexpected apiVersion for Pod")
	assert.Equal(t, "Pod", pod.Kind, "Unexpected kind for Pod")
	// Raw must hold the whole original document, byte for byte
	require.Len(t, pod.Raw, len(input1))

	service := r.Resources()[1]
	assert.Equal(t, "v1", service.APIVersion, "Unexpected apiVersion for Service")
	assert.Equal(t, "Service", service.Kind, "Unexpected kind for Service")
	require.Len(t, service.Raw, len(input2))
}

// TestReader_FilterResources verifies that FilterResources returns only the
// resources accepted by the predicate, with their raw data populated.
func TestReader_FilterResources(t *testing.T) {
	input := `
apiVersion: v1
kind: Pod
metadata:
  name: pod1
---
apiVersion: v1
kind: Service
metadata:
  name: service1
---
apiVersion: v2
kind: Pod
metadata:
  name: pod2
`
	r := &manifest.Reader{}
	require.NoError(t, r.Parse(strings.NewReader(input)))
	v1Pods := r.FilterResources(func(rd *manifest.ResourceDefinition) bool {
		return rd.APIVersion == "v1" && rd.Kind == "Pod"
	})
	v2Pods := r.FilterResources(func(rd *manifest.ResourceDefinition) bool {
		return rd.APIVersion == "v2" && rd.Kind == "Pod"
	})
	// Fix: the failure message claimed 2 v1 Pods while the assertion checks for 1.
	assert.Len(t, v1Pods, 1, "Expected 1 v1 Pod to be returned")
	assert.Len(t, v2Pods, 1, "Expected 1 v2 Pod to be returned")
	assert.Equal(t, "pod1", v1Pods[0].Metadata.Name, "Unexpected name for v1 Pod")
	assert.Equal(t, "pod2", v2Pods[0].Metadata.Name, "Unexpected name for v2 Pod")
	assert.NotEmpty(t, v1Pods[0].Raw, "Expected raw data to be populated")
	assert.NotEmpty(t, v2Pods[0].Raw, "Expected raw data to be populated")
}

// TestReader_GetResources verifies lookup of parsed resources by apiVersion
// and kind.
func TestReader_GetResources(t *testing.T) {
	input := `
apiVersion: v1
kind: Pod
metadata:
  name: pod1
---
apiVersion: v1
kind: Service
metadata:
  name: service1
---
apiVersion: v1
kind: Pod
metadata:
  name: pod2
`
	reader := strings.NewReader(input)
	r := &manifest.Reader{}

	err := r.Parse(reader)
	require.NoError(t, err, "Parse should not return an error")

	// Query for Pods
	pods, err := r.GetResources("v1", "Pod")
	require.NoError(t, err, "GetResources should not return an error for Pods")
	assert.Len(t, pods, 2, "Expected 2 Pods to be returned")

	// Validate Pods
	assert.Equal(t, "Pod", pods[0].Kind, "Unexpected kind for the first Pod")
	assert.Equal(t, "Pod", pods[1].Kind, "Unexpected kind for the second Pod")

	// Query for Services
	services, err := r.GetResources("v1", "Service")
	require.NoError(t, err, "GetResources should not return an error for Services")
	assert.Len(t, services, 1, "Expected 1 Service to be returned")
}
070701000000A2000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000001700000000k0sctl-0.25.1/pkg/node070701000000A3000081A40000000000000000000000016842976900001323000000000000000000000000000000000000002500000000k0sctl-0.25.1/pkg/node/statusfunc.gopackage node

import (
	"context"
	"encoding/json"
	"fmt"
	"strings"
	"time"

	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
	"github.com/k0sproject/rig/exec"

	log "github.com/sirupsen/logrus"
)

// this file contains functions that return functions that can be used with pkg/retry to wait on certain
// status conditions of nodes

// retryFunc is the function signature consumed by the pkg/retry wrappers.
type retryFunc func(context.Context) error

// kubeNodeStatus models the subset of `kubectl get node -o json` output
// inspected here: the per-item condition list.
type kubeNodeStatus struct {
	Items []struct {
		Status struct {
			Conditions []struct {
				Status string `json:"status"`
				Type   string `json:"type"`
			} `json:"conditions"`
		} `json:"status"`
	} `json:"items"`
}

// statusEvents models the subset of `kubectl get events -o json` output
// inspected here: the involved object name, reason and event time.
type statusEvents struct {
	Items []struct {
		InvolvedObject struct {
			Name string `json:"name"`
		} `json:"involvedObject"`
		Reason    string    `json:"reason"`
		EventTime time.Time `json:"eventTime"`
	} `json:"items"`
}

// KubeNodeReadyFunc returns a function that returns an error unless the node
// is ready according to "kubectl get node". The returned function is intended
// to be used with pkg/retry.
func KubeNodeReadyFunc(h *cluster.Host) retryFunc {
	return func(_ context.Context) error {
		// Query only this host's node object via a label selector; the
		// hostname is lowercased for the selector value.
		output, err := h.ExecOutput(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "get node -l kubernetes.io/hostname=%s -o json", strings.ToLower(h.Metadata.Hostname)), exec.HideOutput(), exec.Sudo(h))
		if err != nil {
			return fmt.Errorf("failed to get node status: %w", err)
		}
		status := &kubeNodeStatus{}
		if err := json.Unmarshal([]byte(output), status); err != nil {
			return fmt.Errorf("failed to decode kubectl get node status output: %w", err)
		}
		// Succeed only when a "Ready" condition exists with status "True";
		// a "Ready" condition with any other status fails immediately.
		for _, i := range status.Items {
			for _, c := range i.Status.Conditions {
				if c.Type == "Ready" {
					if c.Status == "True" {
						return nil
					}
					return fmt.Errorf("node %s is not ready", h.Metadata.Hostname)
				}
			}
		}
		return fmt.Errorf("node %s 'Ready' condition not found", h.Metadata.Hostname)
	}
}

// K0sDynamicConfigReadyFunc returns a function that returns an error unless the k0s dynamic config has been reconciled.
// The returned function is intended to be used with pkg/retry.
func K0sDynamicConfigReadyFunc(h *cluster.Host) retryFunc {
	return func(_ context.Context) error {
		// NOTE(review): this builds the command via K0sCmdf ("k0s kubectl ...")
		// and omits exec.HideOutput(), while the other helpers in this file use
		// Configurer.KubectlCmdf with hidden output — confirm whether the
		// difference is intentional.
		output, err := h.ExecOutput(h.Configurer.K0sCmdf("kubectl --data-dir=%s -n kube-system get event --field-selector involvedObject.name=k0s -o json", h.K0sDataDir()), exec.Sudo(h))
		if err != nil {
			return fmt.Errorf("failed to get k0s config status events: %w", err)
		}
		events := &statusEvents{}
		if err := json.Unmarshal([]byte(output), &events); err != nil {
			return fmt.Errorf("failed to decode kubectl output: %w", err)
		}
		// A "SuccessfulReconcile" event on the k0s object signals readiness.
		for _, e := range events.Items {
			if e.Reason == "SuccessfulReconcile" {
				return nil
			}
		}
		return fmt.Errorf("dynamic config not ready")
	}
}

// ScheduledEventsAfterFunc returns a function that returns an error unless a
// kube-system 'Scheduled' event has occurred after the given time.
// The returned function is intended to be used with pkg/retry.
func ScheduledEventsAfterFunc(h *cluster.Host, since time.Time) retryFunc {
	return func(_ context.Context) error {
		output, err := h.ExecOutput(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "-n kube-system get events --field-selector reason=Scheduled -o json"), exec.HideOutput(), exec.Sudo(h))
		if err != nil {
			return fmt.Errorf("failed to get kube system events: %w", err)
		}
		events := &statusEvents{}
		if err := json.Unmarshal([]byte(output), &events); err != nil {
			return fmt.Errorf("failed to decode kubectl output for kube-system events: %w", err)
		}
		// Any event at or after "since" counts; older ones are skipped.
		for _, e := range events.Items {
			if e.EventTime.Before(since) {
				log.Tracef("%s: skipping prior event for %s: %s < %s", h, e.InvolvedObject.Name, e.EventTime.Format(time.RFC3339), since.Format(time.RFC3339))
				continue
			}
			// Fix: "occuring" -> "occurring" in the log message.
			log.Debugf("%s: found a 'Scheduled' event occurring after %s", h, since)
			return nil
		}
		return fmt.Errorf("didn't find any 'Scheduled' kube-system events after %s", since)
	}
}

// HTTPStatusFunc returns a function that returns an error unless a HTTP GET
// to the url responds with one of the expected status codes. The returned
// function is intended to be used with pkg/retry.
func HTTPStatusFunc(h *cluster.Host, url string, expected ...int) retryFunc {
	check := func(_ context.Context) error {
		return h.CheckHTTPStatus(url, expected...)
	}
	return check
}

// ServiceRunningFunc returns a function that returns an error until the
// service is running on the host. Intended for use with pkg/retry.
func ServiceRunningFunc(h *cluster.Host, service string) retryFunc {
	return func(_ context.Context) error {
		if h.Configurer.ServiceIsRunning(h, service) {
			return nil
		}
		return fmt.Errorf("service %s is not running", service)
	}
}

// ServiceStoppedFunc returns a function that returns an error while the
// service is still running on the host. Intended for use with pkg/retry.
func ServiceStoppedFunc(h *cluster.Host, service string) retryFunc {
	return func(_ context.Context) error {
		if !h.Configurer.ServiceIsRunning(h, service) {
			return nil
		}
		return fmt.Errorf("service %s is still running", service)
	}
}
070701000000A4000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000001800000000k0sctl-0.25.1/pkg/retry070701000000A5000081A40000000000000000000000016842976900000E18000000000000000000000000000000000000002100000000k0sctl-0.25.1/pkg/retry/retry.go// Package retry provides simple retry wrappers for functions that return an error
package retry

import (
	"context"
	"errors"
	"fmt"
	"time"

	log "github.com/sirupsen/logrus"
)

var (
	// DefaultTimeout is a default timeout for retry operations
	DefaultTimeout = 2 * time.Minute
	// Interval is the time to wait between retry attempts
	Interval = 5 * time.Second
	// ErrAbort should be returned (wrapped, so it matches errors.Is) when an
	// error occurs on which retrying should be aborted
	ErrAbort = errors.New("retrying aborted")
)

// Context is a retry wrapper that retries the given function until it
// succeeds, returns an error wrapping ErrAbort, or the context is cancelled.
// The first attempt runs immediately; subsequent attempts run every Interval.
// On cancellation, the context error is joined with the last error from f.
func Context(ctx context.Context, f func(ctx context.Context) error) error {
	if err := ctx.Err(); err != nil {
		return err
	}

	// Execute the function immediately for the first try
	lastErr := f(ctx)
	if lastErr == nil || errors.Is(lastErr, ErrAbort) {
		return lastErr
	}

	ticker := time.NewTicker(Interval)
	defer ticker.Stop()

	attempt := 0

	for {
		select {
		case <-ctx.Done():
			log.Tracef("retry.Context: context cancelled after %d attempts", attempt)
			return errors.Join(ctx.Err(), lastErr)
		case <-ticker.C:
			attempt++
			// lastErr is always non-nil here: a nil result returns below
			// before the next tick, so no nil-check is needed (the previous
			// version carried a redundant one).
			log.Debugf("retrying, attempt %d - last error: %v", attempt, lastErr)
			lastErr = f(ctx)

			if errors.Is(lastErr, ErrAbort) {
				log.Tracef("retry.Context: aborted after %d attempts", attempt)
				return lastErr
			}

			if lastErr == nil {
				log.Tracef("retry.Context: succeeded after %d attempts", attempt)
				return nil
			}
			// Idiom fix: no else after a terminating if.
			log.Tracef("retry.Context: attempt %d failed: %s", attempt, lastErr)
		}
	}
}

// Timeout is a retry wrapper that retries the given function until it
// succeeds, the context is cancelled, or the timeout is reached.
func Timeout(ctx context.Context, timeout time.Duration, f func(ctx context.Context) error) error {
	timeoutCtx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	return Context(timeoutCtx, f)
}

// AdaptiveTimeout is like Timeout but applies the given timeout only when the
// given context has no deadline, or has one that would fire later than the
// given timeout; otherwise the parent's stricter deadline is kept.
func AdaptiveTimeout(ctx context.Context, timeout time.Duration, f func(ctx context.Context) error) error {
	if deadline, ok := ctx.Deadline(); ok && deadline.Before(time.Now().Add(timeout)) {
		// The parent deadline is stricter; keep it as-is.
		return Context(ctx, f)
	}
	return Timeout(ctx, timeout, f)
}

// Times is a retry wrapper that retries the given function until it succeeds,
// returns an error wrapping ErrAbort, the context is cancelled, or the given
// number of attempts have been made. The first attempt runs immediately;
// subsequent attempts run every Interval.
func Times(ctx context.Context, times int, f func(context.Context) error) error {
	// Execute the function immediately for the first try
	lastErr := f(ctx)
	if lastErr == nil || errors.Is(lastErr, ErrAbort) {
		return lastErr
	}

	// Fix: previously times <= 1 still performed a second attempt because the
	// limit was only checked after a retry inside the loop.
	if times <= 1 {
		log.Tracef("retry.Times: exceeded %d attempts", times)
		return fmt.Errorf("retry limit exceeded after %d attempts: %w", 1, lastErr)
	}

	i := 1

	ticker := time.NewTicker(Interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			log.Tracef("retry.Times: context cancelled after %d attempts", i)
			return errors.Join(ctx.Err(), lastErr)
		case <-ticker.C:
			// lastErr is always non-nil here; a nil result returns below
			// before the next tick.
			log.Debugf("retrying: attempt %d of %d (previous error: %v)", i+1, times, lastErr)

			lastErr = f(ctx)

			if errors.Is(lastErr, ErrAbort) {
				log.Tracef("retry.Times: aborted after %d attempts", i)
				return lastErr
			}

			if lastErr == nil {
				log.Tracef("retry.Times: succeeded on attempt %d", i)
				return nil
			}

			i++

			if i >= times {
				log.Tracef("retry.Times: exceeded %d attempts", times)
				return fmt.Errorf("retry limit exceeded after %d attempts: %w", times, lastErr)
			}
		}
	}
}
070701000000A6000081A40000000000000000000000016842976900001379000000000000000000000000000000000000002600000000k0sctl-0.25.1/pkg/retry/retry_test.gopackage retry

import (
	"context"
	"errors"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestMain shortens the retry interval for the whole test run so retry loops
// tick quickly, restoring the original value afterwards.
// NOTE(review): m.Run's result is not passed to os.Exit; since Go 1.15,
// returning from TestMain reports it implicitly, which also lets the deferred
// restore run — confirm the module targets Go >= 1.15.
func TestMain(m *testing.M) {
	oldInterval := Interval
	Interval = 1 * time.Millisecond
	defer func() { Interval = oldInterval }()
	m.Run()
}

// TestContext exercises the context-driven retry wrapper.
func TestContext(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	t.Run("succeeds on first try", func(t *testing.T) {
		err := Context(ctx, func(_ context.Context) error {
			return nil
		})
		require.NoError(t, err)
	})

	t.Run("fails when context is canceled between tries", func(t *testing.T) {
		var counter int
		err := Context(ctx, func(_ context.Context) error {
			counter++
			if counter == 2 {
				cancel()
			}
			return errors.New("some error")
		})
		// Fix: the failure message used to be a meaningless "foo".
		assert.Error(t, err, "expected an error when the context is canceled mid-retry")
	})

	t.Run("fails with a canceled context", func(t *testing.T) {
		// Fix: use a locally canceled context instead of relying on the
		// previous subtest having canceled the shared one, so this subtest
		// also passes when run in isolation (e.g. with -run).
		canceledCtx, cancelNow := context.WithCancel(context.Background())
		cancelNow()
		err := Context(canceledCtx, func(_ context.Context) error {
			return errors.New("some error")
		})
		assert.Error(t, err, "expected an error for an already-canceled context")
	})
}

// TestTimeout exercises the timeout-bounded retry wrapper.
func TestTimeout(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	t.Run("succeeds before timeout", func(t *testing.T) {
		err := Timeout(ctx, 10*time.Second, func(_ context.Context) error {
			return nil
		})
		require.NoError(t, err)
	})

	t.Run("fails on timeout", func(t *testing.T) {
		err := Timeout(ctx, 1*time.Millisecond, func(_ context.Context) error {
			time.Sleep(2 * time.Millisecond)
			return errors.New("some error")
		})
		// Fix: the failure message used to be a meaningless "foo".
		assert.Error(t, err, "expected an error when the timeout is exceeded")
	})

	t.Run("stops retrying on ErrAbort", func(t *testing.T) {
		var counter int
		err := Timeout(ctx, 10*time.Second, func(_ context.Context) error {
			counter++
			if counter == 2 {
				return errors.Join(ErrAbort, errors.New("some error"))
			}
			return errors.New("some error")
		})
		assert.Error(t, err, "expected the abort error to be returned")
	})
}

// TestTimes exercises the attempt-limited retry wrapper.
func TestTimes(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	t.Run("succeeds within limit", func(t *testing.T) {
		counter := 0
		err := Times(ctx, 3, func(_ context.Context) error {
			counter++
			if counter == 2 {
				return nil
			}
			return errors.New("some error")
		})
		require.NoError(t, err)
		assert.Equal(t, 2, counter)
	})

	t.Run("fails on reaching limit", func(t *testing.T) {
		var tries int
		err := Times(ctx, 2, func(_ context.Context) error {
			tries++
			return errors.New("some error")
		})
		// Fix: the failure message used to be a meaningless "foo".
		assert.Error(t, err, "expected an error when the attempt limit is reached")
		assert.Equal(t, 2, tries)
	})

	t.Run("stops retrying on ErrAbort", func(t *testing.T) {
		var tries int
		err := Times(ctx, 2, func(_ context.Context) error {
			tries++
			return errors.Join(ErrAbort, errors.New("some error"))
		})
		assert.Error(t, err, "expected the abort error to be returned")
		assert.Equal(t, 1, tries)
	})
}

// TestAdaptiveTimeout exercises deadline selection between the parent context
// and the supplied timeout.
func TestAdaptiveTimeout(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	t.Run("uses existing timeout if present", func(t *testing.T) {
		parentCtx, parentCancel := context.WithTimeout(ctx, 10*time.Millisecond)
		defer parentCancel()

		start := time.Now()
		err := AdaptiveTimeout(parentCtx, 50*time.Millisecond, func(_ context.Context) error {
			time.Sleep(20 * time.Millisecond) // Should be cut off by parent timeout
			return errors.New("some error")
		})
		elapsed := time.Since(start)

		// Fix: assert.Error's extra args are a failure message, not an
		// expected error text; the old "some error" values were misleading.
		assert.Error(t, err, "expected an error from the parent deadline")
		assert.Less(t, elapsed.Milliseconds(), int64(50), "should use parent timeout")
	})

	t.Run("applies new timeout if no parent timeout exists", func(t *testing.T) {
		start := time.Now()
		err := AdaptiveTimeout(ctx, 10*time.Millisecond, func(_ context.Context) error {
			time.Sleep(20 * time.Millisecond) // Should be cut off by the new timeout
			return errors.New("some error")
		})
		elapsed := time.Since(start)

		assert.Error(t, err, "expected an error from the applied timeout")
		assert.GreaterOrEqual(t, elapsed.Milliseconds(), int64(10), "should use new timeout")
	})

	t.Run("uses the earlier deadline if both parent and new timeout exist", func(t *testing.T) {
		parentCtx, parentCancel := context.WithTimeout(ctx, 20*time.Millisecond)
		defer parentCancel()

		start := time.Now()
		err := AdaptiveTimeout(parentCtx, 50*time.Millisecond, func(_ context.Context) error {
			time.Sleep(30 * time.Millisecond) // Should be cut off by the parent context
			return errors.New("some error")
		})
		elapsed := time.Since(start)

		assert.Error(t, err, "expected an error from the earlier deadline")
		assert.Less(t, elapsed.Milliseconds(), int64(50), "should use parent timeout of 20ms")
	})

	t.Run("succeeds before timeout", func(t *testing.T) {
		err := AdaptiveTimeout(ctx, 10*time.Second, func(_ context.Context) error {
			return nil
		})
		require.NoError(t, err)
	})

	t.Run("stops retrying on ErrAbort", func(t *testing.T) {
		var counter int
		err := AdaptiveTimeout(ctx, 10*time.Second, func(_ context.Context) error {
			counter++
			return errors.Join(ErrAbort, errors.New("some error"))
		})
		assert.Error(t, err, "expected the abort error to be returned")
		assert.Equal(t, 1, counter, "should stop retrying on ErrAbort")
	})
}
070701000000A7000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000001900000000k0sctl-0.25.1/smoke-test070701000000A8000081A40000000000000000000000016842976900000031000000000000000000000000000000000000002400000000k0sctl-0.25.1/smoke-test/.gitignorebootloose.yaml
id_rsa*
k0sctl_040
*.tar.gz
*.iid
070701000000A9000081A40000000000000000000000016842976900000582000000000000000000000000000000000000002E00000000k0sctl-0.25.1/smoke-test/Dockerfile.kalilinuxFROM kalilinux/kali-rolling:latest

ENV container docker

# Don't start any optional services except for the few we need.
RUN find /etc/systemd/system \
    /lib/systemd/system \
    -path '*.wants/*' \
    -not -name '*journald*' \
    -not -name '*systemd-tmpfiles*' \
    -not -name '*systemd-user-sessions*' \
    -exec rm \{} \;

RUN apt-get update && \
    apt-get install -y \
    dbus systemd openssh-server net-tools iproute2 iputils-ping curl wget vim-tiny sudo && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

# Truncate machine ID files to trigger regeneration.
RUN >/etc/machine-id
RUN >/var/lib/dbus/machine-id

EXPOSE 22

RUN systemctl set-default multi-user.target
RUN systemctl mask \
    dev-hugepages.mount \
    sys-fs-fuse-connections.mount \
    systemd-update-utmp.service \
    systemd-tmpfiles-setup.service \
    console-getty.service

# This container image doesn't have locales installed. Disable forwarding the
# user locale env variables or we get warnings such as:
#  bash: warning: setlocale: LC_ALL: cannot change locale
RUN sed -i -e 's/^AcceptEnv LANG LC_\*$/#AcceptEnv LANG LC_*/' /etc/ssh/sshd_config
RUN systemctl enable ssh

# This may be needed for some systemd services to start properly.
RUN echo "#!/bin/sh\nexit 0" > /usr/sbin/policy-rc.d

# https://www.freedesktop.org/wiki/Software/systemd/ContainerInterface/
STOPSIGNAL SIGRTMIN+3

CMD ["/bin/bash"]

070701000000AA000081A40000000000000000000000016842976900000735000000000000000000000000000000000000002200000000k0sctl-0.25.1/smoke-test/Makefile
bootloose := $(shell which bootloose)
ifeq ($(bootloose),)
bootloose := $(shell go env GOPATH)/bin/bootloose
endif

envsubst := $(shell which envsubst)
ifeq ($(envsubst),)
$(error 'envsubst' NOT found in path, please install it and re-run)
endif

.PHONY: k0sctl
k0sctl:
	$(MAKE) -C .. k0sctl

$(bootloose):
	go install github.com/k0sproject/bootloose@latest

id_rsa_k0s:
	ssh-keygen -t rsa -f ./id_rsa_k0s -N ""

smoke-basic: $(bootloose) id_rsa_k0s k0sctl
	./smoke-basic.sh

smoke-basic-rootless: $(bootloose) id_rsa_k0s k0sctl
	./smoke-basic-rootless.sh

smoke-basic-openssh: $(bootloose) id_rsa_k0s k0sctl
	./smoke-basic-openssh.sh

smoke-dynamic: $(bootloose) id_rsa_k0s k0sctl
	./smoke-dynamic.sh

smoke-reinstall: $(bootloose) id_rsa_k0s k0sctl
	./smoke-reinstall.sh

smoke-files: $(bootloose) id_rsa_k0s k0sctl
	./smoke-files.sh

smoke-init: $(bootloose) id_rsa_k0s k0sctl
	./smoke-init.sh

smoke-upgrade: $(bootloose) id_rsa_k0s k0sctl
	./smoke-upgrade.sh

smoke-dryrun: $(bootloose) id_rsa_k0s k0sctl
	./smoke-dryrun.sh

smoke-reset: $(bootloose) id_rsa_k0s k0sctl
	./smoke-reset.sh

smoke-os-override: $(bootloose) id_rsa_k0s k0sctl
	BOOTLOOSE_TEMPLATE=bootloose.yaml.osoverride.tpl K0SCTL_CONFIG=k0sctl-single.yaml OS_RELEASE_PATH=$(realpath os-release) OS_OVERRIDE="ubuntu" ./smoke-basic.sh

smoke-downloadurl: $(bootloose) id_rsa_k0s k0sctl
	BOOTLOOSE_TEMPLATE=bootloose.yaml.single.tpl K0SCTL_CONFIG=k0sctl-downloadurl.yaml ./smoke-basic.sh

smoke-backup-restore: $(bootloose) id_rsa_k0s k0sctl
	./smoke-backup-restore.sh

smoke-controller-swap: $(bootloose) id_rsa_k0s k0sctl
	BOOTLOOSE_TEMPLATE=bootloose-controller-swap.yaml.tpl K0SCTL_CONFIG=k0sctl-controller-swap.yaml ./smoke-controller-swap.sh

smoke-multidoc: $(bootloose) id_rsa_k0s k0sctl
	./smoke-multidoc.sh


%.iid: Dockerfile.%
	docker build --iidfile '$@' - < '$<'
070701000000AB000081A400000000000000000000000168429769000001C7000000000000000000000000000000000000003C00000000k0sctl-0.25.1/smoke-test/bootloose-controller-swap.yaml.tplcluster:
  name: k0s
  privateKey: ./id_rsa_k0s
machines:
- count: 3
  backend: docker
  spec:
    image: $LINUX_IMAGE
    name: manager%d
    privileged: true
    volumes:
    - type: bind
      source: /lib/modules
      destination: /lib/modules
    - type: volume
      destination: /var/lib/k0s
    portMappings:
    - containerPort: 22
      hostPort: 9022
    - containerPort: 443
      hostPort: 443
    - containerPort: 6443
      hostPort: 6443
070701000000AC000081A40000000000000000000000016842976900000236000000000000000000000000000000000000003700000000k0sctl-0.25.1/smoke-test/bootloose.yaml.osoverride.tplcluster:
  name: k0s
  privateKey: ./id_rsa_k0s
machines:
- count: 1
  backend: docker
  spec:
    image: quay.io/k0sproject/bootloose-ubuntu22.04
    name: manager%d
    privileged: true
    volumes:
    - type: bind
      source: /lib/modules
      destination: /lib/modules
    - type: volume
      destination: /var/lib/k0s
    - type: bind
      source: $OS_RELEASE_PATH
      destination: /etc/os-release
    portMappings:
    - containerPort: 22
      hostPort: 9022
    - containerPort: 443
      hostPort: 443
    - containerPort: 6443
      hostPort: 6443
070701000000AD000081A400000000000000000000000168429769000001E3000000000000000000000000000000000000003300000000k0sctl-0.25.1/smoke-test/bootloose.yaml.single.tplcluster:
  name: k0s
  privateKey: ./id_rsa_k0s
machines:
- count: 1
  backend: docker
  spec:
    image: quay.io/k0sproject/bootloose-ubuntu22.04
    name: manager%d
    privileged: true
    volumes:
    - type: bind
      source: /lib/modules
      destination: /lib/modules
    - type: volume
      destination: /var/lib/k0s
    portMappings:
    - containerPort: 22
      hostPort: 9022
    - containerPort: 443
      hostPort: 443
    - containerPort: 6443
      hostPort: 6443
070701000000AE000081A400000000000000000000000168429769000002F6000000000000000000000000000000000000002C00000000k0sctl-0.25.1/smoke-test/bootloose.yaml.tplcluster:
  name: k0s
  privateKey: ./id_rsa_k0s
machines:
- count: 1
  backend: docker
  spec:
    image: $LINUX_IMAGE
    name: manager%d
    privileged: true
    volumes:
    - type: bind
      source: /lib/modules
      destination: /lib/modules
    - type: volume
      destination: /var/lib/k0s
    portMappings:
    - containerPort: 22
      hostPort: 9022
    - containerPort: 443
      hostPort: 443
    - containerPort: 6443
      hostPort: 6443
- count: 1
  backend: docker
  spec:
    image: $LINUX_IMAGE
    name: worker%d
    privileged: true
    volumes:
    - type: bind
      source: /lib/modules
      destination: /lib/modules
    - type: volume
      destination: /var/lib/k0s
    portMappings:
    - containerPort: 22
      hostPort: 9022070701000000AF000081A40000000000000000000000016842976900000252000000000000000000000000000000000000003500000000k0sctl-0.25.1/smoke-test/k0sctl-controller-swap.yamlapiVersion: k0sctl.k0sproject.io/v1beta1
kind: cluster
spec:
  hosts:
    - role: controller
      uploadBinary: true
      ssh:
        address: "127.0.0.1"
        port: 9022
        keyPath: ./id_rsa_k0s
    - role: controller
      uploadBinary: true
      ssh:
        address: "127.0.0.1"
        port: 9023
        keyPath: ./id_rsa_k0s
    - role: controller+worker
      uploadBinary: true
      ssh:
        address: "127.0.0.1"
        port: 9024
        keyPath: ./id_rsa_k0s
  k0s:
    version: "${K0S_VERSION}"
    config:
      spec:
        telemetry:
          enabled: false

070701000000B0000081A400000000000000000000000168429769000001E4000000000000000000000000000000000000003100000000k0sctl-0.25.1/smoke-test/k0sctl-downloadurl.yamlapiVersion: k0sctl.k0sproject.io/v1beta1
kind: cluster
spec:
  hosts:
    - role: single
      k0sDownloadURL: https://github.com/k0sproject/k0s/releases/download/%v/k0s-%v-%p
      ssh:
        address: "127.0.0.1"
        port: 9022
        keyPath: ./id_rsa_k0s
      hooks:
        apply:
          before:
            - "echo hello > apply.hook"
          after:
            - "grep -q hello apply.hook"
  k0s:
    config:
      spec:
        telemetry:
          enabled: false
070701000000B1000081A400000000000000000000000168429769000001EF000000000000000000000000000000000000002C00000000k0sctl-0.25.1/smoke-test/k0sctl-dryrun.yamlapiVersion: k0sctl.k0sproject.io/v1beta1
kind: cluster
spec:
  hosts:
    - role: controller
      uploadBinary: true
      os: "$OS_OVERRIDE"
      ssh:
        address: "127.0.0.1"
        port: 9022
        keyPath: ./id_rsa_k0s
    - role: worker
      uploadBinary: true
      os: "$OS_OVERRIDE"
      ssh:
        address: "127.0.0.1"
        port: 9023
        keyPath: ./id_rsa_k0s
  k0s:
    version: "${K0S_VERSION}"
    config:
      spec:
        telemetry:
          enabled: false
070701000000B2000081A400000000000000000000000168429769000001EE000000000000000000000000000000000000002D00000000k0sctl-0.25.1/smoke-test/k0sctl-dynamic.yamlapiVersion: k0sctl.k0sproject.io/v1beta1
kind: cluster
spec:
  hosts:
    - role: controller
      uploadBinary: true
      ssh:
        address: "127.0.0.1"
        port: 9022
        keyPath: ./id_rsa_k0s
    - role: worker
      uploadBinary: true
      os: "$OS_OVERRIDE"
      ssh:
        address: "127.0.0.1"
        port: 9023
        keyPath: ./id_rsa_k0s
  k0s:
    version: "${K0S_VERSION}"
    dynamicConfig: true
    config:
      spec:
        telemetry:
          enabled: false
070701000000B3000081A4000000000000000000000001684297690000054F000000000000000000000000000000000000002F00000000k0sctl-0.25.1/smoke-test/k0sctl-files.yaml.tplapiVersion: k0sctl.k0sproject.io/v1beta1
kind: cluster
spec:
  hosts:
    - role: controller
      uploadBinary: true
      ssh:
        address: "127.0.0.1"
        port: 9022
        keyPath: ./id_rsa_k0s
      files:
        - name: single file
          src: ./upload/toplevel.txt
          dst: /root/singlefile/renamed.txt
          user: test
          group: test
        - name: dest_dir
          src: ./upload/toplevel.txt
          dstDir: /root/destdir
        - name: perm644
          src: ./upload_chmod/script.sh
          dstDir: /root/chmod
          perm: 0644
        - name: permtransfer
          src: ./upload_chmod/script.sh
          dstDir: /root/chmod_exec
        - name: dir
          src: ./upload
          dstDir: /root/dir
        - name: glob
          src: ./upload/**/*.txt
          dstDir: /root/glob
          dirPerm: 0700
        - name: url
          src: https://api.github.com/repos/k0sproject/k0s/releases
          dst: /root/url/releases.json
        - name: url-destdir
          src: https://api.github.com/repos/k0sproject/k0s/releases
          dstDir: /root/url_destdir
    - role: worker
      uploadBinary: true
      ssh:
        address: "127.0.0.1"
        port: 9023
        keyPath: ./id_rsa_k0s
  k0s:
    version: "$K0S_VERSION"
    config:
      spec:
        telemetry:
          enabled: false
070701000000B4000081A4000000000000000000000001684297690000022E000000000000000000000000000000000000003600000000k0sctl-0.25.1/smoke-test/k0sctl-installflags.yaml.tplapiVersion: k0sctl.k0sproject.io/v1beta1
kind: cluster
spec:
  hosts:
    - role: controller+worker
      uploadBinary: true
      installFlags:
        - "${K0S_CONTROLLER_FLAG}"
      ssh:
        address: "127.0.0.1"
        port: 9022
        keyPath: ./id_rsa_k0s
    - role: worker
      uploadBinary: true
      installFlags:
        - "${K0S_WORKER_FLAG}"
      ssh:
        address: "127.0.0.1"
        port: 9023
        keyPath: ./id_rsa_k0s
  k0s:
    version: "${K0S_VERSION}"
    config:
      spec:
        telemetry:
          enabled: false
070701000000B5000081A4000000000000000000000001684297690000019B000000000000000000000000000000000000002D00000000k0sctl-0.25.1/smoke-test/k0sctl-openssh.yamlapiVersion: k0sctl.k0sproject.io/v1beta1
kind: cluster
spec:
  hosts:
    - role: controller
      uploadBinary: true
      openSSH:
        address: controller
        configPath: ssh/config
    - role: worker
      uploadBinary: true
      openSSH:
        address: worker
        configPath: ssh/config
  k0s:
    version: "${K0S_VERSION}"
    config:
      spec:
        telemetry:
          enabled: false
070701000000B6000081A400000000000000000000000168429769000002B3000000000000000000000000000000000000003200000000k0sctl-0.25.1/smoke-test/k0sctl-rootless.yaml.tplapiVersion: k0sctl.k0sproject.io/v1beta1
kind: cluster
spec:
  hosts:
    - role: controller
      uploadBinary: true
      os: "$OS_OVERRIDE"
      ssh:
        address: "127.0.0.1"
        port: 9022
        keyPath: ./id_rsa_k0s
        user: ${SSH_USER}
      hooks:
        apply:
          before:
            - "echo hello > apply.hook"
          after:
            - "grep -q hello apply.hook"
    - role: worker
      uploadBinary: true
      os: "$OS_OVERRIDE"
      ssh:
        address: "127.0.0.1"
        port: 9023
        keyPath: ./id_rsa_k0s
        user: ${SSH_USER}
  k0s:
    version: "${K0S_VERSION}"
    config:
      spec:
        telemetry:
          enabled: false
070701000000B7000081A400000000000000000000000168429769000001DA000000000000000000000000000000000000002C00000000k0sctl-0.25.1/smoke-test/k0sctl-single.yamlapiVersion: k0sctl.k0sproject.io/v1beta1
kind: cluster
spec:
  hosts:
    - role: single
      uploadBinary: true
      os: "$OS_OVERRIDE"
      ssh:
        address: "127.0.0.1"
        port: 9022
        keyPath: ./id_rsa_k0s
      hooks:
        apply:
          before:
            - "echo hello > apply.hook"
          after:
            - "grep -q hello apply.hook"
  k0s:
    version: "$K0S_VERSION"
    config:
      spec:
        telemetry:
          enabled: false070701000000B8000081A400000000000000000000000168429769000002AE000000000000000000000000000000000000002500000000k0sctl-0.25.1/smoke-test/k0sctl.yamlapiVersion: k0sctl.k0sproject.io/v1beta1
kind: cluster
spec:
  hosts:
    - role: controller
      uploadBinary: true
      os: "$OS_OVERRIDE"
      ssh:
        address: "127.0.0.1"
        port: 9022
        keyPath: ./id_rsa_k0s
      hooks:
        apply:
          before:
            - "echo hello > apply.hook"
          after:
            - "grep -q hello apply.hook"
    - role: worker
      uploadBinary: true
      os: "$OS_OVERRIDE"
      ssh:
        address: "127.0.0.1"
        port: 9023
        keyPath: ./id_rsa_k0s
  options:
    evictTaint:
      enabled: true
  k0s:
    version: "${K0S_VERSION}"
    config:
      spec:
        telemetry:
          enabled: false
070701000000B9000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000002200000000k0sctl-0.25.1/smoke-test/multidoc070701000000BA000081A400000000000000000000000168429769000001F0000000000000000000000000000000000000003900000000k0sctl-0.25.1/smoke-test/multidoc/k0sctl-multidoc-1.yamlapiVersion: k0sctl.k0sproject.io/v1beta1
kind: cluster
spec:
  hosts:
    - role: controller
      uploadBinary: true
      os: "$OS_OVERRIDE"
      ssh:
        address: "127.0.0.1"
        port: 9022
        keyPath: ./id_rsa_k0s
    - role: worker
      uploadBinary: true
      os: "$OS_OVERRIDE"
      ssh:
        address: "127.0.0.1"
        port: 9023
        keyPath: ./id_rsa_k0s
  k0s:
    version: "${K0S_VERSION}"
    config:
      spec:
        telemetry:
          enabled: false

070701000000BB000081A40000000000000000000000016842976900000106000000000000000000000000000000000000003900000000k0sctl-0.25.1/smoke-test/multidoc/k0sctl-multidoc-2.yamlapiVersion: k0s.k0sproject.io/v1beta1
kind: clusterconfig
spec:
  extensions:
    helm:
      concurrencyLevel: 5
---
apiVersion: v1
kind: Pod
metadata:
  name: hello
spec:
  containers:
  - name: hello
    image: nginx:alpine
    ports:
    - containerPort: 80
070701000000BC000081A400000000000000000000000168429769000001A7000000000000000000000000000000000000002400000000k0sctl-0.25.1/smoke-test/os-releaseNAME="Ubuntu-override-test"
VERSION="18.04.3 LTS (Bionic Beaver)"
ID=override-test
ID_LIKE=debian
PRETTY_NAME="Override-test -- Ubuntu 18.04.3 LTS"
VERSION_ID="18.04"
HOME_URL="https://www.ubuntu.com/"
SUPPORT_URL="https://help.ubuntu.com/"
BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
VERSION_CODENAME=bionic
UBUNTU_CODENAME=bionic070701000000BD000081ED0000000000000000000000016842976900000871000000000000000000000000000000000000003100000000k0sctl-0.25.1/smoke-test/smoke-backup-restore.sh#!/usr/bin/env sh

K0SCTL_CONFIG=${K0SCTL_CONFIG:-"k0sctl.yaml"}
OUT=${OUT:-""}

set -ex

. ./smoke.common.sh
trap runCleanup EXIT

# custom exit trap to cleanup the backup archives
runCleanup() {
    cleanup
    rm -f k0s_backup*.tar.gz || true
}

deleteCluster
createCluster
../k0sctl init
../k0sctl apply --config "${K0SCTL_CONFIG}" --debug

# Collect some facts so we can validate restore actually did full restore
system_ns_uid=$(bootloose ssh root@manager0 -- k0s kubectl --kubeconfig "/var/lib/k0s/pki/admin.conf" get -n kube-system namespace kube-system -o template='{{.metadata.uid}}')
node_uid=$(bootloose ssh root@manager0 -- k0s kubectl --kubeconfig "/var/lib/k0s/pki/admin.conf" get node worker0 -o template='{{.metadata.uid}}')

if [ -z "${OUT}" ]; then
    echo "Backup with default output filename"
    ../k0sctl backup --config "${K0SCTL_CONFIG}" --debug
    RESTORE_FROM="$(ls -t k0s_backup_*.tar.gz 2>/dev/null | head -n1)"
    if [ ! -f "${RESTORE_FROM}" ]; then
        echo "Backup archive not found!"
        exit 1
    fi
else
    RESTORE_FROM="${OUT}"
  ../k0sctl backup --config "${K0SCTL_CONFIG}" --debug --output "${OUT}"
fi

echo "Restore from ${RESTORE_FROM} header hexdump:"
hexdump -C -n 1024 "${RESTORE_FROM}"

# Reset the controller
bootloose ssh root@manager0 -- k0s stop
bootloose ssh root@manager0 -- k0s reset

echo "Restoring from ${RESTORE_FROM}"

../k0sctl apply --config "${K0SCTL_CONFIG}" --debug --restore-from "${RESTORE_FROM}"

rm -f -- "${RESTORE_FROM}" || true

# Verify kube object UIDs match so we know we did full restore of the API objects
new_system_ns_uid=$(bootloose ssh root@manager0 -- k0s kubectl --kubeconfig "/var/lib/k0s/pki/admin.conf" get -n kube-system namespace kube-system -o template='{{.metadata.uid}}')
if [ "$system_ns_uid" != "$new_system_ns_uid" ]; then
    echo "kube-system UIDs do not match after restore!!!"
    exit 1
fi
new_node_uid=$(bootloose ssh root@manager0 -- k0s kubectl --kubeconfig "/var/lib/k0s/pki/admin.conf" get node worker0 -o template='{{.metadata.uid}}')
if [ "$node_uid" != "$new_node_uid" ]; then
    echo "worker0 UIDs do not match after restore!!!"
    exit 1
fi
070701000000BE000081ED0000000000000000000000016842976900000273000000000000000000000000000000000000003000000000k0sctl-0.25.1/smoke-test/smoke-basic-openssh.sh#!/usr/bin/env sh

K0SCTL_CONFIG=${K0SCTL_CONFIG:-"k0sctl-openssh.yaml"}

set -e

. ./smoke.common.sh
trap cleanup_openssh EXIT

# Exit-trap handler: run the common cleanup, then remove the local "ssh"
# directory this test creates (it holds a copy of the private key and the
# generated ssh config).
cleanup_openssh() {
  cleanup
  # Bug fix: the original removed ".ssh", but the directory created and
  # checked by this script is "ssh" (see the key copy it tests for).
  if [ -f "ssh/id_rsa_k0s" ]; then
    rm -rf ssh
  fi
}

deleteCluster
createCluster

echo "* Create SSH config"
mkdir -p ~/.ssh
mkdir -p ssh
cp id_rsa_k0s ssh/
cat <<EOF > ssh/config
Host *
  StrictHostKeyChecking no
  UserKnownHostsFile /dev/null
  IdentityFile id_rsa_k0s
  User root
Host controller
  Hostname 127.0.0.1
  Port 9022
Host worker
  Hostname 127.0.0.1
  Port 9023
EOF

echo "* Starting apply"
../k0sctl apply --config "${K0SCTL_CONFIG}" --debug
echo "* Apply OK"

070701000000BF000081ED00000000000000000000000168429769000006C1000000000000000000000000000000000000003100000000k0sctl-0.25.1/smoke-test/smoke-basic-rootless.sh#!/usr/bin/env sh

export SSH_USER=${SSH_USER:-"k0sctl-user"}
K0SCTL_CONFIG="k0sctl-rootless.yaml"

envsubst < "k0sctl-rootless.yaml.tpl" > "${K0SCTL_CONFIG}"

set -e


. ./smoke.common.sh
trap cleanup EXIT

deleteCluster
createCluster

for host in manager0 worker0; do
  echo "* Creating ${SSH_USER} on ${host}"
  bootloose ssh "root@${host}" -- groupadd --system k0sctl-admin
  bootloose ssh "root@${host}" -- useradd -m -G k0sctl-admin -p '*' "${SSH_USER}"
  bootloose ssh "root@${host}" -- echo "'%k0sctl-admin ALL=(ALL)NOPASSWD:ALL'" '>/etc/sudoers.d/k0sctl-admin'
  bootloose ssh "root@${host}" -- chmod 0440 /etc/sudoers.d/k0sctl-admin
  bootloose ssh "root@${host}" -- mkdir -p "/home/${SSH_USER}/.ssh"
  bootloose ssh "root@${host}" -- cp '/root/.ssh/*' "/home/${SSH_USER}/.ssh/"
  bootloose ssh "root@${host}" -- chown -R "${SSH_USER}:${SSH_USER}" "/home/${SSH_USER}/.ssh"
done

echo "* Starting apply"
../k0sctl apply --config "${K0SCTL_CONFIG}" --kubeconfig-out applykubeconfig --debug
echo "* Apply OK"

echo "* Verify hooks were executed on the host"
bootloose ssh root@manager0 -- grep -q hello "~${SSH_USER}/apply.hook"

echo "* Verify 'k0sctl kubeconfig' output includes 'data' block"
# Bug fix: use the rootless config actually applied above instead of the
# hard-coded default k0sctl.yaml from the non-rootless smoke test.
../k0sctl kubeconfig --config "${K0SCTL_CONFIG}" | grep -v -- "-data"

echo "* Run kubectl on controller"
bootloose ssh root@manager0 -- k0s kubectl get nodes

echo "* Downloading kubectl for local test"
downloadKubectl

echo "* Using the kubectl from apply"
./kubectl --kubeconfig applykubeconfig get nodes

echo "* Using k0sctl kubeconfig locally"
../k0sctl kubeconfig --config "${K0SCTL_CONFIG}" > kubeconfig

echo "* Output:"
grep -v -- -data kubeconfig

echo "* Running kubectl"
./kubectl --kubeconfig kubeconfig get nodes
echo "* Done"
070701000000C0000081ED0000000000000000000000016842976900000400000000000000000000000000000000000000002800000000k0sctl-0.25.1/smoke-test/smoke-basic.sh#!/usr/bin/env sh

K0SCTL_CONFIG=${K0SCTL_CONFIG:-"k0sctl.yaml"}

set -e


. ./smoke.common.sh
trap cleanup EXIT

deleteCluster
createCluster

echo "* Starting apply"
../k0sctl apply --config "${K0SCTL_CONFIG}" --kubeconfig-out applykubeconfig --debug
echo "* Apply OK"

echo "* Verify hooks were executed on the host"
bootloose ssh root@manager0 -- grep -q hello apply.hook

echo "* Verify 'k0sctl kubeconfig' output includes 'data' block"
# Consistency fix: respect a K0SCTL_CONFIG override rather than
# hard-coding the default k0sctl.yaml filename.
../k0sctl kubeconfig --config "${K0SCTL_CONFIG}" | grep -v -- "-data"

echo "* Run kubectl on controller"
bootloose ssh root@manager0 -- k0s kubectl get nodes

echo "* Downloading kubectl for local test"
downloadKubectl

echo "* Using the kubectl from apply"
./kubectl --kubeconfig applykubeconfig get nodes

echo "* Using k0sctl kubeconfig locally"
../k0sctl kubeconfig --config "${K0SCTL_CONFIG}" --user smoke --cluster test > kubeconfig

echo "* Output:"
grep -v -- -data kubeconfig

echo "* Running kubectl"
./kubectl --kubeconfig kubeconfig --user smoke --cluster test get nodes
echo "* Done"
070701000000C1000081ED0000000000000000000000016842976900000630000000000000000000000000000000000000003200000000k0sctl-0.25.1/smoke-test/smoke-controller-swap.sh#!/usr/bin/env sh

K0SCTL_CONFIG=${K0SCTL_CONFIG:-"k0sctl-controller-swap.yaml"}

set -ex


. ./smoke.common.sh
trap cleanup EXIT

deleteCluster
createCluster

echo "* Starting apply"
../k0sctl apply --config "${K0SCTL_CONFIG}" --debug
echo "* Apply OK"

echo "* Get the ip of the last controller"
controllerip=$(bootloose show manager2 -o json | grep '"ip"' | head -1 | cut -d'"' -f4)

echo "* Wipe controller 3"
docker rm -fv "$(bootloose show manager2 -o json | grep '"container"' | head -1 | cut -d'"' -f4)"

echo "* Verify its gone"
bootloose show manager2 | grep "Not created"

echo "* Recreate controller2"
createCluster

echo "* Verify its back and IP is the same"
bootloose show manager2 | grep "Running"
newip=$(bootloose show manager2 -o json | grep '"ip"' | head -1 | cut -d'"' -f4)
if [ "$controllerip" != "$newip" ]; then
  echo "IP mismatch: $controllerip != $newip - ip should get reused"
  exit 1
fi

echo "* Re-apply should fail because of known hosts"
if ../k0sctl apply --config "${K0SCTL_CONFIG}" --debug; then
  echo "Re-apply should have failed because of known hosts"
  exit 1
fi

echo "* Clear known hosts"
truncate -s 0 ~/.ssh/known_hosts

echo "* Re-apply should fail because of replaced controller"
if ../k0sctl apply --config "${K0SCTL_CONFIG}" --debug; then
  echo "Re-apply should have failed because of replaced controller"
  exit 1
fi

echo "* Perform etcd member removal"
bootloose ssh root@manager0 -- k0s etcd leave --peer-address "$controllerip"

echo "* Re-apply should succeed"
../k0sctl apply --config "${K0SCTL_CONFIG}" --debug

echo "* Done"
070701000000C2000081ED0000000000000000000000016842976900000CBB000000000000000000000000000000000000002900000000k0sctl-0.25.1/smoke-test/smoke-dryrun.sh#!/usr/bin/env bash

# Default values for environment variables
K0SCTL_CONFIG=${K0SCTL_CONFIG:-"k0sctl-dryrun.yaml"}
K0S_FROM=${K0S_FROM:-"v1.21.6+k0s.0"}
K0S_TO=${K0S_TO:-"$(curl -s "https://docs.k0sproject.io/stable.txt")"}

log="smoke-dryrun.log"

# Source common functions
. ./smoke.common.sh
trap cleanup EXIT

# Define functions
# Run a command on a bootloose machine over ssh.
# $1 is the user@host target; the remaining arguments are joined into a
# single string ("$*") so quoting/pipelines are interpreted by the
# remote shell as one command line.
remoteCommand() {
  local userhost="$1"
  shift
  bootloose ssh "${userhost}" -- "$*"
}

# Print a message framed by colored separator bars.
# $1 selects the ANSI foreground color digit (1=red, 2=green, 3=yellow, ...);
# the remaining arguments form the message.
colorEcho() {
  local color=$1
  shift
  local bar="\033[1;3${color}m************************************************************\033[0m"
  echo -e "${bar}"
  echo -e "\033[1;3${color}m$*\033[0m"
  echo -e "${bar}"
}

# Assert the number of "dry-run" lines in the apply log.
#   mode "min"  -> require at least $2 matching lines
#   mode "none" -> require exactly zero matching lines
# Exits the test run on failure or on an unknown mode.
checkDryRunLines() {
  local mode="$1"
  local expected="$2"
  local count
  count=$(grep -c "dry-run" "${log}")
  if [ "${mode}" = "min" ]; then
    if [ "${count}" -lt "${expected}" ]; then
      colorEcho 1 "Expected at least ${expected} dry-run lines, got ${count}"
      exit 1
    fi
  elif [ "${mode}" = "none" ]; then
    if [ "${count}" -ne 0 ]; then
      colorEcho 1 "Expected zero dry-run lines, got ${count}"
      exit 1
    fi
  else
    echo "Unknown mode for checkDryRunLines"
    exit 1
  fi
}

# A repeated dry-run on an up-to-date cluster must report that no cluster
# state altering actions would be performed; abort the test otherwise.
dryRunNoChanges() {
  grep -q "no cluster state altering actions" "${log}" && return 0
  colorEcho 1 "Expected dry-run to have no changes"
  exit 1
}

dumpDryRunLines() {
  colorEcho 2 "Dry-run filtered log:"
  grep "dry-run" "${log}"
}

# Assert that `k0s version` on the controller (manager0) matches $1 exactly.
# Exits the test run on mismatch.
expectK0sVersion() {
  local expected=$1
  local remote
  remote=$(remoteCommand "root@manager0" "k0s version")
  if [ "${remote}" != "${expected}" ]; then
    colorEcho 1 "Expected k0s version ${expected}, got ${remote}"
    exit 1
  fi
}

# Assert that k0s is absent from the controller: no /etc/k0s directory,
# no k0s.yaml config, and no running "k0s controller" process.
expectNoK0s() {
  echo "Expecting no k0s on controller"
  if remoteCommand "root@manager0" "test -d /etc/k0s"; then
    colorEcho 1 "Expected no /etc/k0s on controller"
    exit 1
  fi
  # NOTE(review): redundant when the directory check above already passed;
  # kept as a belt-and-braces check.
  if remoteCommand "root@manager0" "test -f /etc/k0s/k0s.yaml"; then
    colorEcho 1 "Expected no /etc/k0s/k0s.yaml on controller"
    exit 1
  fi
  if remoteCommand "root@manager0" "ps -ef" | grep -q "k0s controller"; then
    colorEcho 1 "Expected no k0s controller process on controller"
    exit 1
  fi
}

# Apply the current config, forwarding any extra flags (e.g. --dry-run)
# verbatim and teeing the output to ${log} for later inspection.
applyConfig() {
  # Bug fix: the original expanded "${extra_flag}" unconditionally, which
  # passed a spurious empty-string argument to k0sctl when the function was
  # called without flags. "$@" forwards exactly the arguments given.
  ../k0sctl apply --config "${K0SCTL_CONFIG}" --debug "$@" | tee "${log}"
}

deleteCluster
createCluster

K0S_VERSION="${K0S_FROM}"

colorEcho 3 "Installing ${K0S_VERSION} with --dry-run"
applyConfig "--dry-run"
expectNoK0s
checkDryRunLines min 3
dumpDryRunLines

colorEcho 3 "Installing ${K0S_VERSION}"
applyConfig
expectK0sVersion "${K0S_FROM}"
checkDryRunLines none

colorEcho 3 "Installing ${K0S_VERSION} with --dry-run again"
applyConfig "--dry-run"
expectK0sVersion "${K0S_FROM}"
dryRunNoChanges

colorEcho 4 "Succesfully installed ${K0S_FROM}, moving on to upgrade to ${K0S_TO}"
K0S_VERSION="${K0S_TO}"

colorEcho 3 "Upgrading to ${K0S_VERSION} with --dry-run"
applyConfig "--dry-run"
expectK0sVersion "${K0S_FROM}"
checkDryRunLines min 3
dumpDryRunLines

colorEcho 3 "Upgrading to ${K0S_VERSION}"
applyConfig
expectK0sVersion "${K0S_TO}"
checkDryRunLines none

colorEcho 3 "Upgrading to ${K0S_VERSION} with --dry-run again"
applyConfig "--dry-run"
expectK0sVersion "${K0S_TO}"
dryRunNoChanges
070701000000C3000081ED00000000000000000000000168429769000002CA000000000000000000000000000000000000002A00000000k0sctl-0.25.1/smoke-test/smoke-dynamic.sh#!/usr/bin/env sh

K0SCTL_CONFIG=${K0SCTL_CONFIG:-"k0sctl-dynamic.yaml"}

set -e


. ./smoke.common.sh
trap cleanup EXIT

deleteCluster
createCluster

echo "* Starting apply"
../k0sctl apply --config "${K0SCTL_CONFIG}" --debug
echo "* Apply OK"

max_retry=5
counter=0
echo "* Verifying dynamic config reconciliation was a success"
until ../k0sctl config status -o json --config "${K0SCTL_CONFIG}" | grep -q "SuccessfulReconcile"
do
   [ $counter -eq $max_retry ] && echo "Failed!" && exit 1
   echo "* Waiting for a couple of seconds to retry"
   sleep 5
   counter=$((counter+1))
done

echo "* OK"

echo "* Dynamic config reconciliation status:"
../k0sctl config status --config "${K0SCTL_CONFIG}"

echo "* Done"
070701000000C4000081ED0000000000000000000000016842976900000CF4000000000000000000000000000000000000002800000000k0sctl-0.25.1/smoke-test/smoke-files.sh#!/usr/bin/env sh

K0SCTL_TEMPLATE=${K0SCTL_TEMPLATE:-"k0sctl.yaml.tpl"}

set -e

. ./smoke.common.sh
trap cleanup EXIT

envsubst < k0sctl-files.yaml.tpl > k0sctl.yaml

deleteCluster
createCluster

remoteCommand() {
  local userhost="$1"
  shift
  bootloose ssh "${userhost}" -- "$@"
}

remoteFileExist() {
  local userhost="$1"
  local path="$2"
  remoteCommand "${userhost}" test -e "${path}"
}

remoteFileContent() {
  local userhost="$1"
  local path="$2"
  remoteCommand "${userhost}" cat "${path}"
}

echo "* Creating random files"
mkdir -p upload
mkdir -p upload/nested
mkdir -p upload_chmod

head -c 8192 </dev/urandom > upload/toplevel.txt
head -c 8192 </dev/urandom > upload/nested/nested.txt
head -c 8192 </dev/urandom > upload/nested/exclude-on-glob
cat << EOF > upload_chmod/script.sh
#!/bin/sh
echo hello
EOF
chmod 0744 upload_chmod/script.sh

echo "* Creating test user"
remoteCommand root@manager0 useradd test

echo "* Starting apply"
../k0sctl apply --config k0sctl.yaml --debug

echo "* Verifying uploads"
remoteCommand root@manager0 "apt-get update > /dev/null && apt-get install tree > /dev/null && tree -fp"

printf %s "  - Single file using destination file path and user:group .. "
remoteFileExist root@manager0 /root/singlefile/renamed.txt
printf %s "[exist]"
remoteCommand root@manager0 stat -c '%U:%G' /root/singlefile/renamed.txt | grep -q test:test
printf %s "[stat]"
echo "OK"

printf %s "  - Single file using destination dir .. "
remoteFileExist root@manager0 /root/destdir/toplevel.txt
echo "OK"

printf %s "  - PermMode 644 .. "
remoteFileExist root@manager0 /root/chmod/script.sh
printf %s "[exist]"
remoteCommand root@manager0 stat -c '%a' /root/chmod/script.sh | grep -q 644
printf %s "[stat] "
echo "OK"

printf %s "  - PermMode transfer .."
remoteFileExist root@manager0 /root/chmod_exec/script.sh
printf %s "[exist] "
remoteCommand root@manager0 stat -c '%a' /root/chmod_exec/script.sh | grep -q 744
printf %s "[stat] "
remoteCommand root@manager0 /root/chmod_exec/script.sh | grep -q hello
printf %s "[run] "
echo "OK"

printf %s "  - Directory using destination dir .. "
remoteFileExist root@manager0 /root/dir/toplevel.txt
printf %s "[1] "
remoteFileExist root@manager0 /root/dir/nested/nested.txt
printf %s "[2] "
remoteFileExist root@manager0 /root/dir/nested/exclude-on-glob
printf %s "[3] "
echo "OK"

printf %s "  - Glob using destination dir .. "
remoteFileExist root@manager0 /root/glob/toplevel.txt
printf %s "[1] "
remoteFileExist root@manager0 /root/glob/nested/nested.txt
printf %s "[2] "
if remoteFileExist root@manager0 /root/glob/nested/exclude-on-glob; then exit 1; fi
printf %s "[3] "
remoteCommand root@manager0 stat -c '%a' /root/glob | grep -q 700
printf %s "[stat1]"
remoteCommand root@manager0 stat -c '%a' /root/glob/nested | grep -q 700
printf %s "[stat2]"
echo "OK"

printf %s "  - URL using destination file .. "
remoteFileExist root@manager0 /root/url/releases.json
printf %s "[exist] "
remoteFileContent root@manager0 /root/url/releases.json | grep -q html_url
printf %s "[content] "
echo "OK"

printf %s "  - URL using destination dir .. "
remoteFileExist root@manager0 /root/url_destdir/releases
printf %s "[exist] "
remoteFileContent root@manager0 /root/url_destdir/releases | grep -q html_url
printf %s "[content] "
echo "OK"

echo "* Done"

070701000000C5000081ED00000000000000000000000168429769000000CD000000000000000000000000000000000000002700000000k0sctl-0.25.1/smoke-test/smoke-init.sh#!/usr/bin/env sh

set -e

. ./smoke.common.sh
trap cleanup EXIT

deleteCluster
createCluster
../k0sctl init --key-path ./id_rsa_k0s 127.0.0.1:9022 root@127.0.0.1:9023 | ../k0sctl apply --config - --debug
070701000000C6000081ED000000000000000000000001684297690000051A000000000000000000000000000000000000002B00000000k0sctl-0.25.1/smoke-test/smoke-multidoc.sh#!/usr/bin/env sh

K0SCTL_CONFIG=${K0SCTL_CONFIG:-"k0sctl.yaml"}

set -e


. ./smoke.common.sh
trap cleanup EXIT

deleteCluster
createCluster

remoteCommand() {
  local userhost="$1"
  shift
  bootloose ssh "${userhost}" -- "$@"
}

echo "* Starting apply"
../k0sctl apply --config multidoc/ --kubeconfig-out applykubeconfig --debug
echo "* Apply OK"

echo "* Downloading kubectl for local test"
downloadKubectl
    
export KUBECONFIG=applykubeconfig 

echo "*Waiting until the test pod is running"
./kubectl wait --for=condition=Ready pod/hello --timeout=120s

retries=10
delay=2
nginx_ready=false
i=1

# Poll the test pod until nginx serves its welcome page.
while [ "$i" -le "$retries" ]; do
    echo "* Attempt $i: Checking if nginx is ready..."
    # Bug fix: use the locally downloaded ./kubectl (fetched by
    # downloadKubectl above) instead of relying on a system-wide kubectl
    # that may not exist on the test runner.
    if ./kubectl exec pod/hello -- curl -s http://localhost/ | grep -q "Welcome to nginx!"; then
        echo "nginx is ready!"
        nginx_ready=true
        break
    fi
    echo "  - nginx is not ready"
    sleep $delay
    i=$((i + 1))
done

if [ "$nginx_ready" = false ]; then
    echo "nginx failed to become ready after $retries attempts."
    exit 1
fi

echo " - nginx is ready"

remoteCommand root@manager0 "cat /etc/k0s/k0s.yaml" > k0syaml
echo Resulting k0s.yaml:
cat k0syaml
echo "* Verifying config merging works"
grep -q "concurrencyLevel: 5" k0syaml
grep -q "enabled: false" k0syaml

echo "* Done"

070701000000C7000081ED00000000000000000000000168429769000006EF000000000000000000000000000000000000002C00000000k0sctl-0.25.1/smoke-test/smoke-reinstall.sh#!/usr/bin/env bash

K0SCTL_CONFIG="k0sctl-installflags.yaml"

export K0S_CONTROLLER_FLAG="--labels=smoke-stage=1"
export K0S_WORKER_FLAG="--labels=smoke-stage=1"
envsubst < "k0sctl-installflags.yaml.tpl" > "${K0SCTL_CONFIG}"

set -e

. ./smoke.common.sh
trap cleanup EXIT

deleteCluster
createCluster

# Run a command on a bootloose machine over ssh, echoing it first.
# $1 is the user@host target; the remaining arguments are joined ("$*")
# into one string so the remote shell evaluates pipelines and quoting.
remoteCommand() {
  local userhost="$1"
  shift
  echo "* Running command on ${userhost}: $*"
  bootloose ssh "${userhost}" -- "$*"
}

echo "Installing ${K0S_VERSION}"
../k0sctl apply --config "${K0SCTL_CONFIG}" --debug | tee apply.log
echo "Initial apply should not perform a re-install"
grep -ivq "reinstalling" apply.log

echo "Install flags should contain the expected flag on a controller"
remoteCommand "root@manager0" "k0s status -o json | grep -q -- ${K0S_CONTROLLER_FLAG}"

echo "Install flags should contain the expected flag on a worker"
remoteCommand "root@worker0" "k0s status -o json | grep -q -- ${K0S_WORKER_FLAG}"

echo "A re-apply should not re-install if there are no changes"
../k0sctl apply --config "${K0SCTL_CONFIG}" --debug | tee apply.log
grep -ivq "reinstalling" apply.log

export K0S_CONTROLLER_FLAG="--labels=smoke-stage=2" 
export K0S_WORKER_FLAG="--labels=smoke-stage=2" 
envsubst < "k0sctl-installflags.yaml.tpl" > "${K0SCTL_CONFIG}"

echo "Re-applying ${K0S_VERSION} with modified installFlags"
../k0sctl apply --config "${K0SCTL_CONFIG}" --debug | tee apply.log
echo "A re-apply should perform a re-install if there are changes"
grep -iq "reinstalling" apply.log

sleep 5

echo "Install flags should change for controller"
remoteCommand "root@manager0" "k0s status -o json | grep -q -- ${K0S_CONTROLLER_FLAG}"

echo "Install flags should change for worker"
remoteCommand "root@worker0" "k0s status -o json | grep -q -- ${K0S_WORKER_FLAG}"
070701000000C8000081ED000000000000000000000001684297690000013D000000000000000000000000000000000000002800000000k0sctl-0.25.1/smoke-test/smoke-reset.sh#!/usr/bin/env sh

K0SCTL_CONFIG=${K0SCTL_CONFIG:-"k0sctl.yaml"}

set -e

. ./smoke.common.sh
trap cleanup EXIT

deleteCluster
createCluster
echo "* Applying"
../k0sctl apply --config "${K0SCTL_CONFIG}" --debug
echo "* Resetting"
../k0sctl reset --config "${K0SCTL_CONFIG}" --debug --force
echo "* Done, cleaning up"
070701000000C9000081ED0000000000000000000000016842976900000310000000000000000000000000000000000000002A00000000k0sctl-0.25.1/smoke-test/smoke-upgrade.sh#!/usr/bin/env bash

K0SCTL_CONFIG=${K0SCTL_CONFIG:-"k0sctl.yaml"}

set -e

. ./smoke.common.sh
trap cleanup EXIT


deleteCluster
createCluster

# Run a command on a bootloose machine over ssh, echoing it first.
# $1 is the user@host target; the remaining arguments are joined ("$*")
# into one string so the remote shell evaluates pipelines and quoting.
remoteCommand() {
  local userhost="$1"
  shift
  echo "* Running command on ${userhost}: $*"
  bootloose ssh "${userhost}" -- "$*"
}

# Create config with older version and apply
K0S_VERSION="${K0S_FROM}"
echo "Installing ${K0S_VERSION}"
../k0sctl apply --config "${K0SCTL_CONFIG}" --debug
remoteCommand "root@manager0" "k0s version | grep -q ${K0S_FROM}"

K0S_VERSION=$(curl -s "https://docs.k0sproject.io/stable.txt")

# Create config with latest version and apply as upgrade
echo "Upgrading to k0s ${K0S_VERSION}"
../k0sctl apply --config "${K0SCTL_CONFIG}" --debug
remoteCommand "root@manager0" "k0s version | grep -q ${K0S_VERSION}"
070701000000CA000081A40000000000000000000000016842976900000394000000000000000000000000000000000000002900000000k0sctl-0.25.1/smoke-test/smoke.common.shBOOTLOOSE_TEMPLATE=${BOOTLOOSE_TEMPLATE:-"bootloose.yaml.tpl"}

export LINUX_IMAGE="${LINUX_IMAGE:-"quay.io/k0sproject/bootloose-ubuntu22.04"}"
export PRESERVE_CLUSTER="${PRESERVE_CLUSTER:-""}"
export K0S_VERSION

# Render the bootloose template with the current environment variables
# (LINUX_IMAGE etc.) and create the container "machines" for the cluster.
createCluster() {
  envsubst < "${BOOTLOOSE_TEMPLATE}" > bootloose.yaml
  bootloose create
}

# Tear down any existing test cluster defined by the rendered template.
deleteCluster() {
  # cleanup any existing cluster
  envsubst < "${BOOTLOOSE_TEMPLATE}" > bootloose.yaml
  # prune volumes so a re-created cluster starts from a clean /var/lib/k0s
  bootloose delete && docker volume prune -f
}


# Common exit-trap handler: delete the cluster unless PRESERVE_CLUSTER is
# set (non-empty), which keeps the machines around for debugging.
cleanup() {
    echo "Cleaning up..."

    if [ -z "${PRESERVE_CLUSTER}" ]; then
      deleteCluster
    fi
}

# Download a pinned kubectl (v1.28.2) for local verification unless one is
# already present in the working directory, then print its client version.
downloadKubectl() {
    OS=$(uname | tr '[:upper:]' '[:lower:]')
    ARCH="amd64"
    # Bug fix: "arm,arm64)" was a single literal pattern that never matches
    # any `uname -m` output, so arm64 hosts downloaded amd64 binaries.
    # Use `|` alternation and include aarch64, the value most arm64 Linux
    # kernels report.
    case $(uname -m) in
        arm64|aarch64) ARCH="arm64" ;;
    esac
    [ -f kubectl ] || (curl -L https://storage.googleapis.com/kubernetes-release/release/v1.28.2/bin/"${OS}"/${ARCH}/kubectl > ./kubectl && chmod +x ./kubectl)
    ./kubectl version --client
}
070701000000CB000041ED0000000000000000000000026842976900000000000000000000000000000000000000000000001600000000k0sctl-0.25.1/version070701000000CC000081A400000000000000000000000168429769000001C0000000000000000000000000000000000000002100000000k0sctl-0.25.1/version/version.gopackage version

import (
	"strings"

	"github.com/carlmjohnson/versioninfo"
)

var (
	// Version of the product, is set during the build; defaults to the
	// value reported by versioninfo when not overridden.
	Version = versioninfo.Version
	// GitCommit is the VCS revision the binary was built from; set during the build
	GitCommit = versioninfo.Revision
	// Environment of the product, is set during the build; defaults to "development"
	Environment = "development"
)

// IsPre reports whether the current Version denotes a prerelease.
// Semver prerelease identifiers are introduced by a "-" separator
// (e.g. "v1.2.3-rc.1"), so the presence of that rune marks a prerelease.
func IsPre() bool {
	return strings.ContainsRune(Version, '-')
}
07070100000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000B00000000TRAILER!!!939 blocks
openSUSE Build Service is sponsored by