File ctlptl-0.8.43.obscpio of Package ctlptl

07070100000000000041ED00000000000000000000000268AFB0EA00000000000000000000000000000000000000000000001800000000ctlptl-0.8.43/.circleci07070100000001000081A400000000000000000000000168AFB0EA00000648000000000000000000000000000000000000002300000000ctlptl-0.8.43/.circleci/Dockerfile# Builds a Docker image with:
# - ctlptl
# - docker
# - kubectl
# - kind
# - socat
# - golang build toolchain
#
# Similar to the release image (which contains everything BUT the build
# toolchain)

FROM golang:1.24-bookworm

RUN apt update && apt install -y curl ca-certificates liblz4-tool rsync socat gpg

# Install docker CLI
RUN set -exu \
  # Add Docker's official GPG key:
  && install -m 0755 -d /etc/apt/keyrings \
  && curl -fsSL https://download.docker.com/linux/debian/gpg -o /etc/apt/keyrings/docker.asc \
  && chmod a+r /etc/apt/keyrings/docker.asc \
  # Add the repository to Apt sources: 
  && echo \
    "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/debian \
    $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
  tee /etc/apt/sources.list.d/docker.list > /dev/null \
  && apt update \
  && apt install -y docker-ce-cli=5:25.0.3-1~debian.12~bookworm docker-buildx-plugin

# Install kubectl client
ENV KUBECTL_VERSION=v1.31.0
RUN curl -LO "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl" \
    && curl -LO "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl.sha256" \
    && echo "$(cat kubectl.sha256)  kubectl" | sha256sum --check \
    && install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl

# Install kind
ENV KIND_VERSION=v0.30.0
RUN set -exu \
  && curl -fLo ./kind-linux-amd64 "https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-linux-amd64" \
  && chmod +x ./kind-linux-amd64 \
  && mv ./kind-linux-amd64 /usr/local/bin/kind

07070100000002000081A400000000000000000000000168AFB0EA00001383000000000000000000000000000000000000002300000000ctlptl-0.8.43/.circleci/config.ymlversion: 2.1
orbs:
  slack: circleci/slack@3.3.0
  kubernetes: circleci/kubernetes@0.11.1
jobs:
  build:
    docker:
      - image: cimg/go:1.24
    steps:
      - checkout
      - run: go get -v -t -d ./...
      - run: go test -v ./...
      - run: cd .. && go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.1.6
      - run: make golangci-lint
      - slack/notify-on-failure:
          only_for_branches: main
  e2e-remote-docker:
    docker:
      - image: "docker/tilt-ctlptl-ci@sha256:52badf42e6a79490439af2f319626f982526bb92956efaee4f0dd0824e9062d5"
    steps:
      - checkout
      - setup_remote_docker
      - run: make install
      - run: test/kind/e2e.sh
  e2e:
    machine:
      image: ubuntu-2204:2023.04.2
    steps:
      - checkout
      - kubernetes/install-kubectl
      - run: |
          set -ex
          wget https://golang.org/dl/go1.24.0.linux-amd64.tar.gz
          sudo rm -fR /usr/local/go
          sudo tar -C /usr/local -xzf go1.24.0.linux-amd64.tar.gz
      - run: |
          set -ex
          export MINIKUBE_VERSION=v1.34.0
          curl -fLo ./minikube-linux-amd64 "https://github.com/kubernetes/minikube/releases/download/${MINIKUBE_VERSION}/minikube-linux-amd64"
          chmod +x ./minikube-linux-amd64
          sudo mv ./minikube-linux-amd64 /usr/local/bin/minikube
      - run: |
          set -ex
          export KIND_VERSION=v0.30.0
          curl -fLo ./kind-linux-amd64 "https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-linux-amd64"
          chmod +x ./kind-linux-amd64
          sudo mv ./kind-linux-amd64 /usr/local/bin/kind
      - run: |
          set -ex
          export TAG=v5.6.0
          curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash
      - run: |
          set -ex
          go get -v -t -d ./...
          test/e2e.sh
      - slack/notify-on-failure:
          only_for_branches: main
  release-dry-run:
    docker:
      - image: golang:1.24-bookworm
    steps:
      - checkout
      - setup_remote_docker
      # https://discuss.circleci.com/t/arm-version-of-remote-docker/41624
      - run: ssh remote-docker "sudo apt-get update; sudo apt-get install -y qemu-user-static binfmt-support"
      - run: git fetch --tags
      - run: go install github.com/goreleaser/goreleaser/v2@latest
      - run: |
          set -e
          pushd /tmp
          apt-get update && apt-get install -y \
            ca-certificates \
            curl \
            gnupg \
            lsb-release
          mkdir -p /etc/apt/keyrings
          curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
          echo \
            "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \
            $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
          apt-get update
          apt-get install -y docker-ce-cli docker-buildx-plugin
          docker --version
          rm -rf /var/lib/apt/lists/*

          popd
      - run: goreleaser --verbose --clean --skip=publish --snapshot
      - slack/notify-on-failure:
          only_for_branches: main
  release:
    docker:
      - image: golang:1.24-bookworm
    steps:
      - checkout
      - setup_remote_docker
      # https://discuss.circleci.com/t/arm-version-of-remote-docker/41624
      - run: ssh remote-docker "sudo apt-get update; sudo apt-get install -y qemu-user-static binfmt-support"
      - run: git fetch --tags
      - run: go install github.com/goreleaser/goreleaser/v2@latest
      - run: |
          set -e
          pushd /tmp
          apt-get update && apt-get install -y \
            ca-certificates \
            curl \
            gnupg \
            lsb-release
          mkdir -p /etc/apt/keyrings
          curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
          echo \
            "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \
            $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
          apt-get update
          apt-get install -y docker-ce-cli docker-buildx-plugin
          docker --version
          rm -rf /var/lib/apt/lists/*

          popd
      - run: ./hack/release.sh
      - slack/status:
          mentions: "nick"
workflows:
  version: 2
  build:
    jobs:
      - build
      - e2e:
          requires:
            - build
      - e2e-remote-docker:
          requires:
            - build
      - release-dry-run:
          requires:
            - build
  release:
    jobs:
      - release:
          context:
            - Tilt Release CLI Context
            - Tilt Docker Login Context
          filters:
            branches:
              only: never-release-on-a-branch
            tags:
              only: /v[0-9]+\.[0-9]+\.[0-9]+/
07070100000003000081A400000000000000000000000168AFB0EA00000175000000000000000000000000000000000000001D00000000ctlptl-0.8.43/.golangci.yamlversion: "2"
linters:
  settings:
    staticcheck:
      checks:
        - all
        - "-ST1005" # error strings should not be capitalized
        - "-QF1001" # demorgan's law
        - "-ST1021" # comment forms
        - "-ST1020" # comment forms
        - "-ST1000" # comment forms
        - "-ST1016" # silly naming rules
        - "-QF1007" # silly conditional rules
07070100000004000081A400000000000000000000000168AFB0EA00001209000000000000000000000000000000000000001E00000000ctlptl-0.8.43/.goreleaser.ymlversion: 2
project_name: ctlptl
builds:
- main: ./cmd/ctlptl/main.go
  goos:
  - linux
  - windows
  - darwin
  goarch:
  - amd64
  - arm64
  env:
    - CGO_ENABLED=0
  # https://goreleaser.com/deprecations/#builds-for-windowsarm64
  ignore:
  - goos: windows
    goarch: arm64
archives:
- name_template: >-
    {{ .ProjectName }}.{{ .Version }}.
    {{- if eq .Os "darwin"}}mac
    {{- else }}{{ .Os }}{{ end }}.
    {{- if eq .Arch "amd64" }}x86_64
    {{- else if eq .Arch "386" }}i386
    {{- else }}{{ .Arch }}{{ end }}
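  # The template above yields archive names like ctlptl.0.8.42.mac.x86_64
  # (matching the download URLs shown in INSTALL.md).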
  format_overrides:
    - goos: windows
      format: zip
checksum:
  name_template: 'checksums.txt'
snapshot:
  version_template: "{{ .Tag }}-next"

changelog:
  sort: asc
  use: github
  filters:
    exclude:
    - '^docs?:'
    - '^tests?:'
    - '^cleanup:'
    - '^circleci:'
    - '^ci:'

brews:
- repository:
    owner: tilt-dev
    name: homebrew-tap
  commit_author:
    name: Tilt Dev
    email: hi@tilt.dev
  url_template: "https://github.com/tilt-dev/ctlptl/releases/download/{{ .Tag }}/{{ .ArtifactName }}"
  homepage: "https://ctlptl.dev/"
  description: "Making local Kubernetes clusters easy to set up and tear down"
  install: |
    bin.install "ctlptl"

    # Install bash completion
    output = Utils.safe_popen_read("#{bin}/ctlptl", "completion", "bash")
    (bash_completion/"ctlptl").write output

    # Install zsh completion
    output = Utils.safe_popen_read("#{bin}/ctlptl", "completion", "zsh")
    (zsh_completion/"_ctlptl").write output

    # Install fish completion
    output = Utils.safe_popen_read("#{bin}/ctlptl", "completion", "fish")
    (fish_completion/"ctlptl.fish").write output
  test: |
    system "#{bin}/ctlptl version"
scoops:
- url_template: "https://github.com/tilt-dev/ctlptl/releases/download/{{ .Tag }}/{{ .ArtifactName }}"
  repository:
    owner: tilt-dev
    name: scoop-bucket
  commit_author:
    name: Tilt Dev
    email: hi@tilt.dev
  commit_msg_template: "Scoop update for {{ .ProjectName }} version {{ .Tag }}"
  homepage: "https://ctlptl.dev/"
  description: "Making local Kubernetes clusters easy to set up and tear down"
  license: Apache-2.0
dockers:
- goos: linux
  goarch: amd64
  image_templates:
    - "tiltdev/ctlptl:{{ .Tag }}-amd64"
    - "docker/tilt-ctlptl:{{ .Tag }}-amd64"
  dockerfile: hack/Dockerfile
  use: buildx
  build_flag_templates:
  - "--platform=linux/amd64"
  - "--label=org.opencontainers.image.title={{ .ProjectName }}"
  - "--label=org.opencontainers.image.description={{ .ProjectName }}"
  - "--label=org.opencontainers.image.url=https://github.com/tilt-dev/{{ .ProjectName }}"
  - "--label=org.opencontainers.image.source=https://github.com/tilt-dev/{{ .ProjectName }}"
  - "--label=org.opencontainers.image.version={{ .Version }}"
  - "--label=org.opencontainers.image.created={{ .Timestamp }}"
  - "--label=org.opencontainers.image.revision={{ .FullCommit }}"
  - "--label=org.opencontainers.image.licenses=Apache-2.0"
- goos: linux
  goarch: arm64
  goarm: ''
  image_templates:
    - "tiltdev/ctlptl:{{ .Tag }}-arm64"
    - "docker/tilt-ctlptl:{{ .Tag }}-arm64"
  dockerfile: hack/Dockerfile
  use: buildx
  build_flag_templates:
  - "--platform=linux/arm64"
  - "--label=org.opencontainers.image.title={{ .ProjectName }}"
  - "--label=org.opencontainers.image.description={{ .ProjectName }}"
  - "--label=org.opencontainers.image.url=https://github.com/tilt-dev/{{ .ProjectName }}"
  - "--label=org.opencontainers.image.source=https://github.com/tilt-dev/{{ .ProjectName }}"
  - "--label=org.opencontainers.image.version={{ .Version }}"
  - "--label=org.opencontainers.image.created={{ .Timestamp }}"
  - "--label=org.opencontainers.image.revision={{ .FullCommit }}"
  - "--label=org.opencontainers.image.licenses=Apache-2.0"
docker_manifests:
- name_template: tiltdev/{{ .ProjectName }}:{{ .Tag }}
  image_templates:
  - tiltdev/{{ .ProjectName }}:{{ .Tag }}-amd64
  - tiltdev/{{ .ProjectName }}:{{ .Tag }}-arm64
- name_template: tiltdev/{{ .ProjectName }}:latest
  image_templates:
  - tiltdev/{{ .ProjectName }}:{{ .Tag }}-amd64
  - tiltdev/{{ .ProjectName }}:{{ .Tag }}-arm64
- name_template: docker/tilt-{{ .ProjectName }}:{{ .Tag }}
  image_templates:
  - docker/tilt-{{ .ProjectName }}:{{ .Tag }}-amd64
  - docker/tilt-{{ .ProjectName }}:{{ .Tag }}-arm64
- name_template: docker/tilt-{{ .ProjectName }}:latest
  image_templates:
  - docker/tilt-{{ .ProjectName }}:{{ .Tag }}-amd64
  - docker/tilt-{{ .ProjectName }}:{{ .Tag }}-arm64


# Uncomment these lines if you want to experiment with other
# parts of the release process without releasing new binaries.
# release:
#  disable: true
07070100000005000081A400000000000000000000000168AFB0EA00001C83000000000000000000000000000000000000002100000000ctlptl-0.8.43/CODE_OF_CONDUCT.md# Code of Conduct

### Summary
* Treat everyone with respect and kindness
* Be thoughtful in how you communicate
* Don’t be destructive or inflammatory
* If you encounter an issue, please email [**conduct@tilt.dev**](mailto:conduct@tilt.dev)

## Goals of This Document
Windmill Engineering is committed to providing a friendly, safe, and welcoming environment for all of our users, contributors, followers, and fans, regardless of: gender identity or expression; sexual orientation; disability; neurodivergence; physical appearance; body size; ethnicity; nationality; race; age; religion; level of technical experience; education; socio-economic status; or similar personal characteristics.

The first goal of the Code of Conduct is to specify a baseline standard of behavior so that people with different social values and communication styles can talk effectively, productively, and respectfully.

The second goal is to provide a mechanism for resolving conflicts in the community when they arise.

The third goal of the Code of Conduct is to make our community welcoming to people from different backgrounds. Diversity is critical to the project; for Windmill to be successful, it needs contributors and users from all backgrounds.

We believe that healthy debate and disagreement are essential to a healthy project and community. However, it is never okay to be disrespectful. We value diverse opinions, but we value respectful behavior more.

## Code of Conduct
### Our Pledge
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of gender identity or expression, sexual orientation, disability, neurodivergence, physical appearance, body size, ethnicity, nationality, race, age, religion, level of technical experience, education, socio-economic status, or similar personal characteristics.

### Our Standards
Examples of behavior that contributes to creating a positive environment include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Unwelcome comments regarding a person’s lifestyle choices and practices, including those related to food, health, parenting, drugs, and employment
* Publishing others’ private information, such as a physical or electronic address, without explicit permission (also known as “doxing”)
* Deliberate [misgendering](https://www.healthline.com/health/transgender/misgendering#why-it-happens). This includes [deadnaming](https://www.healthline.com/health/transgender/deadnaming) or persistently using a pronoun that does not correctly reflect a person’s gender. You must address people by the name they give you when not addressing them by their username or handle
* Physical contact and simulated physical contact (e.g., textual descriptions like `*hug*` or `*backrub*`) without consent or after a request to stop
* Threats of violence, both physical and psychological
* Incitement of violence towards any individual, including encouraging a person to commit suicide or to engage in self-harm
* Harassing photography or recording, including logging online activity for harassment purposes
* Continued one-on-one communication after requests to cease
* Other conduct which could reasonably be considered inappropriate in a professional setting

Our open source community prioritizes marginalized people’s safety over privileged people’s comfort. We will not act on complaints regarding:
* “Reverse” -isms, including “reverse racism,” “reverse sexism,” and “cisphobia”
* Reasonable communication of boundaries, such as “leave me alone,” “go away,” or “I’m not discussing this with you”
* Refusal to explain or debate social justice concepts
* Communicating in a “tone” you don’t find congenial
* Criticizing racist, sexist, cissexist, or otherwise oppressive behavior or assumptions

### Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.

### Scope
This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include: using an official project e-mail address; posting via an official social media account; or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.

This Code of Conduct also applies outside the project spaces when the project maintainers have a reasonable belief that an individual’s behavior may have a negative impact on the project or its community.

### Conflict Resolution
We do not believe that all conflict is bad; healthy debate and disagreement often yield positive results. However, it is never okay to be disrespectful or to engage in behavior that violates the project’s Code of Conduct.

If you see someone violating the Code of Conduct, you are encouraged to address the behavior directly with those involved. Many issues can be resolved quickly and easily, and this gives people more control over the outcome of their dispute. If you are unable to resolve the matter for any reason, or if the behavior is threatening or harassing, report it. We are dedicated to providing an environment where participants feel welcome and safe.

Reports should be directed to **conduct@tilt.dev**.

We will investigate every complaint, but you may not receive a direct response. We will use our discretion in determining when and how to follow up on reported incidents, which may range from not taking action to permanent expulsion from the project and project-sponsored spaces. We will notify the accused of the report and provide them an opportunity to discuss it before any action is taken. The identity of the reporter will be omitted from the details of the report supplied to the accused. In potentially harmful situations, such as ongoing harassment or threats to anyone’s safety, we may take action without notice.

### Attribution
This Code of Conduct is adapted from the [Contributor Covenant (v1.4)](https://www.contributor-covenant.org/version/1/4/code-of-conduct), with additional content adapted from TODO Group’s [Open Code of Conduct](https://github.com/todogroup/opencodeofconduct) (no longer maintained).
07070100000006000081A400000000000000000000000168AFB0EA00000385000000000000000000000000000000000000001E00000000ctlptl-0.8.43/CONTRIBUTING.md# Hacking on ctlptl

So you want to make a change to `ctlptl`!

## Contributing

We welcome contributions, either as bug reports, feature requests, or pull requests.

We want everyone to feel at home in this repo and its environs; please see our
[**Code of Conduct**](https://docs.tilt.dev/code_of_conduct.html) for some rules
that govern everyone's participation.

## Commands

Most of the commands for building and testing `ctlptl` should be familiar
to anyone used to developing in Go, but we have a Makefile to wrap the
common ones.

### Run

```
go run ./cmd/ctlptl
```

### Install dev version

```
make install
```

### Unit tests

```
make test
```

### Integration tests

```
make e2e
```

### Release

CircleCI automatically builds a ctlptl release when you push a new
version tag created from main.

```
git pull origin main
git fetch --tags
git tag -a v0.x.y -m "v0.x.y"
git push origin v0.x.y
```
07070100000007000081A400000000000000000000000168AFB0EA000005F3000000000000000000000000000000000000001900000000ctlptl-0.8.43/INSTALL.md# ctlptl Installation Appendix

## Recommended

### Homebrew (Mac/Linux)

```
brew install tilt-dev/tap/ctlptl
```

### Scoop (Windows)

```
scoop bucket add tilt-dev https://github.com/tilt-dev/scoop-bucket
scoop install ctlptl
```

## Alternative

### Docker

Available on Docker Hub as [`tiltdev/ctlptl`](https://hub.docker.com/r/tiltdev/ctlptl/tags).

The image contains the most recent versions of `kind` and `ctlptl`, intended for use in CI environments.
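
A sketch of using the image as a CI job image, modeled on this repo's own `.circleci/config.yml` (the job name and cluster flags here are just illustrative):

```yaml
jobs:
  e2e:
    docker:
      - image: tiltdev/ctlptl:latest
    steps:
      - checkout
      - setup_remote_docker
      - run: ctlptl create cluster kind --registry=ctlptl-registry
```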

### Point and click

Visit [the releases page](https://github.com/tilt-dev/ctlptl/releases) and
download the pre-built binaries for your platform and architecture.

### Go install

For a global installation with Go, use the following command:
```bash
go install github.com/tilt-dev/ctlptl/cmd/ctlptl@latest
```

### Command-line

On macOS:

```bash
CTLPTL_VERSION="0.8.42"
curl -fsSL https://github.com/tilt-dev/ctlptl/releases/download/v$CTLPTL_VERSION/ctlptl.$CTLPTL_VERSION.mac.x86_64.tar.gz | sudo tar -xzv -C /usr/local/bin ctlptl
```

On Linux:

```bash
CTLPTL_VERSION="0.8.42"
curl -fsSL https://github.com/tilt-dev/ctlptl/releases/download/v$CTLPTL_VERSION/ctlptl.$CTLPTL_VERSION.linux.x86_64.tar.gz | sudo tar -xzv -C /usr/local/bin ctlptl
```

On Windows:

```powershell
$CTLPTL_VERSION = "0.8.42"
Invoke-WebRequest "https://github.com/tilt-dev/ctlptl/releases/download/v$CTLPTL_VERSION/ctlptl.$CTLPTL_VERSION.windows.x86_64.zip" -OutFile "ctlptl.zip"
Expand-Archive "ctlptl.zip" -DestinationPath "ctlptl"
Move-Item -Force -Path "ctlptl\ctlptl.exe" -Destination "$home\bin\ctlptl.exe"
```
07070100000008000081A400000000000000000000000168AFB0EA00002C5D000000000000000000000000000000000000001600000000ctlptl-0.8.43/LICENSE                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!)  The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
07070100000009000081A400000000000000000000000168AFB0EA0000028D000000000000000000000000000000000000001700000000ctlptl-0.8.43/MakefileGOPATH = $(shell go env GOPATH)

.PHONY: install test generated fmt tidy e2e publish-ci-image

install:
	CGO_ENABLED=0 go install ./cmd/ctlptl

test:
	go test -timeout 30s -v ./...

generated:
	hack/make-rules/generated.sh

fmt:
	goimports -w -l -local github.com/tilt-dev/ctlptl cmd/ internal/ pkg/

tidy:
	go mod tidy

e2e:
	test/e2e.sh

.PHONY: golangci-lint
golangci-lint: $(GOLANGCILINT)
	$(GOPATH)/bin/golangci-lint run --verbose --timeout=120s

$(GOLANGCILINT):
	(cd /; GO111MODULE=on GOPROXY="direct" GOSUMDB=off go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.1.6)

BUILDER=buildx-multiarch

publish-ci-image:
	./hack/publish-ci-image.sh
0707010000000A000081A400000000000000000000000168AFB0EA00000077000000000000000000000000000000000000001500000000ctlptl-0.8.43/NOTICEctlptl
Copyright 2022 Docker, Inc.

This product includes software developed at Docker, Inc. (https://www.docker.com).
0707010000000B000081A400000000000000000000000168AFB0EA00001A4C000000000000000000000000000000000000001800000000ctlptl-0.8.43/README.md# ctlptl

[![Build Status](https://circleci.com/gh/tilt-dev/ctlptl/tree/main.svg?style=shield)](https://circleci.com/gh/tilt-dev/ctlptl)
[![GoDoc](https://godoc.org/github.com/tilt-dev/ctlptl?status.svg)](https://pkg.go.dev/github.com/tilt-dev/ctlptl)

Want to mess around with Kubernetes, but don't want to spend an ocean of
money on hardware?

Maybe you need a `ctlptl`.

## What is ctlptl?

`ctlptl` (pronounced "cattle patrol") is a CLI for declaratively setting up
local Kubernetes clusters.

Inspired by `kubectl` and
[ClusterAPI's](https://github.com/kubernetes-sigs/cluster-api) `clusterctl`, you
declare your local cluster with YAML and use `ctlptl` to set it up.
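
For example, a minimal cluster spec (the same shape used in the examples
below) looks like this; save it as `cluster.yaml` and run `ctlptl apply -f cluster.yaml`:

```
apiVersion: ctlptl.dev/v1alpha1
kind: Cluster
product: kind
```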

## How do I install it?

Install your cluster of choice: [Docker for
Desktop](https://www.docker.com/products/docker-desktop),
[Kind](https://kind.sigs.k8s.io/), 
[k3d](https://k3d.io/) or
[Minikube](https://minikube.sigs.k8s.io/). Then run:

### Homebrew (Mac/Linux)

```
brew install tilt-dev/tap/ctlptl
```

### Scoop (Windows)

```
scoop bucket add tilt-dev https://github.com/tilt-dev/scoop-bucket
scoop install ctlptl
```

### Go install

```
go install github.com/tilt-dev/ctlptl/cmd/ctlptl@latest
```

### Alternative Options

If automatic installers aren't your cup of tea, check out the [installation
appendix](INSTALL.md) for more options.

## How do I use it?

`ctlptl` supports 4 major commands:

- `ctlptl get` - see all running clusters
- `ctlptl create cluster [product]` - create a cluster and make it the current `kubectl` context
- `ctlptl apply -f cluster.yaml` - ensure a cluster exists, or create one
- `ctlptl delete -f cluster.yaml` - delete a cluster and its state

### Examples

#### Docker for Mac: Enable Kubernetes and set 4 CPUs

Create:

```
ctlptl docker-desktop open
ctlptl create cluster docker-desktop --min-cpus=4
```

or ensure exists:

```
cat <<EOF | ctlptl apply -f -
apiVersion: ctlptl.dev/v1alpha1
kind: Cluster
product: docker-desktop
minCPUs: 4
EOF
```

#### Docker for Mac: Reset and shutdown Kubernetes

```
ctlptl delete cluster docker-desktop
ctlptl docker-desktop quit
```

#### KIND: with a built-in registry at a random port

Create:

```
ctlptl create cluster kind --registry=ctlptl-registry
```

or ensure exists:

```
cat <<EOF | ctlptl apply -f -
apiVersion: ctlptl.dev/v1alpha1
kind: Cluster
product: kind
registry: ctlptl-registry
EOF
```

Then fetch the URL to push images to with:

```
ctlptl get cluster kind-kind -o template --template '{{.status.localRegistryHosting.host}}'
```
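
For example, if that prints `localhost:5005` (the fixed port used in the next
example; with a random port the host will differ), you can push to the registry
with standard Docker commands (`my-image` is a placeholder):

```
docker tag my-image localhost:5005/my-image
docker push localhost:5005/my-image
```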

#### KIND: with a built-in registry at a pre-determined port

Create:

```
ctlptl create registry ctlptl-registry --port=5005
ctlptl create cluster kind --registry=ctlptl-registry
```

or ensure exists:

```
cat <<EOF | ctlptl apply -f -
apiVersion: ctlptl.dev/v1alpha1
kind: Registry
name: ctlptl-registry
port: 5005
---
apiVersion: ctlptl.dev/v1alpha1
kind: Cluster
product: kind
registry: ctlptl-registry
EOF
```

#### K3D: with a built-in registry at a pre-determined port

Create:

```
ctlptl create registry ctlptl-registry --port=5005
ctlptl create cluster k3d --registry=ctlptl-registry
```

or ensure exists:

```
cat <<EOF | ctlptl apply -f -
apiVersion: ctlptl.dev/v1alpha1
kind: Registry
name: ctlptl-registry
port: 5005
---
apiVersion: ctlptl.dev/v1alpha1
kind: Cluster
product: k3d
registry: ctlptl-registry
EOF
```

#### Minikube: with a built-in registry, running Kubernetes v1.18.8

Create:

```
ctlptl create cluster minikube --registry=ctlptl-registry --kubernetes-version=v1.18.8
```

or ensure exists:

```
cat <<EOF | ctlptl apply -f -
apiVersion: ctlptl.dev/v1alpha1
kind: Cluster
product: minikube
registry: ctlptl-registry
kubernetesVersion: v1.18.8
EOF
```

#### Docker for Mac: Limit to 1 CPU and Disable Kubernetes

```
ctlptl docker-desktop set vm.resources.cpus 1
ctlptl docker-desktop set kubernetes.enabled false
```

#### More

For more details, see:

- Example configurations under [./examples](./examples)
- Complete CLI docs under [./docs](./docs/ctlptl.md)
- Cluster API reference under [pkg.go.dev](https://pkg.go.dev/github.com/tilt-dev/ctlptl/pkg/api#Cluster)

## Why did you make this?

At [Tilt](https://tilt.dev/), we want to make Kubernetes a nice environment for local dev.

We found ourselves spending too much time helping teams debug misconfigurations in their dev environment.

We wrote docs like [Choosing a local dev
cluster](https://docs.tilt.dev/choosing_clusters.html) and example repos like
[kind-local](https://github.com/tilt-dev/kind-local),
[minikube-local](https://github.com/tilt-dev/minikube-local), and
[k3d-local](https://github.com/tilt-dev/k3d-local-registry) to help people get set up.

`ctlptl` is a culmination of what we've learned.

## Features

### Current

- Docker for Mac
- Docker for Windows
- [KIND](https://kind.sigs.k8s.io/) and [KIND with a registry](https://kind.sigs.k8s.io/docs/user/local-registry/)
- [Minikube](https://minikube.sigs.k8s.io/) and Minikube with a registry
- [K3D](https://k3d.io/) with a registry
- Creating a cluster on a Remote Docker Host (useful in CI environments like [CircleCI](https://circleci.com/docs/2.0/building-docker-images/))
- Allocating CPUs

### Future Work

- Microk8s
- Rancher Desktop
- Podman
- Minikube on Hyperkit
- Allocating Memory
- Allocating Storage

## Community

`ctlptl` is a work in progress!

We welcome [contributions](CONTRIBUTING.md) from the Kubernetes community to help make this better.

We expect everyone -- users, contributors, followers, and employees alike -- to abide by our [**Code of Conduct**](CODE_OF_CONDUCT.md).

## Goals

- To support common local cluster setup operations, like create, delete, and reset

- To interoperate well with all local Kubernetes solutions, including `docker-desktop`, `kind`, `minikube`, `k3d`, or `microk8s`

- To connect other resources to a local cluster, like image registries, storage, and CPU/memory

- To help infra engineers manage a consistent dev environment

- To encourage standards that enable interop between devtools, like [KEP 1755](https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry)

## Non-Goals

- `ctlptl` is NOT a Kubernetes setup approach that competes with `kind` or `minikube`, but rather complements these tools.

- `ctlptl` is NOT intended to help you set up a remote cluster, or a remote dev sandbox. If you want to declaratively set up prod clusters, check out [`clusterapi`](https://cluster-api.sigs.k8s.io/).

## Privacy

`ctlptl` sends anonymized usage statistics, so we can improve it on every platform. Opt out with `ctlptl analytics opt out`.

## License

Copyright 2022 Docker, Inc.

Licensed under [the Apache License, Version 2.0](LICENSE)

0707010000000C000041ED00000000000000000000000268AFB0EA00000000000000000000000000000000000000000000001200000000ctlptl-0.8.43/cmd0707010000000D000041ED00000000000000000000000268AFB0EA00000000000000000000000000000000000000000000001900000000ctlptl-0.8.43/cmd/ctlptl0707010000000E000081A400000000000000000000000168AFB0EA0000046A000000000000000000000000000000000000002100000000ctlptl-0.8.43/cmd/ctlptl/main.gopackage main

import (
	"flag"
	"fmt"
	"os"
	"runtime/debug"
	"strings"

	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
	"k8s.io/klog/v2"

	"github.com/tilt-dev/ctlptl/pkg/cmd"
)

// Magic variables set by goreleaser
var version string
var date string

func main() {
	cmd.Version = version

	command := cmd.NewRootCommand()
	command.AddCommand(newVersionCommand())

	klog.InitFlags(nil)

	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
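	// Hide the klog flags registered above from --help output; they can still be set.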
	pflag.VisitAll(func(f *pflag.Flag) {
		f.Hidden = true
	})

	if err := command.Execute(); err != nil {
		os.Exit(1)
	}
}

func newVersionCommand() *cobra.Command {
	return &cobra.Command{
		Use:   "version",
		Short: "Current ctlptl version",
		Run: func(_ *cobra.Command, args []string) {
			fmt.Println(versionStamp())
		},
	}
}

func versionStamp() string {
	timeIndex := strings.Index(date, "T")
	if timeIndex != -1 {
		date = date[0:timeIndex]
	}

	if date == "" {
		date = "unknown"
	}

	if version == "" {
		version = "0.0.0-main"
		if buildInfo, ok := debug.ReadBuildInfo(); ok {
			version = buildInfo.Main.Version
		}
	}

	return fmt.Sprintf("v%s, built %s", version, date)
}
0707010000000F000041ED00000000000000000000000268AFB0EA00000000000000000000000000000000000000000000001300000000ctlptl-0.8.43/docs07070100000010000081A400000000000000000000000168AFB0EA00000407000000000000000000000000000000000000001D00000000ctlptl-0.8.43/docs/ctlptl.md## ctlptl

Mess around with local Kubernetes clusters without consequences

### Examples

```
  ctlptl get clusters
  ctlptl apply -f my-cluster.yaml
```

### Options

```
  -h, --help   help for ctlptl
```

### SEE ALSO

* [ctlptl analytics](ctlptl_analytics.md)	 - info and status about tilt-dev analytics
* [ctlptl apply](ctlptl_apply.md)	 - Apply a cluster config to the currently running clusters
* [ctlptl completion](ctlptl_completion.md)	 - Generate the autocompletion script for the specified shell
* [ctlptl create](ctlptl_create.md)	 - Create a cluster or registry
* [ctlptl delete](ctlptl_delete.md)	 - Delete a currently running cluster
* [ctlptl docker-desktop](ctlptl_docker-desktop.md)	 - Debugging tool for the Docker Desktop client
* [ctlptl get](ctlptl_get.md)	 - Read currently running clusters and registries
* [ctlptl socat](ctlptl_socat.md)	 - Use socat to connect components. Experimental.
* [ctlptl version](ctlptl_version.md)	 - Current ctlptl version

###### Auto generated by spf13/cobra on 21-May-2025
07070100000011000081A400000000000000000000000168AFB0EA00000199000000000000000000000000000000000000002700000000ctlptl-0.8.43/docs/ctlptl_analytics.md## ctlptl analytics

info and status about tilt-dev analytics

```
ctlptl analytics
```

### Options

```
  -h, --help   help for analytics
```

### SEE ALSO

* [ctlptl](ctlptl.md)	 - Mess around with local Kubernetes clusters without consequences
* [ctlptl analytics opt](ctlptl_analytics_opt.md)	 - opt-in or -out to tilt-dev analytics collection/upload

###### Auto generated by spf13/cobra on 21-May-2025
07070100000012000081A400000000000000000000000168AFB0EA00000142000000000000000000000000000000000000002B00000000ctlptl-0.8.43/docs/ctlptl_analytics_opt.md## ctlptl analytics opt

opt-in or -out to tilt-dev analytics collection/upload

```
ctlptl analytics opt [flags]
```

### Options

```
  -h, --help   help for opt
```

### SEE ALSO

* [ctlptl analytics](ctlptl_analytics.md)	 - info and status about tilt-dev analytics

###### Auto generated by spf13/cobra on 21-May-2025
07070100000013000081A400000000000000000000000168AFB0EA000004A9000000000000000000000000000000000000002300000000ctlptl-0.8.43/docs/ctlptl_apply.md## ctlptl apply

Apply a cluster config to the currently running clusters

```
ctlptl apply -f FILENAME [flags]
```

### Examples

```
  ctlptl apply -f cluster.yaml
  cat cluster.yaml | ctlptl apply -f -
```

### Options

```
      --allow-missing-template-keys   If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats. (default true)
  -f, --filename strings              
  -h, --help                          help for apply
  -o, --output string                 Output format. One of: (json, yaml, name, go-template, go-template-file, template, templatefile, jsonpath, jsonpath-as-json, jsonpath-file).
      --show-managed-fields           If true, keep the managedFields when printing objects in JSON or YAML format.
      --template string               Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].
```

### SEE ALSO

* [ctlptl](ctlptl.md)	 - Mess around with local Kubernetes clusters without consequences

###### Auto generated by spf13/cobra on 21-May-2025
07070100000014000081A400000000000000000000000168AFB0EA0000036D000000000000000000000000000000000000002800000000ctlptl-0.8.43/docs/ctlptl_completion.md## ctlptl completion

Generate the autocompletion script for the specified shell

### Synopsis

Generate the autocompletion script for ctlptl for the specified shell.
See each sub-command's help for details on how to use the generated script.


### Options

```
  -h, --help   help for completion
```

### SEE ALSO

* [ctlptl](ctlptl.md)	 - Mess around with local Kubernetes clusters without consequences
* [ctlptl completion bash](ctlptl_completion_bash.md)	 - Generate the autocompletion script for bash
* [ctlptl completion fish](ctlptl_completion_fish.md)	 - Generate the autocompletion script for fish
* [ctlptl completion powershell](ctlptl_completion_powershell.md)	 - Generate the autocompletion script for powershell
* [ctlptl completion zsh](ctlptl_completion_zsh.md)	 - Generate the autocompletion script for zsh

###### Auto generated by spf13/cobra on 21-May-2025
07070100000015000081A400000000000000000000000168AFB0EA000003CD000000000000000000000000000000000000002D00000000ctlptl-0.8.43/docs/ctlptl_completion_bash.md## ctlptl completion bash

Generate the autocompletion script for bash

### Synopsis

Generate the autocompletion script for the bash shell.

This script depends on the 'bash-completion' package.
If it is not installed already, you can install it via your OS's package manager.

To load completions in your current shell session:

	source <(ctlptl completion bash)

To load completions for every new session, execute once:

#### Linux:

	ctlptl completion bash > /etc/bash_completion.d/ctlptl

#### macOS:

	ctlptl completion bash > $(brew --prefix)/etc/bash_completion.d/ctlptl

You will need to start a new shell for this setup to take effect.


```
ctlptl completion bash
```

### Options

```
  -h, --help              help for bash
      --no-descriptions   disable completion descriptions
```

### SEE ALSO

* [ctlptl completion](ctlptl_completion.md)	 - Generate the autocompletion script for the specified shell

###### Auto generated by spf13/cobra on 21-May-2025
07070100000016000081A400000000000000000000000168AFB0EA000002F1000000000000000000000000000000000000002D00000000ctlptl-0.8.43/docs/ctlptl_completion_fish.md## ctlptl completion fish

Generate the autocompletion script for fish

### Synopsis

Generate the autocompletion script for the fish shell.

To load completions in your current shell session:

	ctlptl completion fish | source

To load completions for every new session, execute once:

	ctlptl completion fish > ~/.config/fish/completions/ctlptl.fish

You will need to start a new shell for this setup to take effect.


```
ctlptl completion fish [flags]
```

### Options

```
  -h, --help              help for fish
      --no-descriptions   disable completion descriptions
```

### SEE ALSO

* [ctlptl completion](ctlptl_completion.md)	 - Generate the autocompletion script for the specified shell

###### Auto generated by spf13/cobra on 21-May-2025
07070100000017000081A400000000000000000000000168AFB0EA000002D0000000000000000000000000000000000000003300000000ctlptl-0.8.43/docs/ctlptl_completion_powershell.md## ctlptl completion powershell

Generate the autocompletion script for powershell

### Synopsis

Generate the autocompletion script for powershell.

To load completions in your current shell session:

	ctlptl completion powershell | Out-String | Invoke-Expression

To load completions for every new session, add the output of the above command
to your powershell profile.


```
ctlptl completion powershell [flags]
```

### Options

```
  -h, --help              help for powershell
      --no-descriptions   disable completion descriptions
```

### SEE ALSO

* [ctlptl completion](ctlptl_completion.md)	 - Generate the autocompletion script for the specified shell

###### Auto generated by spf13/cobra on 21-May-2025
07070100000018000081A400000000000000000000000168AFB0EA000003F5000000000000000000000000000000000000002C00000000ctlptl-0.8.43/docs/ctlptl_completion_zsh.md## ctlptl completion zsh

Generate the autocompletion script for zsh

### Synopsis

Generate the autocompletion script for the zsh shell.

If shell completion is not already enabled in your environment you will need
to enable it.  You can execute the following once:

	echo "autoload -U compinit; compinit" >> ~/.zshrc

To load completions in your current shell session:

	source <(ctlptl completion zsh)

To load completions for every new session, execute once:

#### Linux:

	ctlptl completion zsh > "${fpath[1]}/_ctlptl"

#### macOS:

	ctlptl completion zsh > $(brew --prefix)/share/zsh/site-functions/_ctlptl

You will need to start a new shell for this setup to take effect.


```
ctlptl completion zsh [flags]
```

### Options

```
  -h, --help              help for zsh
      --no-descriptions   disable completion descriptions
```

### SEE ALSO

* [ctlptl completion](ctlptl_completion.md)	 - Generate the autocompletion script for the specified shell

###### Auto generated by spf13/cobra on 21-May-2025
07070100000019000081A400000000000000000000000168AFB0EA00000278000000000000000000000000000000000000002400000000ctlptl-0.8.43/docs/ctlptl_create.md## ctlptl create

Create a cluster or registry

```
ctlptl create [cluster|registry] [flags]
```

### Examples

```
  ctlptl create cluster docker-desktop
  ctlptl create cluster kind --registry=ctlptl-registry
```

### Options

```
  -h, --help   help for create
```

### SEE ALSO

* [ctlptl](ctlptl.md)	 - Mess around with local Kubernetes clusters without consequences
* [ctlptl create cluster](ctlptl_create_cluster.md)	 - Create a cluster with the given local Kubernetes product
* [ctlptl create registry](ctlptl_create_registry.md)	 - Create a registry with the given name

###### Auto generated by spf13/cobra on 21-May-2025
0707010000001A000081A400000000000000000000000168AFB0EA00000797000000000000000000000000000000000000002C00000000ctlptl-0.8.43/docs/ctlptl_create_cluster.md## ctlptl create cluster

Create a cluster with the given local Kubernetes product

```
ctlptl create cluster [product] [flags]
```

### Examples

```
  ctlptl create cluster docker-desktop
  ctlptl create cluster kind --registry=ctlptl-registry
```

### Options

```
      --allow-missing-template-keys         If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats. (default true)
  -h, --help                                help for cluster
      --kubernetes-version string           Sets the kubernetes version for the cluster, if possible
      --min-cpus int                        Sets the minimum CPUs for the cluster
      --minikube-container-runtime string   Minikube container runtime (only applicable to a minikube cluster)
      --minikube-extra-configs strings      Minikube extra configs (only applicable to a minikube cluster)
      --minikube-start-flags strings        Minikube extra start flags (only applicable to a minikube cluster)
      --name string                         Names the context. If not specified, uses the default cluster name for this Kubernetes product
  -o, --output string                       Output format. One of: (json, yaml, name, go-template, go-template-file, template, templatefile, jsonpath, jsonpath-as-json, jsonpath-file).
      --registry string                     Connect the cluster to the named registry
      --show-managed-fields                 If true, keep the managedFields when printing objects in JSON or YAML format.
      --template string                     Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].
```

### SEE ALSO

* [ctlptl create](ctlptl_create.md)	 - Create a cluster or registry

###### Auto generated by spf13/cobra on 21-May-2025
0707010000001B000081A400000000000000000000000168AFB0EA00000625000000000000000000000000000000000000002D00000000ctlptl-0.8.43/docs/ctlptl_create_registry.md## ctlptl create registry

Create a registry with the given name

```
ctlptl create registry [name] [flags]
```

### Examples

```
  ctlptl create registry ctlptl-registry
  ctlptl create registry ctlptl-registry --port=5000
  ctlptl create registry ctlptl-registry --port=5000 --listen-address 0.0.0.0
```

### Options

```
      --allow-missing-template-keys   If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats. (default true)
  -h, --help                          help for registry
      --image string                  Registry image to use (default "docker.io/library/registry:2")
      --listen-address string         The host's IP address to bind the container to. If not set defaults to 127.0.0.1
  -o, --output string                 Output format. One of: (json, yaml, name, go-template, go-template-file, template, templatefile, jsonpath, jsonpath-as-json, jsonpath-file).
      --port int                      The port to expose the registry on host. If not specified, chooses a random port
      --show-managed-fields           If true, keep the managedFields when printing objects in JSON or YAML format.
      --template string               Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].
```

### SEE ALSO

* [ctlptl create](ctlptl_create.md)	 - Create a cluster or registry

###### Auto generated by spf13/cobra on 21-May-2025
0707010000001C000081A400000000000000000000000168AFB0EA000002D1000000000000000000000000000000000000002400000000ctlptl-0.8.43/docs/ctlptl_delete.md## ctlptl delete

Delete a currently running cluster

```
ctlptl delete -f FILENAME [flags]
```

### Examples

```
  ctlptl delete -f cluster.yaml
  ctlptl delete cluster minikube
```

### Options

```
      --cascade string     If 'true', objects will be deleted recursively. For example, deleting a cluster will delete any connected registries. Defaults to 'false'. (default "false")
  -f, --filename strings   
  -h, --help               help for delete
      --ignore-not-found   If the requested object does not exist the command will return exit code 0.
```
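
For instance, the --cascade flag described above can be added to the earlier examples to delete a cluster together with any registry connected to it (a sketch; the behavior follows the flag description rather than additional documentation):

```
  ctlptl delete -f cluster.yaml --cascade=true
  ctlptl delete cluster minikube --cascade=true
```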

### SEE ALSO

* [ctlptl](ctlptl.md)	 - Mess around with local Kubernetes clusters without consequences

###### Auto generated by spf13/cobra on 21-May-2025
0707010000001D000081A400000000000000000000000168AFB0EA00000376000000000000000000000000000000000000002C00000000ctlptl-0.8.43/docs/ctlptl_docker-desktop.md## ctlptl docker-desktop

Debugging tool for the Docker Desktop client

### Examples

```
  ctlptl docker-desktop settings
  ctlptl docker-desktop set KEY VALUE
```

### Options

```
  -h, --help   help for docker-desktop
```

### SEE ALSO

* [ctlptl](ctlptl.md)	 - Mess around with local Kubernetes clusters without consequences
* [ctlptl docker-desktop open](ctlptl_docker-desktop_open.md)	 - Open docker-desktop
* [ctlptl docker-desktop quit](ctlptl_docker-desktop_quit.md)	 - Shutdown docker-desktop
* [ctlptl docker-desktop reset-cluster](ctlptl_docker-desktop_reset-cluster.md)	 - Reset the docker-desktop Kubernetes cluster
* [ctlptl docker-desktop set](ctlptl_docker-desktop_set.md)	 - Set the docker-desktop settings
* [ctlptl docker-desktop settings](ctlptl_docker-desktop_settings.md)	 - Print the docker-desktop settings

###### Auto generated by spf13/cobra on 21-May-2025
0707010000001E000081A400000000000000000000000168AFB0EA0000013A000000000000000000000000000000000000003100000000ctlptl-0.8.43/docs/ctlptl_docker-desktop_open.md## ctlptl docker-desktop open

Open docker-desktop

```
ctlptl docker-desktop open [flags]
```

### Options

```
  -h, --help   help for open
```

### SEE ALSO

* [ctlptl docker-desktop](ctlptl_docker-desktop.md)	 - Debugging tool for the Docker Desktop client

###### Auto generated by spf13/cobra on 21-May-2025
0707010000001F000081A400000000000000000000000168AFB0EA0000013E000000000000000000000000000000000000003100000000ctlptl-0.8.43/docs/ctlptl_docker-desktop_quit.md## ctlptl docker-desktop quit

Shutdown docker-desktop

```
ctlptl docker-desktop quit [flags]
```

### Options

```
  -h, --help   help for quit
```

### SEE ALSO

* [ctlptl docker-desktop](ctlptl_docker-desktop.md)	 - Debugging tool for the Docker Desktop client

###### Auto generated by spf13/cobra on 21-May-2025
07070100000020000081A400000000000000000000000168AFB0EA0000016D000000000000000000000000000000000000003A00000000ctlptl-0.8.43/docs/ctlptl_docker-desktop_reset-cluster.md## ctlptl docker-desktop reset-cluster

Reset the docker-desktop Kubernetes cluster

```
ctlptl docker-desktop reset-cluster [flags]
```

### Options

```
  -h, --help   help for reset-cluster
```

### SEE ALSO

* [ctlptl docker-desktop](ctlptl_docker-desktop.md)	 - Debugging tool for the Docker Desktop client

###### Auto generated by spf13/cobra on 21-May-2025
07070100000021000081A400000000000000000000000168AFB0EA000002F5000000000000000000000000000000000000003000000000ctlptl-0.8.43/docs/ctlptl_docker-desktop_set.md## ctlptl docker-desktop set

Set the docker-desktop settings

### Synopsis

Set the docker-desktop settings

The first argument is the full path to the setting.

The second argument is the desired value.

Most settings are scalars. vm.fileSharing is a list of paths separated by commas.

```
ctlptl docker-desktop set KEY VALUE [flags]
```

### Examples

```
  ctlptl docker-desktop set vm.resources.cpus 2
  ctlptl docker-desktop set kubernetes.enabled false
  ctlptl docker-desktop set vm.fileSharing /Users,/Volumes,/private,/tmp
```

### Options

```
  -h, --help   help for set
```

### SEE ALSO

* [ctlptl docker-desktop](ctlptl_docker-desktop.md)	 - Debugging tool for the Docker Desktop client

###### Auto generated by spf13/cobra on 21-May-2025
07070100000022000081A400000000000000000000000168AFB0EA00000154000000000000000000000000000000000000003500000000ctlptl-0.8.43/docs/ctlptl_docker-desktop_settings.md## ctlptl docker-desktop settings

Print the docker-desktop settings

```
ctlptl docker-desktop settings [flags]
```

### Options

```
  -h, --help   help for settings
```

### SEE ALSO

* [ctlptl docker-desktop](ctlptl_docker-desktop.md)	 - Debugging tool for the Docker Desktop client

###### Auto generated by spf13/cobra on 21-May-2025
07070100000023000081A400000000000000000000000168AFB0EA00000713000000000000000000000000000000000000002100000000ctlptl-0.8.43/docs/ctlptl_get.md## ctlptl get

Read currently running clusters and registries

### Synopsis

Read the status of currently running clusters and registries.

Supports the same flags as kubectl for selecting
and printing fields. The kubectl cheat sheet may help:

https://kubernetes.io/docs/reference/kubectl/cheatsheet/#formatting-output


```
ctlptl get [type] [name] [flags]
```

### Examples

```
  ctlptl get
  ctlptl get cluster microk8s -o yaml
  ctlptl get cluster kind-kind -o template --template '{{.status.localRegistryHosting.host}}'

```

### Options

```
      --allow-missing-template-keys   If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats. (default true)
      --field-selector string         Selector (field query) to filter on, supports '=', '==', and '!='. (e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.
  -h, --help                          help for get
      --ignore-not-found              If the requested object does not exist the command will return exit code 0.
  -o, --output string                 Output format. One of: (json, yaml, name, go-template, go-template-file, template, templatefile, jsonpath, jsonpath-as-json, jsonpath-file).
      --show-managed-fields           If true, keep the managedFields when printing objects in JSON or YAML format.
      --template string               Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].
```
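
As a sketch of the output flags listed above, the jsonpath format can extract the same field used in the go-template example (the field path is the one already shown in the Examples section; the exact quoting is an assumption):

```
  ctlptl get cluster kind-kind -o jsonpath='{.status.localRegistryHosting.host}'
```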

### SEE ALSO

* [ctlptl](ctlptl.md)	 - Mess around with local Kubernetes clusters without consequences

###### Auto generated by spf13/cobra on 21-May-2025
07070100000024000081A400000000000000000000000168AFB0EA000001A5000000000000000000000000000000000000002300000000ctlptl-0.8.43/docs/ctlptl_socat.md## ctlptl socat

Use socat to connect components. Experimental.

### Options

```
  -h, --help   help for socat
```

### SEE ALSO

* [ctlptl](ctlptl.md)	 - Mess around with local Kubernetes clusters without consequences
* [ctlptl socat connect-remote-docker](ctlptl_socat_connect-remote-docker.md)	 - Connects a local port to a remote port on a machine running Docker

###### Auto generated by spf13/cobra on 21-May-2025
07070100000025000081A400000000000000000000000168AFB0EA000001BE000000000000000000000000000000000000003900000000ctlptl-0.8.43/docs/ctlptl_socat_connect-remote-docker.md## ctlptl socat connect-remote-docker

Connects a local port to a remote port on a machine running Docker

```
ctlptl socat connect-remote-docker [flags]
```

### Examples

```
  ctlptl socat connect-remote-docker [port]

```

### Options

```
  -h, --help   help for connect-remote-docker
```

### SEE ALSO

* [ctlptl socat](ctlptl_socat.md)	 - Use socat to connect components. Experimental.

###### Auto generated by spf13/cobra on 21-May-2025
07070100000026000081A400000000000000000000000168AFB0EA0000011D000000000000000000000000000000000000002500000000ctlptl-0.8.43/docs/ctlptl_version.md## ctlptl version

Current ctlptl version

```
ctlptl version [flags]
```

### Options

```
  -h, --help   help for version
```

### SEE ALSO

* [ctlptl](ctlptl.md)	 - Mess around with local Kubernetes clusters without consequences

###### Auto generated by spf13/cobra on 21-May-2025
07070100000027000041ED00000000000000000000000268AFB0EA00000000000000000000000000000000000000000000001700000000ctlptl-0.8.43/examples07070100000028000081A400000000000000000000000168AFB0EA00000066000000000000000000000000000000000000002B00000000ctlptl-0.8.43/examples/docker-desktop.yamlapiVersion: ctlptl.dev/v1alpha1
kind: Cluster
name: docker-desktop
product: docker-desktop
minCPUs: 4
07070100000029000081A400000000000000000000000168AFB0EA0000009D000000000000000000000000000000000000002700000000ctlptl-0.8.43/examples/k3d-config.yaml# k3d with an embedded config
apiVersion: ctlptl.dev/v1alpha1
kind: Cluster
name: k3d-config
product: k3d
k3d:
  v1alpha5Simple:
    network: custom-network
0707010000002A000081A400000000000000000000000168AFB0EA00000078000000000000000000000000000000000000002900000000ctlptl-0.8.43/examples/k3d-registry.yamlapiVersion: ctlptl.dev/v1alpha1
kind: Registry
name: k3d-test-registry
labels:
  "app": "k3d"
  "k3d.role": "registry"

0707010000002B000081A400000000000000000000000168AFB0EA00000082000000000000000000000000000000000000002000000000ctlptl-0.8.43/examples/k3d.yaml# Creates a k3d cluster with a registry.
apiVersion: ctlptl.dev/v1alpha1
kind: Cluster
product: k3d
registry: ctlptl-k3d-registry
0707010000002C000081A400000000000000000000000168AFB0EA00000080000000000000000000000000000000000000002100000000ctlptl-0.8.43/examples/kind.yaml# Creates a kind cluster with a registry.
apiVersion: ctlptl.dev/v1alpha1
kind: Cluster
product: kind
registry: ctlptl-registry
0707010000002D000081A400000000000000000000000168AFB0EA00000157000000000000000000000000000000000000002F00000000ctlptl-0.8.43/examples/kind_custom_config.yaml# Creates a kind cluster with Kind's custom cluster config
# https://pkg.go.dev/sigs.k8s.io/kind/pkg/apis/config/v1alpha4#Cluster
# Creates a cluster with 2 nodes.
apiVersion: ctlptl.dev/v1alpha1
kind: Cluster
product: kind
registry: ctlptl-registry
kindV1Alpha4Cluster:
  name: my-cluster
  nodes:
  - role: control-plane
  - role: worker
  
0707010000002E000081A400000000000000000000000168AFB0EA0000024E000000000000000000000000000000000000002C00000000ctlptl-0.8.43/examples/kind_extra_args.yaml# Creates a kind cluster with a registry.
apiVersion: ctlptl.dev/v1alpha1
kind: Cluster
product: kind
registry: ctlptl-registry
kindExtraCreateArguments:
# Example 1: Pass --wait to `kind create cluster` to wait for the control plane to be ready.
- "--wait=2m"
# Example 2: Pass --retain to `kind create cluster` to keep the containers around.
# This is super useful for debugging cluster creation issues.
- "--retain"
# Example 3: Pass --verbosity=3 to `kind create cluster` to get more verbose output.
# This is also super useful for debugging cluster creation issues.
- "--verbosity=3"
0707010000002F000081A400000000000000000000000168AFB0EA00000167000000000000000000000000000000000000002F00000000ctlptl-0.8.43/examples/kind_registry_auth.yaml# Creates a kind cluster with Kind's custom cluster config
#
apiVersion: ctlptl.dev/v1alpha1
kind: Cluster
product: kind
registry: ctlptl-registry
registryAuths:
- host: docker.io
  endpoint: https://registry-1.docker.io
  username: <docker hub username>
  password: <docker hub token>
kindV1Alpha4Cluster:
  name: my-cluster
  nodes:
  - role: control-plane
07070100000030000081A400000000000000000000000168AFB0EA000000B7000000000000000000000000000000000000002C00000000ctlptl-0.8.43/examples/minikube-k8s-14.yaml# Creates a minikube cluster with a registry and Kubernetes v1.14
apiVersion: ctlptl.dev/v1alpha1
kind: Cluster
product: minikube
registry: ctlptl-registry
kubernetesVersion: v1.14.0
07070100000031000081A400000000000000000000000168AFB0EA00000093000000000000000000000000000000000000002500000000ctlptl-0.8.43/examples/minikube.yaml# Creates a minikube cluster with a registry.
apiVersion: ctlptl.dev/v1alpha1
kind: Cluster
product: minikube
registry: ctlptl-registry
minCPUs: 3
07070100000032000081A400000000000000000000000168AFB0EA0000009B000000000000000000000000000000000000002500000000ctlptl-0.8.43/examples/registry.yaml# Creates a registry called ctlptl-registry available on 127.0.0.1:5002
apiVersion: ctlptl.dev/v1alpha1
kind: Registry
port: 5002
listenAddress: 127.0.0.1
07070100000033000081A400000000000000000000000168AFB0EA00001A36000000000000000000000000000000000000001500000000ctlptl-0.8.43/go.modmodule github.com/tilt-dev/ctlptl

go 1.24.0

toolchain go1.24.2

require (
	github.com/blang/semver/v4 v4.0.0
	github.com/distribution/reference v0.6.0
	github.com/docker/cli v28.1.1+incompatible
	github.com/docker/docker v28.1.1+incompatible
	github.com/docker/go-connections v0.5.0
	github.com/google/go-cmp v0.7.0
	github.com/mitchellh/go-homedir v1.1.0
	github.com/opencontainers/image-spec v1.1.1
	github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5
	github.com/pkg/errors v0.9.1
	github.com/shirou/gopsutil/v3 v3.24.5
	github.com/spf13/cobra v1.9.1
	github.com/spf13/pflag v1.0.6
	github.com/stretchr/testify v1.10.0
	github.com/tilt-dev/clusterid v0.1.6
	github.com/tilt-dev/localregistry-go v0.0.0-20201021185044-ffc4c827f097
	github.com/tilt-dev/wmclient v0.0.0-20201109174454-1839d0355fbc
	golang.org/x/sync v0.14.0
	gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce
	gopkg.in/yaml.v3 v3.0.1
	k8s.io/api v0.33.1
	k8s.io/apimachinery v0.33.1
	k8s.io/cli-runtime v0.33.1
	k8s.io/client-go v0.33.1
	k8s.io/klog/v2 v2.130.1
	sigs.k8s.io/kind v0.30.0
)

require (
	github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
	github.com/Microsoft/go-winio v0.6.2 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/containerd/log v0.1.0 // indirect
	github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
	github.com/denisbrodbeck/machineid v1.0.1 // indirect
	github.com/docker/distribution v2.8.3+incompatible // indirect
	github.com/docker/docker-credential-helpers v0.9.3 // indirect
	github.com/docker/go-metrics v0.0.1 // indirect
	github.com/docker/go-units v0.5.0 // indirect
	github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
	github.com/emicklei/go-restful/v3 v3.12.2 // indirect
	github.com/felixge/httpsnoop v1.0.4 // indirect
	github.com/fvbommel/sortorder v1.1.0 // indirect
	github.com/fxamacker/cbor/v2 v2.8.0 // indirect
	github.com/go-errors/errors v1.5.1 // indirect
	github.com/go-logr/logr v1.4.2 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/go-ole/go-ole v1.3.0 // indirect
	github.com/go-openapi/jsonpointer v0.21.1 // indirect
	github.com/go-openapi/jsonreference v0.21.0 // indirect
	github.com/go-openapi/swag v0.23.1 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/google/btree v1.1.3 // indirect
	github.com/google/gnostic-models v0.6.9 // indirect
	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
	github.com/google/uuid v1.6.0 // indirect
	github.com/gorilla/mux v1.8.1 // indirect
	github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
	github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/josharian/intern v1.0.0 // indirect
	github.com/json-iterator/go v1.1.12 // indirect
	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
	github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35 // indirect
	github.com/mailru/easyjson v0.9.0 // indirect
	github.com/mattn/go-runewidth v0.0.16 // indirect
	github.com/miekg/pkcs11 v1.1.1 // indirect
	github.com/moby/docker-image-spec v1.3.1 // indirect
	github.com/moby/sys/atomicwriter v0.1.0 // indirect
	github.com/moby/sys/sequential v0.6.0 // indirect
	github.com/moby/term v0.5.2 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
	github.com/morikuni/aec v1.0.0 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/opencontainers/go-digest v1.0.0 // indirect
	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
	github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
	github.com/prometheus/client_golang v1.22.0 // indirect
	github.com/prometheus/client_model v0.6.2 // indirect
	github.com/prometheus/common v0.64.0 // indirect
	github.com/prometheus/procfs v0.16.1 // indirect
	github.com/rivo/uniseg v0.4.7 // indirect
	github.com/russross/blackfriday/v2 v2.1.0 // indirect
	github.com/shoenig/go-m1cpu v0.1.6 // indirect
	github.com/sirupsen/logrus v1.9.3 // indirect
	github.com/theupdateframework/notary v0.7.0 // indirect
	github.com/tklauser/go-sysconf v0.3.15 // indirect
	github.com/tklauser/numcpus v0.10.0 // indirect
	github.com/x448/float16 v0.8.4 // indirect
	github.com/xlab/treeprint v1.2.0 // indirect
	github.com/yusufpapurcu/wmi v1.2.4 // indirect
	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect
	go.opentelemetry.io/otel v1.35.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 // indirect
	go.opentelemetry.io/otel/metric v1.35.0 // indirect
	go.opentelemetry.io/otel/sdk v1.35.0 // indirect
	go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect
	go.opentelemetry.io/otel/trace v1.35.0 // indirect
	go.opentelemetry.io/proto/otlp v1.6.0 // indirect
	golang.org/x/net v0.40.0 // indirect
	golang.org/x/oauth2 v0.30.0 // indirect
	golang.org/x/sys v0.33.0 // indirect
	golang.org/x/term v0.32.0 // indirect
	golang.org/x/text v0.25.0 // indirect
	golang.org/x/time v0.11.0 // indirect
	google.golang.org/genproto/googleapis/api v0.0.0-20250512202823-5a2f75b736a9 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20250512202823-5a2f75b736a9 // indirect
	google.golang.org/grpc v1.72.1 // indirect
	google.golang.org/protobuf v1.36.6 // indirect
	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
	gopkg.in/inf.v0 v0.9.1 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	gotest.tools/v3 v3.0.3 // indirect
	k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
	k8s.io/utils v0.0.0-20250502105355-0f33e8f1c979 // indirect
	sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
	sigs.k8s.io/kustomize/api v0.19.0 // indirect
	sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect
	sigs.k8s.io/randfill v1.0.0 // indirect
	sigs.k8s.io/structured-merge-diff/v4 v4.7.0 // indirect
	sigs.k8s.io/yaml v1.4.0 // indirect
)
07070100000034000081A400000000000000000000000168AFB0EA00010348000000000000000000000000000000000000001500000000ctlptl-0.8.43/go.sumcloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/beorn7/perks v0.0.0-20150223135152-b965b613227f/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw=
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/bugsnag/bugsnag-go v1.0.5-0.20150529004307-13fd6b8acda0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/denisbrodbeck/machineid v1.0.0/go.mod h1:dJUwb7PTidGDeYyUBmXZ2GphQBbjJCrnectwCyxcUSI=
github.com/denisbrodbeck/machineid v1.0.1 h1:geKr9qtkB876mXguW2X6TU4ZynleN6ezuMSRhl4D7AQ=
github.com/denisbrodbeck/machineid v1.0.1/go.mod h1:dJUwb7PTidGDeYyUBmXZ2GphQBbjJCrnectwCyxcUSI=
github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/cli v28.1.1+incompatible h1:eyUemzeI45DY7eDPuwUcmDyDj1pM98oD5MdSpiItp8k=
github.com/docker/cli v28.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v28.1.1+incompatible h1:49M11BFLsVO1gxY9UX9p/zwkE/rswggs8AdFmXQw51I=
github.com/docker/docker v28.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8=
github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo=
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0=
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c/go.mod h1:CADgU4DSXK5QUlFslkQu2yW2TKzFZcXq/leZfM0UH5Q=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/dvsekhvalnov/jose2go v0.0.0-20170216131308-f21a8cedbbae/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fvbommel/sortorder v1.1.0 h1:fUmoe+HLsBTctBDoaBwpQo5N+nrCp8g/BjKb/6ZQmYw=
github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU=
github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk=
github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic=
github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU=
github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0=
github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jinzhu/gorm v0.0.0-20170222002820-5409931a1bb8/go.mod h1:Vla75njaFJ8clLU1W44h34PjIkijhjHIYnZxMqCdxqo=
github.com/jinzhu/inflection v0.0.0-20170102125226-1c35d901db3d/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lib/pq v0.0.0-20150723085316-0dad96c0b94f/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35 h1:PpXWgLPs+Fqr325bN2FD2ISlRRztXibcX6e8f5FR5Dc=
github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg=
github.com/magiconair/properties v1.5.3/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.6.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v0.0.0-20150613213606-2caf8efc9366/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw=
github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs=
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU=
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI=
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/prometheus/client_golang v0.9.0-pre1.0.20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4=
github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI=
github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk=
github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v0.0.0-20150530192845-be5ff3e4840c/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4DzbAiAiEL3c=
github.com/theupdateframework/notary v0.7.0/go.mod h1:c9DRxcmhHmVLDay4/2fUYdISnHqbFDGRSlXPO0AhYWw=
github.com/tilt-dev/clusterid v0.1.6 h1:nkU78/AifCAemucL2lWeZRzZLprif9hjIc6keGfwUyo=
github.com/tilt-dev/clusterid v0.1.6/go.mod h1:0Ymq2EAiHlLt61243A6xqS9VRcmGqUAATZM+tryv4WI=
github.com/tilt-dev/localregistry-go v0.0.0-20201021185044-ffc4c827f097 h1:CiCHb20O+poFO331eFOiXRscunvAhKBfDmDoY2mf45A=
github.com/tilt-dev/localregistry-go v0.0.0-20201021185044-ffc4c827f097/go.mod h1:SX7bKYACP+RsddxA+NBkfVzr5DOr5ranTirgT7xlxjA=
github.com/tilt-dev/wmclient v0.0.0-20201109174454-1839d0355fbc h1:wGkAoZhrvnmq93B4W2v+agiPl7xzqUaxXkxmKrwJ6bc=
github.com/tilt-dev/wmclient v0.0.0-20201109174454-1839d0355fbc/go.mod h1:n01fG3LbImzxBP3GGCTHkgXuPeJusWg6xv0QYGm9HtE=
github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8Ol49K4=
github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4=
github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso=
github.com/tklauser/numcpus v0.10.0/go.mod h1:BiTKazU708GQTYF4mB+cmlpT2Is1gLk7XVuEeem8LsQ=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ=
go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0 h1:QcFwRrZLc82r8wODjvyCbP7Ifp3UANaBSmhDSFjnqSc=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0/go.mod h1:CXIWhUomyWBG/oY2/r/kLp6K/cmx9e/7DLpBuuGdLCA=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 h1:m639+BofXTvcY1q8CGs4ItwQarYtJPOWmVobfM1HpVI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0/go.mod h1:LjReUci/F4BUyv+y4dwnq3h/26iNOeC3wAIqgvTIZVo=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk=
go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY=
go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg=
go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o=
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
go.opentelemetry.io/proto/otlp v1.6.0 h1:jQjP+AQyTf+Fe7OKj/MfkDrmK4MNVtw2NpXsf9fefDI=
go.opentelemetry.io/proto/otlp v1.6.0/go.mod h1:cicgGehlFuNdgZkcALOCh3VE6K/u2tAjzlRhDwmVpZc=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8=
golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201106081118-db71ae66460a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto/googleapis/api v0.0.0-20250512202823-5a2f75b736a9 h1:WvBuA5rjZx9SNIzgcU53OohgZy6lKSus++uY4xLaWKc=
google.golang.org/genproto/googleapis/api v0.0.0-20250512202823-5a2f75b736a9/go.mod h1:W3S/3np0/dPWsWLi1h/UymYctGXaGBM2StwzD0y140U=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250512202823-5a2f75b736a9 h1:IkAfh6J/yllPtpYFU0zZN1hUPYdT0ogkBT/9hMxHjvg=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250512202823-5a2f75b736a9/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA=
google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/cenkalti/backoff.v2 v2.2.1/go.mod h1:S0QdOvT2AlerfSBkp0O+dk+bbIMaNbEmVk876gPCthU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1/go.mod h1:WbjuEoo1oadwzQ4apSDU+JTvmllEHtsNHS6y7vFc7iw=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.18.4/go.mod h1:lOIQAKYgai1+vz9J7YcDZwC26Z0zQewYOGWdyIPUUQ4=
k8s.io/api v0.33.1 h1:tA6Cf3bHnLIrUK4IqEgb2v++/GYUtqiu9sRVk3iBXyw=
k8s.io/api v0.33.1/go.mod h1:87esjTn9DRSRTD4fWMXamiXxJhpOIREjWOSjsW1kEHw=
k8s.io/apimachinery v0.18.4/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
k8s.io/apimachinery v0.33.1 h1:mzqXWV8tW9Rw4VeW9rEkqvnxj59k1ezDUl20tFK/oM4=
k8s.io/apimachinery v0.33.1/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
k8s.io/cli-runtime v0.18.4/go.mod h1:9/hS/Cuf7NVzWR5F/5tyS6xsnclxoPLVtwhnkJG1Y4g=
k8s.io/cli-runtime v0.33.1 h1:TvpjEtF71ViFmPeYMj1baZMJR4iWUEplklsUQ7D3quA=
k8s.io/cli-runtime v0.33.1/go.mod h1:9dz5Q4Uh8io4OWCLiEf/217DXwqNgiTS/IOuza99VZE=
k8s.io/client-go v0.18.4/go.mod h1:f5sXwL4yAZRkAtzOxRWUhA/N8XzGCb+nPZI8PfobZ9g=
k8s.io/client-go v0.33.1 h1:ZZV/Ks2g92cyxWkRRnfUDsnhNn28eFpt26aGc8KbXF4=
k8s.io/client-go v0.33.1/go.mod h1:JAsUrl1ArO7uRVFWfcj6kOomSlCv+JpvIsp6usAGefA=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20200603063816-c1c6865ac451/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20250502105355-0f33e8f1c979 h1:jgJW5IePPXLGB8e/1wvd0Ich9QE97RvvF3a8J3fP/Lg=
k8s.io/utils v0.0.0-20250502105355-0f33e8f1c979/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/kind v0.30.0 h1:2Xi1KFEfSMm0XDcvKnUt15ZfgRPCT0OnCBbpgh8DztY=
sigs.k8s.io/kind v0.30.0/go.mod h1:FSqriGaoTPruiXWfRnUXNykF8r2t+fHtK0P0m1AbGF8=
sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU=
sigs.k8s.io/kustomize/api v0.19.0 h1:F+2HB2mU1MSiR9Hp1NEgoU2q9ItNOaBJl0I4Dlus5SQ=
sigs.k8s.io/kustomize/api v0.19.0/go.mod h1:/BbwnivGVcBh1r+8m3tH1VNxJmHSk1PzP5fkP6lbL1o=
sigs.k8s.io/kustomize/kyaml v0.19.0 h1:RFge5qsO1uHhwJsu3ipV7RNolC7Uozc0jUBC/61XSlA=
sigs.k8s.io/kustomize/kyaml v0.19.0/go.mod h1:FeKD5jEOH+FbZPpqUghBP8mrLjJ3+zD3/rf9NNu1cwY=
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI=
sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
07070100000035000041ED00000000000000000000000268AFB0EA00000000000000000000000000000000000000000000001300000000ctlptl-0.8.43/hack07070100000036000081A400000000000000000000000168AFB0EA000006F4000000000000000000000000000000000000001E00000000ctlptl-0.8.43/hack/Dockerfile# Builds a Docker image with:
# - ctlptl
# - docker
# - kubectl
# - kind
# - socat
#
# A good base image for anyone who wants to use ctlptl in a CI environment
# to set up a one-time-use cluster.
#
# Built with goreleaser.
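#
# As a rough sketch (not part of this build), a CI job might use the published
# image something like this; the image tag and the registry flag below are
# illustrative assumptions rather than anything pinned by this Dockerfile:
#
#   docker run --rm \
#     -v /var/run/docker.sock:/var/run/docker.sock \
#     docker/tilt-ctlptl \
#     ctlptl create cluster kind --registry=ctlptl-registry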

FROM debian:bookworm-slim

RUN apt update && apt install -y curl ca-certificates liblz4-tool rsync socat

# Install docker CLI
RUN set -exu \
  # Add Docker's official GPG key:
  && install -m 0755 -d /etc/apt/keyrings \
  && curl -fsSL https://download.docker.com/linux/debian/gpg -o /etc/apt/keyrings/docker.asc \
  && chmod a+r /etc/apt/keyrings/docker.asc \
  # Add the repository to Apt sources: 
  && echo \
    "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/debian \
    $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
  tee /etc/apt/sources.list.d/docker.list > /dev/null \
  && apt update \
  && apt install -y docker-ce-cli=5:25.0.3-1~debian.12~bookworm 

# Install kubectl client
ARG TARGETARCH
ENV KUBECTL_VERSION=v1.29.1
RUN curl -LO "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/${TARGETARCH}/kubectl" \
    && curl -LO "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/${TARGETARCH}/kubectl.sha256" \
    && echo "$(cat kubectl.sha256)  kubectl" | sha256sum --check \
    && install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl

# Install Kind
ENV KIND_VERSION=v0.30.0
RUN set -exu \
  && KIND_URL="https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-linux-$TARGETARCH" \
  && curl --silent --show-error --location --fail --retry 3 --output ./kind-linux-$TARGETARCH "$KIND_URL" \
  && chmod +x ./kind-linux-$TARGETARCH \
  && mv ./kind-linux-$TARGETARCH /usr/local/bin/kind

COPY ctlptl /usr/local/bin/ctlptl
07070100000037000081A400000000000000000000000168AFB0EA0000022B000000000000000000000000000000000000002600000000ctlptl-0.8.43/hack/boilerplate.go.txt/*
Copyright 2020 Tilt Dev

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
07070100000038000041ED00000000000000000000000268AFB0EA00000000000000000000000000000000000000000000001E00000000ctlptl-0.8.43/hack/make-rules07070100000039000081ED00000000000000000000000168AFB0EA00000172000000000000000000000000000000000000002B00000000ctlptl-0.8.43/hack/make-rules/generated.sh#!/bin/bash

set -exuo pipefail

REPO_ROOT=$(dirname "$(dirname "$(dirname "$0")")")
cd "${REPO_ROOT}"

GOROOT="$(go env GOROOT)"
rm -f pkg/api/*.deepcopy.go
rm -f pkg/api/*/*.deepcopy.go
go install k8s.io/code-generator/cmd/deepcopy-gen@v0.31.2
deepcopy-gen \
   --go-header-file hack/boilerplate.go.txt \
   ./pkg/api \
   ./pkg/api/k3dv1alpha4 \
   ./pkg/api/k3dv1alpha5
0707010000003A000081ED00000000000000000000000168AFB0EA0000025C000000000000000000000000000000000000002700000000ctlptl-0.8.43/hack/publish-ci-image.sh#!/bin/bash

set -euo pipefail

BUILDER=buildx-multiarch
IMAGE_NAME=docker/tilt-ctlptl-ci

docker buildx inspect $BUILDER || docker buildx create --name=$BUILDER --driver=docker-container --driver-opt=network=host
docker buildx build --builder=$BUILDER --pull --platform=linux/amd64,linux/arm64 --push -t "$IMAGE_NAME" -f .circleci/Dockerfile .

# Pull the image we just pushed and resolve its digest so the CI config can pin to it.
docker pull "$IMAGE_NAME"
DIGEST="$(docker inspect --format '{{.RepoDigests}}' "$IMAGE_NAME" | tr -d '[]')"

yq eval -i ".jobs.e2e-remote-docker.docker[0].image = \"$DIGEST\"" .circleci/config.yml

0707010000003B000081ED00000000000000000000000168AFB0EA000003F3000000000000000000000000000000000000002A00000000ctlptl-0.8.43/hack/release-update-docs.sh#!/bin/bash
#
# Updates the Tilt repo with the latest version info
# and regenerates the CLI docs.
#
# Usage:
# scripts/update-tilt-repo.sh $VERSION
# where VERSION is of the form 0.1.0

set -euo pipefail

if [[ "${GITHUB_TOKEN-}" == "" ]]; then
    echo "Missing GITHUB_TOKEN"
    exit 1
fi

VERSION=${1//v/}
VERSION_PATTERN="^[0-9]+\\.[0-9]+\\.[0-9]+$"
if ! [[ $VERSION =~ $VERSION_PATTERN ]]; then
    echo "Version did not match expected pattern. Actual: $VERSION"
    exit 1
fi

DIR=$(dirname "$0")
cd "$DIR/.."

ROOT=$(mktemp -d)
git clone https://tilt-releaser:"$GITHUB_TOKEN"@github.com/tilt-dev/ctlptl "$ROOT"

set -x
cd "$ROOT"
sed -i -E "s/CTLPTL_VERSION=\".*\"/CTLPTL_VERSION=\"$VERSION\"/" INSTALL.md
sed -i -E "s/CTLPTL_VERSION = \".*\"/CTLPTL_VERSION = \"$VERSION\"/" INSTALL.md
go run ./cmd/ctlptl docs ./docs
git add .
git config --global user.email "it@tilt.dev"
git config --global user.name "Tilt Dev"
git commit -a -m "Update version numbers: $VERSION"
git push origin main

rm -fR "$ROOT"
0707010000003C000081ED00000000000000000000000168AFB0EA000001D3000000000000000000000000000000000000001E00000000ctlptl-0.8.43/hack/release.sh#!/bin/bash
#
# Do a complete release. Run on CI.

set -ex

if [[ "$GITHUB_TOKEN" == "" ]]; then
    echo "Missing GITHUB_TOKEN"
    exit 1
fi

if [[ "$DOCKER_TOKEN" == "" ]]; then
    echo "Missing DOCKER_TOKEN"
    exit 1
fi

DIR=$(dirname "$0")
cd "$DIR/.."

echo "$DOCKER_TOKEN" | docker login --username "$DOCKER_USERNAME" --password-stdin

git fetch --tags
goreleaser --clean

VERSION=$(git describe --abbrev=0 --tags)

./hack/release-update-docs.sh "$VERSION"
0707010000003D000041ED00000000000000000000000268AFB0EA00000000000000000000000000000000000000000000001700000000ctlptl-0.8.43/internal0707010000003E000041ED00000000000000000000000268AFB0EA00000000000000000000000000000000000000000000001C00000000ctlptl-0.8.43/internal/dctr0707010000003F000081A400000000000000000000000168AFB0EA000017FD000000000000000000000000000000000000002300000000ctlptl-0.8.43/internal/dctr/run.gopackage dctr

import (
	"context"
	"fmt"
	"io"

	"github.com/distribution/reference"
	"github.com/docker/cli/cli/command"
	cliflags "github.com/docker/cli/cli/flags"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/image"
	"github.com/docker/docker/api/types/network"
	registrytypes "github.com/docker/docker/api/types/registry"
	"github.com/docker/docker/api/types/system"
	"github.com/docker/docker/client"
	"github.com/docker/docker/registry"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"github.com/spf13/pflag"
	"k8s.io/cli-runtime/pkg/genericclioptions"
)

// Client is a Docker container client: the subset of the Docker API that ctlptl needs.
type Client interface {
	DaemonHost() string
	ImagePull(ctx context.Context, image string, options image.PullOptions) (io.ReadCloser, error)

	ContainerList(ctx context.Context, options container.ListOptions) ([]container.Summary, error)
	ContainerInspect(ctx context.Context, containerID string) (container.InspectResponse, error)
	ContainerRemove(ctx context.Context, id string, options container.RemoveOptions) error
	ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.CreateResponse, error)
	ContainerStart(ctx context.Context, containerID string, options container.StartOptions) error

	ServerVersion(ctx context.Context) (types.Version, error)
	Info(ctx context.Context) (system.Info, error)
	NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error
	NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error
}

type CLI interface {
	Client() Client
	AuthInfo(ctx context.Context, repoInfo *registry.RepositoryInfo, cmdName string) (string, registrytypes.RequestAuthConfig, error)
}

type realCLI struct {
	cli *command.DockerCli
}

func (c *realCLI) Client() Client {
	return c.cli.Client()
}

func (c *realCLI) AuthInfo(ctx context.Context, repoInfo *registry.RepositoryInfo, cmdName string) (string, registrytypes.RequestAuthConfig, error) {
	authConfig := command.ResolveAuthConfig(c.cli.ConfigFile(), repoInfo.Index)
	requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(c.cli, repoInfo.Index, cmdName)

	auth, err := registrytypes.EncodeAuthConfig(authConfig)
	if err != nil {
		return "", nil, errors.Wrap(err, "authInfo#EncodeAuthToBase64")
	}
	return auth, requestPrivilege, nil
}

func NewCLI(streams genericclioptions.IOStreams) (CLI, error) {
	dockerCli, err := command.NewDockerCli(
		command.WithOutputStream(streams.Out),
		command.WithErrorStream(streams.ErrOut))
	if err != nil {
		return nil, fmt.Errorf("failed to create new docker API: %v", err)
	}

	opts := cliflags.NewClientOptions()
	flagSet := pflag.NewFlagSet("docker", pflag.ContinueOnError)
	opts.InstallFlags(flagSet)
	opts.SetDefaultOptions(flagSet)
	err = dockerCli.Initialize(opts)
	if err != nil {
		return nil, fmt.Errorf("initializing docker client: %v", err)
	}

	// A hack to see if initialization failed.
	// https://github.com/docker/cli/issues/4489
	endpoint := dockerCli.DockerEndpoint()
	if endpoint.Host == "" {
		return nil, fmt.Errorf("initializing docker client: no valid endpoint")
	}
	return &realCLI{cli: dockerCli}, nil
}

func NewAPIClient(streams genericclioptions.IOStreams) (Client, error) {
	cli, err := NewCLI(streams)
	if err != nil {
		return nil, err
	}
	return cli.Client(), nil
}
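
// Example (an illustrative sketch from a caller's point of view, not code used
// by ctlptl itself; assumes the caller imports "os"):
//
//	streams := genericclioptions.IOStreams{In: os.Stdin, Out: os.Stdout, ErrOut: os.Stderr}
//	c, err := dctr.NewAPIClient(streams)
//	if err != nil {
//		// handle the error
//	}
//	_ = c.DaemonHost()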

// RemoveIfNecessary force-removes the named container if it exists; a missing
// container is not treated as an error.
func RemoveIfNecessary(ctx context.Context, c Client, name string) error {
	co, err := c.ContainerInspect(ctx, name)
	if err != nil {
		if client.IsErrNotFound(err) {
			return nil
		}
		return err
	}
	if co.ContainerJSONBase == nil {
		return nil
	}

	return c.ContainerRemove(ctx, co.ID, container.RemoveOptions{
		Force: true,
	})
}

// Run starts a detached background support container (like socat or the registry):
// an already-running container with the given name is reused, a stopped one is
// removed and recreated, and the image is pulled if it's missing.
func Run(ctx context.Context, cli CLI, name string, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig) error {
	c := cli.Client()

	ctr, err := c.ContainerInspect(ctx, name)
	if err == nil && (ctr.ContainerJSONBase != nil && ctr.State.Running) {
		// The service is already running!
		return nil
	} else if err == nil {
		// The service exists, but is not running
		err := c.ContainerRemove(ctx, name, container.RemoveOptions{Force: true})
		if err != nil {
			return fmt.Errorf("creating %s: %v", name, err)
		}
	} else if !client.IsErrNotFound(err) {
		return fmt.Errorf("inspecting %s: %v", name, err)
	}

	resp, err := c.ContainerCreate(ctx, config, hostConfig, networkingConfig, nil, name)
	if err != nil {
		if !client.IsErrNotFound(err) {
			return fmt.Errorf("creating %s: %v", name, err)
		}

		err := pull(ctx, cli, config.Image)
		if err != nil {
			return fmt.Errorf("pulling image %s: %v", config.Image, err)
		}

		resp, err = c.ContainerCreate(ctx, config, hostConfig, networkingConfig, nil, name)
		if err != nil {
			return fmt.Errorf("creating %s: %v", name, err)
		}
	}

	id := resp.ID
	err = c.ContainerStart(ctx, id, container.StartOptions{})
	if err != nil {
		return fmt.Errorf("starting %s: %v", name, err)
	}
	return nil
}
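
// Example (sketch): the socat controller in internal/socat calls Run roughly
// like this to keep a long-lived helper container alive; the config below
// mirrors that call site.
//
//	err := dctr.Run(ctx, cli, "ctlptl-portforward-service",
//		&container.Config{
//			Image:      "alpine/socat",
//			Entrypoint: []string{"/bin/sh"},
//			Cmd:        []string{"-c", "while true; do sleep 1000; done"},
//		},
//		&container.HostConfig{
//			NetworkMode:   "host",
//			RestartPolicy: container.RestartPolicy{Name: "always"},
//		},
//		&network.NetworkingConfig{})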

func pull(ctx context.Context, cli CLI, img string) error {
	c := cli.Client()

	ref, err := reference.ParseNormalizedNamed(img)
	if err != nil {
		return fmt.Errorf("could not parse image %q: %v", img, err)
	}

	repoInfo, err := registry.ParseRepositoryInfo(ref)
	if err != nil {
		return fmt.Errorf("could not parse registry for %q: %v", img, err)
	}

	encodedAuth, requestPrivilege, err := cli.AuthInfo(ctx, repoInfo, "pull")
	if err != nil {
		return fmt.Errorf("could not authenticate: %v", err)
	}

	resp, err := c.ImagePull(ctx, img, image.PullOptions{
		RegistryAuth:  encodedAuth,
		PrivilegeFunc: requestPrivilege,
	})
	if err != nil {
		return fmt.Errorf("pulling image %s: %v", img, err)
	}
	defer func() {
		_ = resp.Close()
	}()

	_, _ = io.Copy(io.Discard, resp)
	return nil
}
07070100000040000041ED00000000000000000000000268AFB0EA00000000000000000000000000000000000000000000001C00000000ctlptl-0.8.43/internal/exec07070100000041000081A400000000000000000000000168AFB0EA000006EB000000000000000000000000000000000000002400000000ctlptl-0.8.43/internal/exec/exec.gopackage exec

import (
	"context"
	"io"
	"os/exec"
	"strings"

	"k8s.io/cli-runtime/pkg/genericclioptions"
)

// A small package that wraps os/exec command execution behind an interface so it
// can be mocked out in tests.

type CmdRunner interface {
	Run(ctx context.Context, cmd string, args ...string) error
	RunIO(ctx context.Context, iostreams genericclioptions.IOStreams, cmd string, args ...string) error
}

type RealCmdRunner struct{}

func (RealCmdRunner) Run(ctx context.Context, cmd string, args ...string) error {
	// exec.ExitError only has its Stderr field populated when the command is run
	// via Output() (and stderr isn't otherwise captured), so we use Output()
	// instead of Run() and discard stdout.
	_, err := exec.CommandContext(ctx, cmd, args...).Output()

	return err
}

func (RealCmdRunner) RunIO(ctx context.Context, iostreams genericclioptions.IOStreams, cmd string, args ...string) error {
	c := exec.CommandContext(ctx, cmd, args...)
	c.Stdin = iostreams.In
	c.Stderr = iostreams.ErrOut
	c.Stdout = iostreams.Out
	return c.Run()
}

type FakeCmdRunner struct {
	handler   func(argv []string) string
	LastArgs  []string
	LastStdin string
}

func NewFakeCmdRunner(handler func(argv []string) string) *FakeCmdRunner {
	return &FakeCmdRunner{handler: handler}
}

func (f *FakeCmdRunner) Run(ctx context.Context, cmd string, args ...string) error {
	f.LastArgs = append([]string{cmd}, args...)
	_ = f.handler(append([]string{cmd}, args...))
	return nil
}

func (f *FakeCmdRunner) RunIO(ctx context.Context, iostreams genericclioptions.IOStreams, cmd string, args ...string) error {
	f.LastArgs = append([]string{cmd}, args...)

	if iostreams.In != nil {
		in, err := io.ReadAll(iostreams.In)
		if err != nil {
			return err
		}

		f.LastStdin = string(in)
	} else {
		f.LastStdin = ""
	}

	out := f.handler(append([]string{cmd}, args...))
	_, err := io.Copy(iostreams.Out, strings.NewReader(out))
	return err
}
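
// Example (sketch of test usage, not part of this package): tests can swap in
// FakeCmdRunner to capture the arguments a command would have been invoked with.
//
//	runner := exec.NewFakeCmdRunner(func(argv []string) string {
//		return "kind\n"
//	})
//	_ = runner.Run(context.Background(), "kind", "get", "clusters")
//	// runner.LastArgs is now []string{"kind", "get", "clusters"}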
07070100000042000041ED00000000000000000000000268AFB0EA00000000000000000000000000000000000000000000001D00000000ctlptl-0.8.43/internal/socat07070100000043000081A400000000000000000000000168AFB0EA00000CD9000000000000000000000000000000000000002600000000ctlptl-0.8.43/internal/socat/socat.go// Manage socat network routers for remote docker instances.
package socat

import (
	"context"
	"fmt"
	"net"
	"os/exec"
	"strings"
	"time"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/network"
	"github.com/shirou/gopsutil/v3/process"

	"github.com/tilt-dev/ctlptl/internal/dctr"
)

const serviceName = "ctlptl-portforward-service"

type Controller struct {
	cli dctr.CLI
}

func NewController(cli dctr.CLI) *Controller {
	return &Controller{cli: cli}
}

// Connect a port on the local machine to a port on a remote docker machine.
func (c *Controller) ConnectRemoteDockerPort(ctx context.Context, port int) error {
	err := c.StartRemotePortforwarder(ctx)
	if err != nil {
		return err
	}
	return c.StartLocalPortforwarder(ctx, port)
}
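
// Example (illustrative sketch): given a dctr.CLI, a caller could forward a
// registry port from a remote Docker host like this; port 5000 is just an
// example value.
//
//	ctl := socat.NewController(cli)
//	if err := ctl.ConnectRemoteDockerPort(ctx, 5000); err != nil {
//		// handle the error
//	}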

// Create a port-forwarding server on the same machine that's running
// Docker. This server accepts connections and routes them to localhost ports
// on the same machine.
func (c *Controller) StartRemotePortforwarder(ctx context.Context) error {
	return dctr.Run(
		ctx,
		c.cli,
		serviceName,
		&container.Config{
			Hostname:   serviceName,
			Image:      "alpine/socat",
			Entrypoint: []string{"/bin/sh"},
			Cmd:        []string{"-c", "while true; do sleep 1000; done"},
		},
		&container.HostConfig{
			NetworkMode:   "host",
			RestartPolicy: container.RestartPolicy{Name: "always"},
		},
		&network.NetworkingConfig{})
}

// Returns the socat process listening on a port, plus its commandline.
func (c *Controller) socatProcessOnPort(port int) (*process.Process, string, error) {
	processes, err := process.Processes()
	if err != nil {
		return nil, "", err
	}
	for _, p := range processes {
		cmdline, err := p.Cmdline()
		if err != nil {
			continue
		}
		if strings.HasPrefix(cmdline, fmt.Sprintf("socat TCP-LISTEN:%d,", port)) {
			return p, cmdline, nil
		}
	}
	return nil, "", nil
}

// Create a port-forwarding server on the local machine, forwarding connections
// to the same port on the remote Docker server.
func (c *Controller) StartLocalPortforwarder(ctx context.Context, port int) error {
	args := []string{
		fmt.Sprintf("TCP-LISTEN:%d,reuseaddr,fork", port),
		fmt.Sprintf("EXEC:'docker exec -i %s socat STDIO TCP:localhost:%d'", serviceName, port),
	}

	existing, cmdline, err := c.socatProcessOnPort(port)
	if err != nil {
		return fmt.Errorf("start portforwarder: %v", err)
	}

	if existing != nil {
		expectedCmdline := strings.Join(append([]string{"socat"}, args...), " ")
		if expectedCmdline == cmdline {
			// Already running.
			return nil
		}

		// Kill and restart.
		err := existing.KillWithContext(ctx)
		if err != nil {
			return fmt.Errorf("start portforwarder: %v", err)
		}
	}

	cmd := exec.Command("socat", args...)
	err = cmd.Start()
	if err != nil {
		_, err := exec.LookPath("socat")
		if err != nil {
			return fmt.Errorf("socat not installed: ctlptl requires 'socat' to be installed when setting up clusters on a remote Docker daemon")
		}

		return fmt.Errorf("creating local portforwarder: %v", err)
	}

	for i := 0; i < 100; i++ {
		conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%d", port))
		if err == nil {
			_ = conn.Close()
			return nil
		}
		time.Sleep(100 * time.Millisecond)
	}
	return fmt.Errorf("timed out waiting for local portforwarder")
}
07070100000044000041ED00000000000000000000000268AFB0EA00000000000000000000000000000000000000000000001200000000ctlptl-0.8.43/pkg07070100000045000041ED00000000000000000000000268AFB0EA00000000000000000000000000000000000000000000001600000000ctlptl-0.8.43/pkg/api07070100000046000081A400000000000000000000000168AFB0EA0000016C000000000000000000000000000000000000002300000000ctlptl-0.8.43/pkg/api/accessors.gopackage api

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func (c *Cluster) GetObjectMeta() metav1.Object {
	return &metav1.ObjectMeta{
		Name: c.Name,
	}
}

func (r *Registry) GetObjectMeta() metav1.Object {
	return &metav1.ObjectMeta{
		Name: r.Name,
	}
}

var _ metav1.ObjectMetaAccessor = &Cluster{}
var _ metav1.ObjectMetaAccessor = &Registry{}
07070100000047000081A400000000000000000000000168AFB0EA0000011B000000000000000000000000000000000000001D00000000ctlptl-0.8.43/pkg/api/doc.go// Package v1alpha1 implements the v1alpha1 apiVersion of ctlptl's cluster
// configuration
//
// Borrows the approach of clientcmd/api and KIND, maintaining an API similar to
// other Kubernetes APIs without pulling in the API machinery.
//
// +k8s:deepcopy-gen=package
package api
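
// An example cluster config expressed in this API (a sketch based on ctlptl's
// documented YAML format; the field values are illustrative):
//
//	apiVersion: ctlptl.dev/v1alpha1
//	kind: Cluster
//	product: kind
//	registry: ctlptl-registry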
07070100000048000081A400000000000000000000000168AFB0EA000020A4000000000000000000000000000000000000002C00000000ctlptl-0.8.43/pkg/api/generated.deepcopy.go//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
Copyright 2020 Tilt Dev

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by deepcopy-gen. DO NOT EDIT.

package api

import (
	k3dv1alpha4 "github.com/tilt-dev/ctlptl/pkg/api/k3dv1alpha4"
	k3dv1alpha5 "github.com/tilt-dev/ctlptl/pkg/api/k3dv1alpha5"
	localregistrygo "github.com/tilt-dev/localregistry-go"
	runtime "k8s.io/apimachinery/pkg/runtime"
	v1alpha4 "sigs.k8s.io/kind/pkg/apis/config/v1alpha4"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Cluster) DeepCopyInto(out *Cluster) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	if in.KindV1Alpha4Cluster != nil {
		in, out := &in.KindV1Alpha4Cluster, &out.KindV1Alpha4Cluster
		*out = new(v1alpha4.Cluster)
		(*in).DeepCopyInto(*out)
	}
	if in.Minikube != nil {
		in, out := &in.Minikube, &out.Minikube
		*out = new(MinikubeCluster)
		(*in).DeepCopyInto(*out)
	}
	if in.K3D != nil {
		in, out := &in.K3D, &out.K3D
		*out = new(K3DCluster)
		(*in).DeepCopyInto(*out)
	}
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster.
func (in *Cluster) DeepCopy() *Cluster {
	if in == nil {
		return nil
	}
	out := new(Cluster)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Cluster) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterList) DeepCopyInto(out *ClusterList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Cluster, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList.
func (in *ClusterList) DeepCopy() *ClusterList {
	if in == nil {
		return nil
	}
	out := new(ClusterList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
	*out = *in
	in.CreationTimestamp.DeepCopyInto(&out.CreationTimestamp)
	if in.LocalRegistryHosting != nil {
		in, out := &in.LocalRegistryHosting, &out.LocalRegistryHosting
		*out = new(localregistrygo.LocalRegistryHostingV1)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus.
func (in *ClusterStatus) DeepCopy() *ClusterStatus {
	if in == nil {
		return nil
	}
	out := new(ClusterStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K3DCluster) DeepCopyInto(out *K3DCluster) {
	*out = *in
	if in.V1Alpha5Simple != nil {
		in, out := &in.V1Alpha5Simple, &out.V1Alpha5Simple
		*out = new(k3dv1alpha5.SimpleConfig)
		(*in).DeepCopyInto(*out)
	}
	if in.V1Alpha4Simple != nil {
		in, out := &in.V1Alpha4Simple, &out.V1Alpha4Simple
		*out = new(k3dv1alpha4.SimpleConfig)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3DCluster.
func (in *K3DCluster) DeepCopy() *K3DCluster {
	if in == nil {
		return nil
	}
	out := new(K3DCluster)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MinikubeCluster) DeepCopyInto(out *MinikubeCluster) {
	*out = *in
	if in.ExtraConfigs != nil {
		in, out := &in.ExtraConfigs, &out.ExtraConfigs
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.StartFlags != nil {
		in, out := &in.StartFlags, &out.StartFlags
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MinikubeCluster.
func (in *MinikubeCluster) DeepCopy() *MinikubeCluster {
	if in == nil {
		return nil
	}
	out := new(MinikubeCluster)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Registry) DeepCopyInto(out *Registry) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.Env != nil {
		in, out := &in.Env, &out.Env
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Registry.
func (in *Registry) DeepCopy() *Registry {
	if in == nil {
		return nil
	}
	out := new(Registry)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Registry) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RegistryList) DeepCopyInto(out *RegistryList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Registry, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryList.
func (in *RegistryList) DeepCopy() *RegistryList {
	if in == nil {
		return nil
	}
	out := new(RegistryList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RegistryList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RegistryStatus) DeepCopyInto(out *RegistryStatus) {
	*out = *in
	in.CreationTimestamp.DeepCopyInto(&out.CreationTimestamp)
	if in.Networks != nil {
		in, out := &in.Networks, &out.Networks
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.Env != nil {
		in, out := &in.Env, &out.Env
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Warnings != nil {
		in, out := &in.Warnings, &out.Warnings
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryStatus.
func (in *RegistryStatus) DeepCopy() *RegistryStatus {
	if in == nil {
		return nil
	}
	out := new(RegistryStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TypeMeta) DeepCopyInto(out *TypeMeta) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypeMeta.
func (in *TypeMeta) DeepCopy() *TypeMeta {
	if in == nil {
		return nil
	}
	out := new(TypeMeta)
	in.DeepCopyInto(out)
	return out
}
07070100000049000041ED00000000000000000000000268AFB0EA00000000000000000000000000000000000000000000002200000000ctlptl-0.8.43/pkg/api/k3dv1alpha40707010000004A000081A400000000000000000000000168AFB0EA00000084000000000000000000000000000000000000002900000000ctlptl-0.8.43/pkg/api/k3dv1alpha4/doc.go// Package k3dv1alpha4 implements the v1alpha4 apiVersion of k3d's config file.
//
// +k8s:deepcopy-gen=package
package k3dv1alpha4
0707010000004B000081A400000000000000000000000168AFB0EA00003123000000000000000000000000000000000000003800000000ctlptl-0.8.43/pkg/api/k3dv1alpha4/generated.deepcopy.go//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
Copyright 2020 Tilt Dev

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by deepcopy-gen. DO NOT EDIT.

package k3dv1alpha4

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EnvVarWithNodeFilters) DeepCopyInto(out *EnvVarWithNodeFilters) {
	*out = *in
	if in.NodeFilters != nil {
		in, out := &in.NodeFilters, &out.NodeFilters
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVarWithNodeFilters.
func (in *EnvVarWithNodeFilters) DeepCopy() *EnvVarWithNodeFilters {
	if in == nil {
		return nil
	}
	out := new(EnvVarWithNodeFilters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K3sArgWithNodeFilters) DeepCopyInto(out *K3sArgWithNodeFilters) {
	*out = *in
	if in.NodeFilters != nil {
		in, out := &in.NodeFilters, &out.NodeFilters
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sArgWithNodeFilters.
func (in *K3sArgWithNodeFilters) DeepCopy() *K3sArgWithNodeFilters {
	if in == nil {
		return nil
	}
	out := new(K3sArgWithNodeFilters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LabelWithNodeFilters) DeepCopyInto(out *LabelWithNodeFilters) {
	*out = *in
	if in.NodeFilters != nil {
		in, out := &in.NodeFilters, &out.NodeFilters
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelWithNodeFilters.
func (in *LabelWithNodeFilters) DeepCopy() *LabelWithNodeFilters {
	if in == nil {
		return nil
	}
	out := new(LabelWithNodeFilters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMeta.
func (in *ObjectMeta) DeepCopy() *ObjectMeta {
	if in == nil {
		return nil
	}
	out := new(ObjectMeta)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PortWithNodeFilters) DeepCopyInto(out *PortWithNodeFilters) {
	*out = *in
	if in.NodeFilters != nil {
		in, out := &in.NodeFilters, &out.NodeFilters
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortWithNodeFilters.
func (in *PortWithNodeFilters) DeepCopy() *PortWithNodeFilters {
	if in == nil {
		return nil
	}
	out := new(PortWithNodeFilters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SimpleConfig) DeepCopyInto(out *SimpleConfig) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	out.ObjectMeta = in.ObjectMeta
	out.ExposeAPI = in.ExposeAPI
	if in.Volumes != nil {
		in, out := &in.Volumes, &out.Volumes
		*out = make([]VolumeWithNodeFilters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Ports != nil {
		in, out := &in.Ports, &out.Ports
		*out = make([]PortWithNodeFilters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	in.Options.DeepCopyInto(&out.Options)
	if in.Env != nil {
		in, out := &in.Env, &out.Env
		*out = make([]EnvVarWithNodeFilters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	in.Registries.DeepCopyInto(&out.Registries)
	if in.HostAliases != nil {
		in, out := &in.HostAliases, &out.HostAliases
		*out = make([]SimpleConfigHostAlias, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleConfig.
func (in *SimpleConfig) DeepCopy() *SimpleConfig {
	if in == nil {
		return nil
	}
	out := new(SimpleConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SimpleConfigHostAlias) DeepCopyInto(out *SimpleConfigHostAlias) {
	*out = *in
	if in.Hostnames != nil {
		in, out := &in.Hostnames, &out.Hostnames
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleConfigHostAlias.
func (in *SimpleConfigHostAlias) DeepCopy() *SimpleConfigHostAlias {
	if in == nil {
		return nil
	}
	out := new(SimpleConfigHostAlias)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SimpleConfigOptions) DeepCopyInto(out *SimpleConfigOptions) {
	*out = *in
	in.K3dOptions.DeepCopyInto(&out.K3dOptions)
	in.K3sOptions.DeepCopyInto(&out.K3sOptions)
	out.KubeconfigOptions = in.KubeconfigOptions
	in.Runtime.DeepCopyInto(&out.Runtime)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleConfigOptions.
func (in *SimpleConfigOptions) DeepCopy() *SimpleConfigOptions {
	if in == nil {
		return nil
	}
	out := new(SimpleConfigOptions)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SimpleConfigOptionsK3d) DeepCopyInto(out *SimpleConfigOptionsK3d) {
	*out = *in
	in.Loadbalancer.DeepCopyInto(&out.Loadbalancer)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleConfigOptionsK3d.
func (in *SimpleConfigOptionsK3d) DeepCopy() *SimpleConfigOptionsK3d {
	if in == nil {
		return nil
	}
	out := new(SimpleConfigOptionsK3d)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SimpleConfigOptionsK3dLoadbalancer) DeepCopyInto(out *SimpleConfigOptionsK3dLoadbalancer) {
	*out = *in
	if in.ConfigOverrides != nil {
		in, out := &in.ConfigOverrides, &out.ConfigOverrides
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleConfigOptionsK3dLoadbalancer.
func (in *SimpleConfigOptionsK3dLoadbalancer) DeepCopy() *SimpleConfigOptionsK3dLoadbalancer {
	if in == nil {
		return nil
	}
	out := new(SimpleConfigOptionsK3dLoadbalancer)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SimpleConfigOptionsK3s) DeepCopyInto(out *SimpleConfigOptionsK3s) {
	*out = *in
	if in.ExtraArgs != nil {
		in, out := &in.ExtraArgs, &out.ExtraArgs
		*out = make([]K3sArgWithNodeFilters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.NodeLabels != nil {
		in, out := &in.NodeLabels, &out.NodeLabels
		*out = make([]LabelWithNodeFilters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleConfigOptionsK3s.
func (in *SimpleConfigOptionsK3s) DeepCopy() *SimpleConfigOptionsK3s {
	if in == nil {
		return nil
	}
	out := new(SimpleConfigOptionsK3s)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SimpleConfigOptionsKubeconfig) DeepCopyInto(out *SimpleConfigOptionsKubeconfig) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleConfigOptionsKubeconfig.
func (in *SimpleConfigOptionsKubeconfig) DeepCopy() *SimpleConfigOptionsKubeconfig {
	if in == nil {
		return nil
	}
	out := new(SimpleConfigOptionsKubeconfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SimpleConfigOptionsRuntime) DeepCopyInto(out *SimpleConfigOptionsRuntime) {
	*out = *in
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		*out = make([]LabelWithNodeFilters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleConfigOptionsRuntime.
func (in *SimpleConfigOptionsRuntime) DeepCopy() *SimpleConfigOptionsRuntime {
	if in == nil {
		return nil
	}
	out := new(SimpleConfigOptionsRuntime)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SimpleConfigRegistries) DeepCopyInto(out *SimpleConfigRegistries) {
	*out = *in
	if in.Use != nil {
		in, out := &in.Use, &out.Use
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Create != nil {
		in, out := &in.Create, &out.Create
		*out = new(SimpleConfigRegistryCreateConfig)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleConfigRegistries.
func (in *SimpleConfigRegistries) DeepCopy() *SimpleConfigRegistries {
	if in == nil {
		return nil
	}
	out := new(SimpleConfigRegistries)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SimpleConfigRegistryCreateConfig) DeepCopyInto(out *SimpleConfigRegistryCreateConfig) {
	*out = *in
	if in.Volumes != nil {
		in, out := &in.Volumes, &out.Volumes
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleConfigRegistryCreateConfig.
func (in *SimpleConfigRegistryCreateConfig) DeepCopy() *SimpleConfigRegistryCreateConfig {
	if in == nil {
		return nil
	}
	out := new(SimpleConfigRegistryCreateConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SimpleExposureOpts) DeepCopyInto(out *SimpleExposureOpts) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleExposureOpts.
func (in *SimpleExposureOpts) DeepCopy() *SimpleExposureOpts {
	if in == nil {
		return nil
	}
	out := new(SimpleExposureOpts)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TypeMeta) DeepCopyInto(out *TypeMeta) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypeMeta.
func (in *TypeMeta) DeepCopy() *TypeMeta {
	if in == nil {
		return nil
	}
	out := new(TypeMeta)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeWithNodeFilters) DeepCopyInto(out *VolumeWithNodeFilters) {
	*out = *in
	if in.NodeFilters != nil {
		in, out := &in.NodeFilters, &out.NodeFilters
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeWithNodeFilters.
func (in *VolumeWithNodeFilters) DeepCopy() *VolumeWithNodeFilters {
	if in == nil {
		return nil
	}
	out := new(VolumeWithNodeFilters)
	in.DeepCopyInto(out)
	return out
}
0707010000004C000081A400000000000000000000000168AFB0EA00001F2C000000000000000000000000000000000000002B00000000ctlptl-0.8.43/pkg/api/k3dv1alpha4/types.gopackage k3dv1alpha4

import "time"

// NOTE(nicks): Forked from
// https://github.com/k3d-io/k3d/blob/v5.4.6/pkg/config/v1alpha4/types.go
// Modified to work with k8s api infra.

// TypeMeta partially copies apimachinery/pkg/apis/meta/v1.TypeMeta
// No need for a direct dependence; the fields are stable.
type TypeMeta struct {
	Kind       string `json:"kind,omitempty" yaml:"kind,omitempty"`
	APIVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty"`
}

type ObjectMeta struct {
	Name string `mapstructure:"name,omitempty" json:"name,omitempty" yaml:"name,omitempty"`
}

type VolumeWithNodeFilters struct {
	Volume      string   `mapstructure:"volume" yaml:"volume,omitempty" json:"volume,omitempty"`
	NodeFilters []string `mapstructure:"nodeFilters" yaml:"nodeFilters,omitempty" json:"nodeFilters,omitempty"`
}

type PortWithNodeFilters struct {
	Port        string   `mapstructure:"port" yaml:"port,omitempty" json:"port,omitempty"`
	NodeFilters []string `mapstructure:"nodeFilters" yaml:"nodeFilters,omitempty" json:"nodeFilters,omitempty"`
}

type LabelWithNodeFilters struct {
	Label       string   `mapstructure:"label" yaml:"label,omitempty" json:"label,omitempty"`
	NodeFilters []string `mapstructure:"nodeFilters" yaml:"nodeFilters,omitempty" json:"nodeFilters,omitempty"`
}

type EnvVarWithNodeFilters struct {
	EnvVar      string   `mapstructure:"envVar" yaml:"envVar,omitempty" json:"envVar,omitempty"`
	NodeFilters []string `mapstructure:"nodeFilters" yaml:"nodeFilters,omitempty" json:"nodeFilters,omitempty"`
}

type K3sArgWithNodeFilters struct {
	Arg         string   `mapstructure:"arg" yaml:"arg,omitempty" json:"arg,omitempty"`
	NodeFilters []string `mapstructure:"nodeFilters" yaml:"nodeFilters,omitempty" json:"nodeFilters,omitempty"`
}

type SimpleConfigRegistryCreateConfig struct {
	Name     string   `mapstructure:"name" yaml:"name,omitempty" json:"name,omitempty"`
	Host     string   `mapstructure:"host" yaml:"host,omitempty" json:"host,omitempty"`
	HostPort string   `mapstructure:"hostPort" yaml:"hostPort,omitempty" json:"hostPort,omitempty"`
	Image    string   `mapstructure:"image" yaml:"image,omitempty" json:"image,omitempty"`
	Volumes  []string `mapstructure:"volumes" yaml:"volumes,omitempty" json:"volumes,omitempty"`
}

// SimpleConfigOptionsKubeconfig describes the set of options referring to the kubeconfig during cluster creation.
type SimpleConfigOptionsKubeconfig struct {
	UpdateDefaultKubeconfig bool `mapstructure:"updateDefaultKubeconfig" yaml:"updateDefaultKubeconfig,omitempty" json:"updateDefaultKubeconfig,omitempty"` // default: true
	SwitchCurrentContext    bool `mapstructure:"switchCurrentContext" yaml:"switchCurrentContext,omitempty" json:"switchCurrentContext,omitempty"`          //nolint:lll    // default: true
}

type SimpleConfigOptions struct {
	K3dOptions        SimpleConfigOptionsK3d        `mapstructure:"k3d" yaml:"k3d" json:"k3d"`
	K3sOptions        SimpleConfigOptionsK3s        `mapstructure:"k3s" yaml:"k3s" json:"k3s"`
	KubeconfigOptions SimpleConfigOptionsKubeconfig `mapstructure:"kubeconfig" yaml:"kubeconfig" json:"kubeconfig"`
	Runtime           SimpleConfigOptionsRuntime    `mapstructure:"runtime" yaml:"runtime" json:"runtime"`
}

type SimpleConfigOptionsRuntime struct {
	GPURequest    string                 `mapstructure:"gpuRequest" yaml:"gpuRequest,omitempty" json:"gpuRequest,omitempty"`
	ServersMemory string                 `mapstructure:"serversMemory" yaml:"serversMemory,omitempty" json:"serversMemory,omitempty"`
	AgentsMemory  string                 `mapstructure:"agentsMemory" yaml:"agentsMemory,omitempty" json:"agentsMemory,omitempty"`
	HostPidMode   bool                   `mapstructure:"hostPidMode" yaml:"hostPidMode,omitempty" json:"hostPidMode,omitempty"`
	Labels        []LabelWithNodeFilters `mapstructure:"labels" yaml:"labels,omitempty" json:"labels,omitempty"`
}

type SimpleConfigOptionsK3d struct {
	Wait                bool                               `mapstructure:"wait" yaml:"wait" json:"wait"`
	Timeout             time.Duration                      `mapstructure:"timeout" yaml:"timeout,omitempty" json:"timeout,omitempty"`
	DisableLoadbalancer bool                               `mapstructure:"disableLoadbalancer" yaml:"disableLoadbalancer" json:"disableLoadbalancer"`
	DisableImageVolume  bool                               `mapstructure:"disableImageVolume" yaml:"disableImageVolume" json:"disableImageVolume"`
	NoRollback          bool                               `mapstructure:"disableRollback" yaml:"disableRollback" json:"disableRollback"`
	Loadbalancer        SimpleConfigOptionsK3dLoadbalancer `mapstructure:"loadbalancer" yaml:"loadbalancer,omitempty" json:"loadbalancer,omitempty"`
}

type SimpleConfigOptionsK3dLoadbalancer struct {
	ConfigOverrides []string `mapstructure:"configOverrides" yaml:"configOverrides,omitempty" json:"configOverrides,omitempty"`
}

type SimpleConfigOptionsK3s struct {
	ExtraArgs  []K3sArgWithNodeFilters `mapstructure:"extraArgs" yaml:"extraArgs,omitempty" json:"extraArgs,omitempty"`
	NodeLabels []LabelWithNodeFilters  `mapstructure:"nodeLabels" yaml:"nodeLabels,omitempty" json:"nodeLabels,omitempty"`
}

type SimpleConfigRegistries struct {
	Use    []string                          `mapstructure:"use" yaml:"use,omitempty" json:"use,omitempty"`
	Create *SimpleConfigRegistryCreateConfig `mapstructure:"create" yaml:"create,omitempty" json:"create,omitempty"`
	Config string                            `mapstructure:"config" yaml:"config,omitempty" json:"config,omitempty"` // registries.yaml (k3s config for containerd registry override)
}

type SimpleConfigHostAlias struct {
	IP        string   `mapstructure:"ip" yaml:"ip" json:"ip"`
	Hostnames []string `mapstructure:"hostnames" yaml:"hostnames" json:"hostnames"`
}

// SimpleConfig describes the toplevel k3d configuration file.
type SimpleConfig struct {
	TypeMeta     `yaml:",inline"`
	ObjectMeta   `mapstructure:"metadata" yaml:"metadata,omitempty" json:"metadata,omitempty"`
	Servers      int                     `mapstructure:"servers" yaml:"servers,omitempty" json:"servers,omitempty"` //nolint:lll    // default 1
	Agents       int                     `mapstructure:"agents" yaml:"agents,omitempty" json:"agents,omitempty"`    //nolint:lll    // default 0
	ExposeAPI    SimpleExposureOpts      `mapstructure:"kubeAPI" yaml:"kubeAPI,omitempty" json:"kubeAPI,omitempty"`
	Image        string                  `mapstructure:"image" yaml:"image,omitempty" json:"image,omitempty"`
	Network      string                  `mapstructure:"network" yaml:"network,omitempty" json:"network,omitempty"`
	Subnet       string                  `mapstructure:"subnet" yaml:"subnet,omitempty" json:"subnet,omitempty"`
	ClusterToken string                  `mapstructure:"token" yaml:"clusterToken,omitempty" json:"clusterToken,omitempty"` // default: auto-generated
	Volumes      []VolumeWithNodeFilters `mapstructure:"volumes" yaml:"volumes,omitempty" json:"volumes,omitempty"`
	Ports        []PortWithNodeFilters   `mapstructure:"ports" yaml:"ports,omitempty" json:"ports,omitempty"`
	Options      SimpleConfigOptions     `mapstructure:"options" yaml:"options,omitempty" json:"options,omitempty"`
	Env          []EnvVarWithNodeFilters `mapstructure:"env" yaml:"env,omitempty" json:"env,omitempty"`
	Registries   SimpleConfigRegistries  `mapstructure:"registries" yaml:"registries,omitempty" json:"registries,omitempty"`
	HostAliases  []SimpleConfigHostAlias `mapstructure:"hostAliases" yaml:"hostAliases,omitempty" json:"hostAliases,omitempty"`
}

// SimpleExposureOpts provides a simplified syntax compared to the original k3d.ExposureOpts
type SimpleExposureOpts struct {
	Host     string `mapstructure:"host" yaml:"host,omitempty" json:"host,omitempty"`
	HostIP   string `mapstructure:"hostIP" yaml:"hostIP,omitempty" json:"hostIP,omitempty"`
	HostPort string `mapstructure:"hostPort" yaml:"hostPort,omitempty" json:"hostPort,omitempty"`
}
0707010000004D000041ED00000000000000000000000268AFB0EA00000000000000000000000000000000000000000000002200000000ctlptl-0.8.43/pkg/api/k3dv1alpha50707010000004E000081A400000000000000000000000168AFB0EA00000084000000000000000000000000000000000000002900000000ctlptl-0.8.43/pkg/api/k3dv1alpha5/doc.go// Package k3dv1alpha5 implements the v1alpha5 apiVersion of k3d's config file.
//
// +k8s:deepcopy-gen=package
package k3dv1alpha5
0707010000004F000081A400000000000000000000000168AFB0EA00003331000000000000000000000000000000000000003800000000ctlptl-0.8.43/pkg/api/k3dv1alpha5/generated.deepcopy.go//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
Copyright 2020 Tilt Dev

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by deepcopy-gen. DO NOT EDIT.

package k3dv1alpha5

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EnvVarWithNodeFilters) DeepCopyInto(out *EnvVarWithNodeFilters) {
	*out = *in
	if in.NodeFilters != nil {
		in, out := &in.NodeFilters, &out.NodeFilters
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVarWithNodeFilters.
func (in *EnvVarWithNodeFilters) DeepCopy() *EnvVarWithNodeFilters {
	if in == nil {
		return nil
	}
	out := new(EnvVarWithNodeFilters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K3sArgWithNodeFilters) DeepCopyInto(out *K3sArgWithNodeFilters) {
	*out = *in
	if in.NodeFilters != nil {
		in, out := &in.NodeFilters, &out.NodeFilters
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sArgWithNodeFilters.
func (in *K3sArgWithNodeFilters) DeepCopy() *K3sArgWithNodeFilters {
	if in == nil {
		return nil
	}
	out := new(K3sArgWithNodeFilters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LabelWithNodeFilters) DeepCopyInto(out *LabelWithNodeFilters) {
	*out = *in
	if in.NodeFilters != nil {
		in, out := &in.NodeFilters, &out.NodeFilters
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelWithNodeFilters.
func (in *LabelWithNodeFilters) DeepCopy() *LabelWithNodeFilters {
	if in == nil {
		return nil
	}
	out := new(LabelWithNodeFilters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMeta.
func (in *ObjectMeta) DeepCopy() *ObjectMeta {
	if in == nil {
		return nil
	}
	out := new(ObjectMeta)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PortWithNodeFilters) DeepCopyInto(out *PortWithNodeFilters) {
	*out = *in
	if in.NodeFilters != nil {
		in, out := &in.NodeFilters, &out.NodeFilters
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortWithNodeFilters.
func (in *PortWithNodeFilters) DeepCopy() *PortWithNodeFilters {
	if in == nil {
		return nil
	}
	out := new(PortWithNodeFilters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SimpleConfig) DeepCopyInto(out *SimpleConfig) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	out.ObjectMeta = in.ObjectMeta
	out.ExposeAPI = in.ExposeAPI
	if in.Volumes != nil {
		in, out := &in.Volumes, &out.Volumes
		*out = make([]VolumeWithNodeFilters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Ports != nil {
		in, out := &in.Ports, &out.Ports
		*out = make([]PortWithNodeFilters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	in.Options.DeepCopyInto(&out.Options)
	if in.Env != nil {
		in, out := &in.Env, &out.Env
		*out = make([]EnvVarWithNodeFilters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	in.Registries.DeepCopyInto(&out.Registries)
	if in.HostAliases != nil {
		in, out := &in.HostAliases, &out.HostAliases
		*out = make([]SimpleConfigHostAlias, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleConfig.
func (in *SimpleConfig) DeepCopy() *SimpleConfig {
	if in == nil {
		return nil
	}
	out := new(SimpleConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SimpleConfigHostAlias) DeepCopyInto(out *SimpleConfigHostAlias) {
	*out = *in
	if in.Hostnames != nil {
		in, out := &in.Hostnames, &out.Hostnames
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleConfigHostAlias.
func (in *SimpleConfigHostAlias) DeepCopy() *SimpleConfigHostAlias {
	if in == nil {
		return nil
	}
	out := new(SimpleConfigHostAlias)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SimpleConfigOptions) DeepCopyInto(out *SimpleConfigOptions) {
	*out = *in
	in.K3dOptions.DeepCopyInto(&out.K3dOptions)
	in.K3sOptions.DeepCopyInto(&out.K3sOptions)
	out.KubeconfigOptions = in.KubeconfigOptions
	in.Runtime.DeepCopyInto(&out.Runtime)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleConfigOptions.
func (in *SimpleConfigOptions) DeepCopy() *SimpleConfigOptions {
	if in == nil {
		return nil
	}
	out := new(SimpleConfigOptions)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SimpleConfigOptionsK3d) DeepCopyInto(out *SimpleConfigOptionsK3d) {
	*out = *in
	in.Loadbalancer.DeepCopyInto(&out.Loadbalancer)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleConfigOptionsK3d.
func (in *SimpleConfigOptionsK3d) DeepCopy() *SimpleConfigOptionsK3d {
	if in == nil {
		return nil
	}
	out := new(SimpleConfigOptionsK3d)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SimpleConfigOptionsK3dLoadbalancer) DeepCopyInto(out *SimpleConfigOptionsK3dLoadbalancer) {
	*out = *in
	if in.ConfigOverrides != nil {
		in, out := &in.ConfigOverrides, &out.ConfigOverrides
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleConfigOptionsK3dLoadbalancer.
func (in *SimpleConfigOptionsK3dLoadbalancer) DeepCopy() *SimpleConfigOptionsK3dLoadbalancer {
	if in == nil {
		return nil
	}
	out := new(SimpleConfigOptionsK3dLoadbalancer)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SimpleConfigOptionsK3s) DeepCopyInto(out *SimpleConfigOptionsK3s) {
	*out = *in
	if in.ExtraArgs != nil {
		in, out := &in.ExtraArgs, &out.ExtraArgs
		*out = make([]K3sArgWithNodeFilters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.NodeLabels != nil {
		in, out := &in.NodeLabels, &out.NodeLabels
		*out = make([]LabelWithNodeFilters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleConfigOptionsK3s.
func (in *SimpleConfigOptionsK3s) DeepCopy() *SimpleConfigOptionsK3s {
	if in == nil {
		return nil
	}
	out := new(SimpleConfigOptionsK3s)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SimpleConfigOptionsKubeconfig) DeepCopyInto(out *SimpleConfigOptionsKubeconfig) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleConfigOptionsKubeconfig.
func (in *SimpleConfigOptionsKubeconfig) DeepCopy() *SimpleConfigOptionsKubeconfig {
	if in == nil {
		return nil
	}
	out := new(SimpleConfigOptionsKubeconfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SimpleConfigOptionsRuntime) DeepCopyInto(out *SimpleConfigOptionsRuntime) {
	*out = *in
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		*out = make([]LabelWithNodeFilters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Ulimits != nil {
		in, out := &in.Ulimits, &out.Ulimits
		*out = make([]Ulimit, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleConfigOptionsRuntime.
func (in *SimpleConfigOptionsRuntime) DeepCopy() *SimpleConfigOptionsRuntime {
	if in == nil {
		return nil
	}
	out := new(SimpleConfigOptionsRuntime)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SimpleConfigRegistries) DeepCopyInto(out *SimpleConfigRegistries) {
	*out = *in
	if in.Use != nil {
		in, out := &in.Use, &out.Use
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Create != nil {
		in, out := &in.Create, &out.Create
		*out = new(SimpleConfigRegistryCreateConfig)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleConfigRegistries.
func (in *SimpleConfigRegistries) DeepCopy() *SimpleConfigRegistries {
	if in == nil {
		return nil
	}
	out := new(SimpleConfigRegistries)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SimpleConfigRegistryCreateConfig) DeepCopyInto(out *SimpleConfigRegistryCreateConfig) {
	*out = *in
	if in.Volumes != nil {
		in, out := &in.Volumes, &out.Volumes
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleConfigRegistryCreateConfig.
func (in *SimpleConfigRegistryCreateConfig) DeepCopy() *SimpleConfigRegistryCreateConfig {
	if in == nil {
		return nil
	}
	out := new(SimpleConfigRegistryCreateConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SimpleExposureOpts) DeepCopyInto(out *SimpleExposureOpts) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleExposureOpts.
func (in *SimpleExposureOpts) DeepCopy() *SimpleExposureOpts {
	if in == nil {
		return nil
	}
	out := new(SimpleExposureOpts)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TypeMeta) DeepCopyInto(out *TypeMeta) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypeMeta.
func (in *TypeMeta) DeepCopy() *TypeMeta {
	if in == nil {
		return nil
	}
	out := new(TypeMeta)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Ulimit) DeepCopyInto(out *Ulimit) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ulimit.
func (in *Ulimit) DeepCopy() *Ulimit {
	if in == nil {
		return nil
	}
	out := new(Ulimit)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeWithNodeFilters) DeepCopyInto(out *VolumeWithNodeFilters) {
	*out = *in
	if in.NodeFilters != nil {
		in, out := &in.NodeFilters, &out.NodeFilters
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeWithNodeFilters.
func (in *VolumeWithNodeFilters) DeepCopy() *VolumeWithNodeFilters {
	if in == nil {
		return nil
	}
	out := new(VolumeWithNodeFilters)
	in.DeepCopyInto(out)
	return out
}
07070100000050000081A400000000000000000000000168AFB0EA000020E6000000000000000000000000000000000000002B00000000ctlptl-0.8.43/pkg/api/k3dv1alpha5/types.go/*
Copyright © 2020-2022 The k3d Author(s)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/

package k3dv1alpha5

import (
	"time"
)

// TypeMeta partially copies apimachinery/pkg/apis/meta/v1.TypeMeta
// No need for a direct dependence; the fields are stable.
type TypeMeta struct {
	Kind       string `yaml:"kind,omitempty"`
	APIVersion string `yaml:"apiVersion,omitempty"`
}

type ObjectMeta struct {
	Name string `mapstructure:"name,omitempty" yaml:"name,omitempty"`
}

type RegistryProxy struct {
	RemoteURL string `yaml:"remoteURL"`
	Username  string `yaml:"username,omitempty"`
	Password  string `yaml:"password,omitempty"`
}

type VolumeWithNodeFilters struct {
	Volume      string   `mapstructure:"volume" yaml:"volume,omitempty"`
	NodeFilters []string `mapstructure:"nodeFilters" yaml:"nodeFilters,omitempty"`
}

type PortWithNodeFilters struct {
	Port        string   `mapstructure:"port" yaml:"port,omitempty"`
	NodeFilters []string `mapstructure:"nodeFilters" yaml:"nodeFilters,omitempty"`
}

type LabelWithNodeFilters struct {
	Label       string   `mapstructure:"label" yaml:"label,omitempty"`
	NodeFilters []string `mapstructure:"nodeFilters" yaml:"nodeFilters,omitempty"`
}

type EnvVarWithNodeFilters struct {
	EnvVar      string   `mapstructure:"envVar" yaml:"envVar,omitempty"`
	NodeFilters []string `mapstructure:"nodeFilters" yaml:"nodeFilters,omitempty"`
}

type K3sArgWithNodeFilters struct {
	Arg         string   `mapstructure:"arg" yaml:"arg,omitempty"`
	NodeFilters []string `mapstructure:"nodeFilters" yaml:"nodeFilters,omitempty"`
}

type FileWithNodeFilters struct {
	Source      string   `mapstructure:"source" yaml:"source,omitempty"`
	Destination string   `mapstructure:"destination" yaml:"destination,omitempty"`
	Description string   `mapstructure:"description" yaml:"description,omitempty"`
	NodeFilters []string `mapstructure:"nodeFilters" yaml:"nodeFilters,omitempty"`
}

type SimpleConfigRegistryCreateConfig struct {
	Name     string        `mapstructure:"name" yaml:"name,omitempty"`
	Host     string        `mapstructure:"host" yaml:"host,omitempty"`
	HostPort string        `mapstructure:"hostPort" yaml:"hostPort,omitempty"`
	Image    string        `mapstructure:"image" yaml:"image,omitempty"`
	Proxy    RegistryProxy `mapstructure:"proxy" yaml:"proxy,omitempty"`
	Volumes  []string      `mapstructure:"volumes" yaml:"volumes,omitempty"`
}

// SimpleConfigOptionsKubeconfig describes the set of options referring to the kubeconfig during cluster creation.
type SimpleConfigOptionsKubeconfig struct {
	UpdateDefaultKubeconfig bool `mapstructure:"updateDefaultKubeconfig" yaml:"updateDefaultKubeconfig,omitempty"` // default: true
	SwitchCurrentContext    bool `mapstructure:"switchCurrentContext" yaml:"switchCurrentContext,omitempty"`       //nolint:lll    // default: true
}

type SimpleConfigOptions struct {
	K3dOptions        SimpleConfigOptionsK3d        `mapstructure:"k3d" yaml:"k3d"`
	K3sOptions        SimpleConfigOptionsK3s        `mapstructure:"k3s" yaml:"k3s"`
	KubeconfigOptions SimpleConfigOptionsKubeconfig `mapstructure:"kubeconfig" yaml:"kubeconfig"`
	Runtime           SimpleConfigOptionsRuntime    `mapstructure:"runtime" yaml:"runtime"`
}

type SimpleConfigOptionsRuntime struct {
	GPURequest    string                 `mapstructure:"gpuRequest" yaml:"gpuRequest,omitempty"`
	ServersMemory string                 `mapstructure:"serversMemory" yaml:"serversMemory,omitempty"`
	AgentsMemory  string                 `mapstructure:"agentsMemory" yaml:"agentsMemory,omitempty"`
	HostPidMode   bool                   `mapstructure:"hostPidMode" yaml:"hostPidMode,omitempty"`
	Labels        []LabelWithNodeFilters `mapstructure:"labels" yaml:"labels,omitempty"`
	Ulimits       []Ulimit               `mapstructure:"ulimits" yaml:"ulimits,omitempty"`
}

type Ulimit struct {
	Name string `mapstructure:"name" yaml:"name"`
	Soft int64  `mapstructure:"soft" yaml:"soft"`
	Hard int64  `mapstructure:"hard" yaml:"hard"`
}

type SimpleConfigOptionsK3d struct {
	Wait                bool                               `mapstructure:"wait" yaml:"wait"`
	Timeout             time.Duration                      `mapstructure:"timeout" yaml:"timeout,omitempty"`
	DisableLoadbalancer bool                               `mapstructure:"disableLoadbalancer" yaml:"disableLoadbalancer"`
	DisableImageVolume  bool                               `mapstructure:"disableImageVolume" yaml:"disableImageVolume"`
	NoRollback          bool                               `mapstructure:"disableRollback" yaml:"disableRollback"`
	Loadbalancer        SimpleConfigOptionsK3dLoadbalancer `mapstructure:"loadbalancer" yaml:"loadbalancer,omitempty"`
}

type SimpleConfigOptionsK3dLoadbalancer struct {
	ConfigOverrides []string `mapstructure:"configOverrides" yaml:"configOverrides,omitempty"`
}

type SimpleConfigOptionsK3s struct {
	ExtraArgs  []K3sArgWithNodeFilters `mapstructure:"extraArgs" yaml:"extraArgs,omitempty"`
	NodeLabels []LabelWithNodeFilters  `mapstructure:"nodeLabels" yaml:"nodeLabels,omitempty"`
}

type SimpleConfigRegistries struct {
	Use    []string                          `mapstructure:"use" yaml:"use,omitempty"`
	Create *SimpleConfigRegistryCreateConfig `mapstructure:"create" yaml:"create,omitempty"`
	Config string                            `mapstructure:"config" yaml:"config,omitempty"` // registries.yaml (k3s config for containerd registry override)
}

type SimpleConfigHostAlias struct {
	IP        string   `mapstructure:"ip" yaml:"ip" json:"ip"`
	Hostnames []string `mapstructure:"hostnames" yaml:"hostnames" json:"hostnames"`
}

// SimpleConfig describes the toplevel k3d configuration file.
type SimpleConfig struct {
	TypeMeta     `mapstructure:",squash" yaml:",inline"`
	ObjectMeta   `mapstructure:"metadata" yaml:"metadata,omitempty"`
	Servers      int                     `mapstructure:"servers" yaml:"servers,omitempty"` //nolint:lll    // default 1
	Agents       int                     `mapstructure:"agents" yaml:"agents,omitempty"`   //nolint:lll    // default 0
	ExposeAPI    SimpleExposureOpts      `mapstructure:"kubeAPI" yaml:"kubeAPI,omitempty"`
	Image        string                  `mapstructure:"image" yaml:"image,omitempty"`
	Network      string                  `mapstructure:"network" yaml:"network,omitempty"`
	Subnet       string                  `mapstructure:"subnet" yaml:"subnet,omitempty"`
	ClusterToken string                  `mapstructure:"token" yaml:"clusterToken,omitempty"` // default: auto-generated
	Volumes      []VolumeWithNodeFilters `mapstructure:"volumes" yaml:"volumes,omitempty"`
	Ports        []PortWithNodeFilters   `mapstructure:"ports" yaml:"ports,omitempty"`
	Options      SimpleConfigOptions     `mapstructure:"options" yaml:"options,omitempty"`
	Env          []EnvVarWithNodeFilters `mapstructure:"env" yaml:"env,omitempty"`
	Registries   SimpleConfigRegistries  `mapstructure:"registries" yaml:"registries,omitempty"`
	HostAliases  []SimpleConfigHostAlias `mapstructure:"hostAliases" yaml:"hostAliases,omitempty"`
	Files        []FileWithNodeFilters   `mapstructure:"files" yaml:"files,omitempty"`
}
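
// For illustration only: based on the yaml tags above, a minimal config of this
// shape might look like the following (field values are examples, not defaults):
//
//    apiVersion: k3d.io/v1alpha5
//    kind: Simple
//    metadata:
//      name: my-cluster
//    servers: 1
//    agents: 2
//    ports:
//      - port: 8080:80
//        nodeFilters:
//          - loadbalancer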

// SimpleExposureOpts provides a simplified syntax compared to the original k3d.ExposureOpts
type SimpleExposureOpts struct {
	Host     string `mapstructure:"host" yaml:"host,omitempty"`
	HostIP   string `mapstructure:"hostIP" yaml:"hostIP,omitempty"`
	HostPort string `mapstructure:"hostPort" yaml:"hostPort,omitempty"`
}
07070100000051000081A400000000000000000000000168AFB0EA0000063D000000000000000000000000000000000000002000000000ctlptl-0.8.43/pkg/api/schema.gopackage api

import (
	runtime "k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func (obj *Cluster) GetObjectKind() schema.ObjectKind { return obj }
func (obj *Cluster) SetGroupVersionKind(gvk schema.GroupVersionKind) {
	obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
}
func (obj *Cluster) GroupVersionKind() schema.GroupVersionKind {
	return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
}

var _ runtime.Object = &Cluster{}

func (obj *ClusterList) GetObjectKind() schema.ObjectKind { return obj }
func (obj *ClusterList) SetGroupVersionKind(gvk schema.GroupVersionKind) {
	obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
}
func (obj *ClusterList) GroupVersionKind() schema.GroupVersionKind {
	return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
}

var _ runtime.Object = &ClusterList{}

func (obj *Registry) GetObjectKind() schema.ObjectKind { return obj }
func (obj *Registry) SetGroupVersionKind(gvk schema.GroupVersionKind) {
	obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
}
func (obj *Registry) GroupVersionKind() schema.GroupVersionKind {
	return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
}

var _ runtime.Object = &Registry{}

func (obj *RegistryList) GetObjectKind() schema.ObjectKind { return obj }
func (obj *RegistryList) SetGroupVersionKind(gvk schema.GroupVersionKind) {
	obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
}
func (obj *RegistryList) GroupVersionKind() schema.GroupVersionKind {
	return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
}

var _ runtime.Object = &RegistryList{}
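
// Illustrative sketch (not part of the package API): SetGroupVersionKind writes
// through to the APIVersion/Kind fields, and GroupVersionKind reads them back.
//
//    c := &Cluster{}
//    c.SetGroupVersionKind(schema.GroupVersionKind{
//        Group: "ctlptl.dev", Version: "v1alpha1", Kind: "Cluster",
//    })
//    // c.APIVersion == "ctlptl.dev/v1alpha1", c.Kind == "Cluster"
//    // c.GroupVersionKind() recovers the same GroupVersionKind.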
07070100000052000081A400000000000000000000000168AFB0EA00002B65000000000000000000000000000000000000001F00000000ctlptl-0.8.43/pkg/api/types.gopackage api

import (
	"github.com/tilt-dev/localregistry-go"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/kind/pkg/apis/config/v1alpha4"

	"github.com/tilt-dev/ctlptl/pkg/api/k3dv1alpha4"
	"github.com/tilt-dev/ctlptl/pkg/api/k3dv1alpha5"
)

// TypeMeta partially copies apimachinery/pkg/apis/meta/v1.TypeMeta
// No need for a direct dependence; the fields are stable.
type TypeMeta struct {
	Kind       string `json:"kind,omitempty" yaml:"kind,omitempty"`
	APIVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty"`
}

// RegistryAuth contains configuration for pull-through registries
type RegistryAuth struct {
	// The FQDN of the registry (e.g. docker.io)
	Host string `json:"host,omitempty" yaml:"host,omitempty"`

	// The endpoint of the registry (e.g. https://registry-1.docker.io)
	Endpoint string `json:"endpoint,omitempty" yaml:"endpoint,omitempty"`
	Username string `json:"username,omitempty" yaml:"username,omitempty"`
	Password string `json:"password,omitempty" yaml:"password,omitempty"`
}
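
// For illustration only: based on the json/yaml tags above, a pull-through
// registry entry in a Cluster config might look like this (the username and
// password values are placeholders):
//
//    registryAuths:
//    - host: docker.io
//      endpoint: https://registry-1.docker.io
//      username: example-user
//      password: example-token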

// Cluster contains cluster configuration.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type Cluster struct {
	TypeMeta `yaml:",inline"`

	// The cluster name. Pulled from .kube/config.
	Name string `json:"name,omitempty" yaml:"name,omitempty"`

	// The name of the tool used to create this cluster.
	Product string `json:"product,omitempty" yaml:"product,omitempty"`

	// Make sure that the cluster has access to at least this many
	// CPUs. This is mostly helpful for ensuring that your Docker Desktop
	// VM has enough CPU. If ctlptl can't guarantee this many
	// CPUs, it will return an error.
	MinCPUs int `json:"minCPUs,omitempty" yaml:"minCPUs,omitempty"`

	// The name of a registry.
	//
	// If the registry doesn't exist, ctlptl will create one with this name.
	//
	// The registry can be configured by creating a `kind: Registry` config file.
	//
	// Not supported on all cluster products.
	Registry string `json:"registry,omitempty" yaml:"registry,omitempty"`

	// A list of pull-through registries to configure on the cluster.
	//
	// Not supported on all cluster products.
	RegistryAuths []RegistryAuth `json:"registryAuths,omitempty" yaml:"registryAuths,omitempty"`

	// The desired version of Kubernetes to run.
	//
	// Examples:
	// v1.19.1
	// v1.14.0
	// Must start with 'v' and contain a major, minor, and patch version.
	//
	// Not all cluster products allow you to customize this.
	KubernetesVersion string `json:"kubernetesVersion,omitempty" yaml:"kubernetesVersion,omitempty"`

	// The Kind cluster config. Only applicable for clusters with product: kind.
	//
	// Full documentation at:
	// https://pkg.go.dev/sigs.k8s.io/kind/pkg/apis/config/v1alpha4#Cluster
	//
	// Properties of this config may be overridden by properties of the ctlptl
	// Cluster config. For example, the name field of the top-level Cluster object
	// wins over one specified in the Kind config.
	KindV1Alpha4Cluster *v1alpha4.Cluster `json:"kindV1Alpha4Cluster,omitempty" yaml:"kindV1Alpha4Cluster,omitempty"`

	// Extra command line arguments passed to Kind create CLI. Only applicable to clusters with the product: kind.
	KindExtraCreateArguments []string `json:"kindExtraCreateArguments,omitempty" yaml:"kindExtraCreateArguments,omitempty"`

	// The Minikube cluster config. Only applicable for clusters with product: minikube.
	Minikube *MinikubeCluster `json:"minikube,omitempty" yaml:"minikube,omitempty"`

	// The K3D cluster config. Only applicable for clusters with product: k3d.
	K3D *K3DCluster `json:"k3d,omitempty" yaml:"k3d,omitempty"`

	// Most recently observed status of the cluster.
	// Populated by the system.
	// Read-only.
	Status ClusterStatus `json:"status,omitempty" yaml:"status,omitempty"`
}
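
// For illustration only: a minimal Cluster config in YAML form, assuming the
// ctlptl.dev/v1alpha1 apiVersion (values are examples, not defaults):
//
//    apiVersion: ctlptl.dev/v1alpha1
//    kind: Cluster
//    product: kind
//    registry: ctlptl-registry
//    minCPUs: 4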

type ClusterStatus struct {
	// When the cluster was first created.
	CreationTimestamp metav1.Time `json:"creationTimestamp,omitempty" yaml:"creationTimestamp,omitempty"`

	// Local registry status documented on the cluster itself.
	LocalRegistryHosting *localregistry.LocalRegistryHostingV1 `json:"localRegistryHosting,omitempty" yaml:"localRegistryHosting,omitempty"`

	// The number of CPUs. Only applicable to local clusters.
	CPUs int `json:"cpus,omitempty" yaml:"cpus,omitempty"`

	// Whether this is the current cluster in `kubectl`
	Current bool `json:"current,omitempty" yaml:"current,omitempty"`

	// The version of Kubernetes currently running.
	//
	// Reported by the Kubernetes API. May contain a build tag.
	//
	// Examples:
	// v1.19.1
	// v1.18.10-gke.601
	// v1.19.3-34+fa32ff1c160058
	KubernetesVersion string `json:"kubernetesVersion,omitempty" yaml:"kubernetesVersion,omitempty"`

	// Populated when we encounter an error reading the cluster status.
	Error string `json:"error,omitempty"`
}

// MinikubeCluster describes minikube-specific options for starting a cluster.
//
// Options in this struct, when possible, should match the flags
// to `minikube start`.
//
// Prefer setting features on the ClusterSpec rather than on the MinikubeCluster
// object when possible. For example, this object doesn't have a `kubernetesVersion`
// field, because it's supported by ClusterSpec.
//
// ctlptl's logic for diffing clusters and applying changes is less robust
// for cluster-specific config flags.
type MinikubeCluster struct {
	// The container runtime of the cluster. Defaults to containerd.
	ContainerRuntime string `json:"containerRuntime,omitempty" yaml:"containerRuntime,omitempty"`

	// Extra config options passed directly to Minikube's --extra-config flags.
	// When not set, we will default to starting minikube with these configs:
	//
	// kubelet.max-pods=500
	ExtraConfigs []string `json:"extraConfigs,omitempty" yaml:"extraConfigs,omitempty"`

	// Unstructured flags to pass to minikube on `minikube start`.
	// These flags will be passed before all ctlptl-determined flags.
	StartFlags []string `json:"startFlags,omitempty" yaml:"startFlags,omitempty"`
}
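
// For illustration only: a sketch of a minikube-backed Cluster config using the
// fields above (values are examples; kubelet.max-pods=500 mirrors the default
// mentioned in the ExtraConfigs comment):
//
//    apiVersion: ctlptl.dev/v1alpha1
//    kind: Cluster
//    product: minikube
//    minikube:
//      containerRuntime: docker
//      extraConfigs: ["kubelet.max-pods=500"]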

// K3DCluster describes k3d-specific options for starting a cluster.
//
// Prefer setting features on the ClusterSpec rather than on the K3DCluster
// object when possible.
//
// ctlptl's logic for diffing clusters and applying changes is less robust
// for cluster-specific configs.
type K3DCluster struct {
	// K3D's own cluster config format, v1alpha5.
	//
	// Documentation: https://k3d.io/v5.6.0/usage/configfile/
	//
	// Uses this schema: https://github.com/k3d-io/k3d/blob/v5.6.0/pkg/config/v1alpha5/types.go
	V1Alpha5Simple *k3dv1alpha5.SimpleConfig `json:"v1alpha5Simple,omitempty" yaml:"v1alpha5Simple,omitempty"`

	// K3D's own cluster config format, v1alpha4.
	//
	// Documentation: https://k3d.io/v5.4.6/usage/configfile/
	//
	// Uses this schema: https://github.com/k3d-io/k3d/blob/v5.4.6/pkg/config/v1alpha4/types.go
	V1Alpha4Simple *k3dv1alpha4.SimpleConfig `json:"v1alpha4Simple,omitempty" yaml:"v1alpha4Simple,omitempty"`
}
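
// For illustration only: a sketch of how a k3d SimpleConfig nests inside a
// ctlptl Cluster config, based on the json/yaml tags above (values are examples):
//
//    apiVersion: ctlptl.dev/v1alpha1
//    kind: Cluster
//    product: k3d
//    k3d:
//      v1alpha5Simple:
//        servers: 1
//        agents: 1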

// ClusterList is a list of Clusters.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ClusterList struct {
	TypeMeta `json:",inline"`

	// List of clusters.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md
	Items []Cluster `json:"items" protobuf:"bytes,2,rep,name=items"`
}

// Registry contains registry configuration.
//
// Currently designed for local registries on the host machine, but
// may eventually expand to support remote registries.
//
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type Registry struct {
	TypeMeta `yaml:",inline"`

	// The registry name. Get/set from the Docker container name.
	Name string `json:"name,omitempty" yaml:"name,omitempty"`

	// The host IPv4 address to bind the container to.
	ListenAddress string `json:"listenAddress,omitempty" yaml:"listenAddress,omitempty"`

	// The desired host port. Set to 0 to choose a random port,
	// or to preserve the existing port.
	Port int `json:"port,omitempty" yaml:"port,omitempty"`

	// Labels that must be attached to the running registry.
	//
	// If you change the set of labels, the registry must be stopped and
	// restarted.
	//
	// Important for K3d, which will only connect to registries
	// that are tagged "app: k3d".
	Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`

	// Environment vars to use for registry container (optional).
	//
	// Can be used to change parameters like REGISTRY_HTTP_ADDR or REGISTRY_PROXY_REMOTEURL.
	Env []string `json:"env,omitempty" yaml:"env,omitempty"`

	// Image to use for registry container (optional).
	//
	// Can be used to provide an alternate image or use a different registry
	// than Docker Hub.
	//
	// Defaults to `docker.io/library/registry:2`.
	Image string `json:"image,omitempty" yaml:"image,omitempty"`

	// Most recently observed status of the registry.
	// Populated by the system.
	// Read-only.
	Status RegistryStatus `json:"status,omitempty" yaml:"status,omitempty"`
}
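
// For illustration only: a minimal Registry config in YAML form, assuming the
// ctlptl.dev/v1alpha1 apiVersion (values are examples, not defaults):
//
//    apiVersion: ctlptl.dev/v1alpha1
//    kind: Registry
//    name: ctlptl-registry
//    port: 5005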

type RegistryStatus struct {
	// When the registry was first created.
	CreationTimestamp metav1.Time `json:"creationTimestamp,omitempty" yaml:"creationTimestamp,omitempty"`

	// The IPv4 address for the bridge network.
	IPAddress string `json:"ipAddress,omitempty" yaml:"ipAddress,omitempty"`

	// The public IPv4 address on the host machine that the registry listens on.
	ListenAddress string `json:"listenAddress,omitempty" yaml:"listenAddress,omitempty"`

	// The public port on the host machine that the registry listens on.
	HostPort int `json:"hostPort,omitempty" yaml:"hostPort,omitempty"`

	// The private port that the registry is listening on inside the registry network.
	//
	// We try to make this not configurable, because there's no real reason not
	// to use the default registry port 5000.
	ContainerPort int `json:"containerPort,omitempty" yaml:"containerPort,omitempty"`

	// Networks that the registry container is connected to.
	Networks []string `json:"networks,omitempty" yaml:"networks,omitempty"`

	// The ID of the container in Docker.
	ContainerID string `json:"containerId,omitempty" yaml:"containerId,omitempty"`

	// Current health status of the registry container.
	// Reflects underlying ContainerState.Status
	// https://github.com/moby/moby/blob/v20.10.3/api/types/types.go#L314
	State string

	// Labels attached to the running container.
	Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`

	// Env attached to the running container.
	Env []string `json:"env,omitempty" yaml:"env,omitempty"`

	// Image for the running container.
	Image string `json:"image,omitempty" yaml:"image,omitempty"`

	// Warnings that occurred when reporting the registry status.
	Warnings []string `json:"warnings,omitempty" yaml:"warnings,omitempty"`
}

// RegistryList is a list of Registries.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type RegistryList struct {
	TypeMeta `json:",inline"`

	// List of registries.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md
	Items []Registry `json:"items" protobuf:"bytes,2,rep,name=items"`
}
07070100000053000041ED00000000000000000000000268AFB0EA00000000000000000000000000000000000000000000001A00000000ctlptl-0.8.43/pkg/cluster07070100000054000081A400000000000000000000000168AFB0EA000005AA000000000000000000000000000000000000002300000000ctlptl-0.8.43/pkg/cluster/admin.gopackage cluster

import (
	"context"

	"github.com/tilt-dev/localregistry-go"

	"github.com/tilt-dev/ctlptl/internal/dctr"
	"github.com/tilt-dev/ctlptl/pkg/api"
)

// A cluster admin provides the basic start/stop functionality of a cluster,
// independent of the configuration of the machine it's running on.
type Admin interface {
	EnsureInstalled(ctx context.Context) error

	// Create a new cluster.
	//
	// Make a best-effort attempt to delete any resources that might block
	// creation of the cluster.
	Create(ctx context.Context, desired *api.Cluster, registry *api.Registry) error

	// Infers the LocalRegistryHosting that this admin will try to configure.
	LocalRegistryHosting(ctx context.Context, desired *api.Cluster, registry *api.Registry) (*localregistry.LocalRegistryHostingV1, error)

	Delete(ctx context.Context, config *api.Cluster) error
}

// An extension of cluster admin that indicates the cluster configuration can be
// modified for use from inside containers.
type AdminInContainer interface {
	ModifyConfigInContainer(ctx context.Context, cluster *api.Cluster, containerID string, dockerClient dctr.Client, configWriter configWriter) error
}

// Containerd made major changes to its config format for
// configuring registries. Each cluster type has its own way
// of detecting which format is in use.
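//
// As a rough sketch of the difference (paths are containerd's defaults and the
// registry name/port are shown only for illustration):
//
//   - V1: mirrors are declared inline in /etc/containerd/config.toml, e.g.
//
//       [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:5000"]
//         endpoint = ["http://my-registry:5000"]
//
//   - V2: mirrors live in per-registry hosts.toml files, e.g.
//     /etc/containerd/certs.d/localhost:5000/hosts.toml containing
//
//       [host."http://my-registry:5000"]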

type containerdRegistryAPI int

const (
	containerdRegistryV1 containerdRegistryAPI = iota
	containerdRegistryV2
	containerdRegistryBroken
)
07070100000055000081A400000000000000000000000168AFB0EA000007E5000000000000000000000000000000000000003200000000ctlptl-0.8.43/pkg/cluster/admin_docker_desktop.gopackage cluster

import (
	"context"
	"fmt"

	"github.com/tilt-dev/localregistry-go"

	"github.com/tilt-dev/ctlptl/pkg/api"
	"github.com/tilt-dev/ctlptl/pkg/docker"
)

// The dockerDesktopAdmin manages the Kubernetes cluster built into Docker Desktop.
// This is a bit different than the other admins, due to the overlap between
// managing the machine and managing the cluster.
type dockerDesktopAdmin struct {
	os     string
	host   string
	client d4mClient
}

func newDockerDesktopAdmin(host string, os string, d4m d4mClient) *dockerDesktopAdmin {
	return &dockerDesktopAdmin{os: os, host: host, client: d4m}
}

func (a *dockerDesktopAdmin) EnsureInstalled(ctx context.Context) error { return nil }
func (a *dockerDesktopAdmin) Create(ctx context.Context, desired *api.Cluster, registry *api.Registry) error {
	if registry != nil {
		return fmt.Errorf("ctlptl currently does not support connecting a registry to docker-desktop")
	}
	if len(desired.RegistryAuths) > 0 {
		return fmt.Errorf("ctlptl currently does not support connecting pull-through registries to docker-desktop")
	}

	isLocalDockerDesktop := docker.IsLocalDockerDesktop(a.host, a.os)
	if !isLocalDockerDesktop {
		return fmt.Errorf("docker-desktop clusters are only available on a local Docker Desktop. Current DOCKER_HOST: %s",
			a.host)
	}

	err := a.client.ResetCluster(ctx)
	if err != nil {
		return err
	}

	return nil
}

func (a *dockerDesktopAdmin) LocalRegistryHosting(ctx context.Context, desired *api.Cluster, registry *api.Registry) (*localregistry.LocalRegistryHostingV1, error) {
	return nil, nil
}

func (a *dockerDesktopAdmin) Delete(ctx context.Context, config *api.Cluster) error {
	isLocalDockerHost := docker.IsLocalDockerDesktop(a.host, a.os)
	if !isLocalDockerHost {
		return fmt.Errorf("docker-desktop cannot be deleted from DOCKER_HOST: %s", a.host)
	}

	settings, err := a.client.settings(ctx)
	if err != nil {
		return err
	}

	changed, err := a.client.setK8sEnabled(settings, false)
	if err != nil {
		return err
	}
	if !changed {
		return nil
	}

	return a.client.writeSettings(ctx, settings)
}
07070100000056000081A400000000000000000000000168AFB0EA000005C7000000000000000000000000000000000000002B00000000ctlptl-0.8.43/pkg/cluster/admin_helpers.gopackage cluster

import (
	"context"
	"fmt"
	"strings"

	"github.com/pkg/errors"
	"k8s.io/cli-runtime/pkg/genericclioptions"

	"github.com/tilt-dev/ctlptl/internal/exec"
	"github.com/tilt-dev/ctlptl/pkg/api"
)

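// applyContainerdPatchRegistryAPIV2 writes a containerd registry-API-v2
// "hosts.toml" mirror config into each node container, so that images pushed
// to localhost:<hostPort> on the host resolve to the registry container at
// <registry-name>:<containerPort> from inside the cluster. For each node it
// effectively runs (sketch; the hosts.toml contents are piped on stdin):
//
//	docker exec -i <node> sh -c \
//	  'mkdir -p /etc/containerd/certs.d/localhost:<hostPort> && \
//	   cp /dev/stdin /etc/containerd/certs.d/localhost:<hostPort>/hosts.toml'
//
// and then repeats the same for the <registry-name>:<containerPort> directory.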
func applyContainerdPatchRegistryAPIV2(
	ctx context.Context, runner exec.CmdRunner, iostreams genericclioptions.IOStreams,
	nodes []string, desired *api.Cluster, registry *api.Registry) error {
	for _, node := range nodes {
		contents := fmt.Sprintf(`[host."http://%s:%d"]
`, registry.Name, registry.Status.ContainerPort)

		localRegistryDir := fmt.Sprintf("/etc/containerd/certs.d/localhost:%d", registry.Status.HostPort)
		err := runner.RunIO(ctx,
			genericclioptions.IOStreams{In: strings.NewReader(contents), Out: iostreams.Out, ErrOut: iostreams.ErrOut},
			"docker", "exec", "-i", node, "sh", "-c",
			fmt.Sprintf("mkdir -p %s && cp /dev/stdin %s/hosts.toml", localRegistryDir, localRegistryDir))
		if err != nil {
			return errors.Wrap(err, "configuring registry")
		}

		networkRegistryDir := fmt.Sprintf("/etc/containerd/certs.d/%s:%d", registry.Name, registry.Status.ContainerPort)
		err = runner.RunIO(ctx,
			genericclioptions.IOStreams{In: strings.NewReader(contents), Out: iostreams.Out, ErrOut: iostreams.ErrOut},
			"docker", "exec", "-i", node, "sh", "-c",
			fmt.Sprintf("mkdir -p %s && cp /dev/stdin %s/hosts.toml", networkRegistryDir, networkRegistryDir))
		if err != nil {
			return errors.Wrap(err, "configuring registry")
		}
	}
	return nil
}
07070100000057000081A400000000000000000000000168AFB0EA0000192D000000000000000000000000000000000000002700000000ctlptl-0.8.43/pkg/cluster/admin_k3d.gopackage cluster

import (
	"bytes"
	"context"
	"fmt"
	"os/exec"
	"strings"

	"github.com/blang/semver/v4"
	"github.com/pkg/errors"
	"github.com/tilt-dev/localregistry-go"
	"gopkg.in/yaml.v3"
	"k8s.io/cli-runtime/pkg/genericclioptions"
	"k8s.io/klog/v2"

	cexec "github.com/tilt-dev/ctlptl/internal/exec"
	"github.com/tilt-dev/ctlptl/pkg/api"
	"github.com/tilt-dev/ctlptl/pkg/api/k3dv1alpha4"
	"github.com/tilt-dev/ctlptl/pkg/api/k3dv1alpha5"
)

// Support for v1alpha4 file format starts in 5.3.0.
var v5_3 = semver.MustParse("5.3.0")

// Support for v1alpha5 file format starts in 5.5.0.
var v5_5 = semver.MustParse("5.5.0")

// k3dAdmin uses the k3d CLI to manipulate a k3d cluster,
// once the underlying machine has been setup.
type k3dAdmin struct {
	iostreams genericclioptions.IOStreams
	runner    cexec.CmdRunner
}

func newK3DAdmin(iostreams genericclioptions.IOStreams, runner cexec.CmdRunner) *k3dAdmin {
	return &k3dAdmin{
		iostreams: iostreams,
		runner:    runner,
	}
}

func (a *k3dAdmin) EnsureInstalled(ctx context.Context) error {
	_, err := exec.LookPath("k3d")
	if err != nil {
		return fmt.Errorf("k3d not installed. Please install k3d with these instructions: https://k3d.io/#installation")
	}
	return nil
}

func (a *k3dAdmin) Create(ctx context.Context, desired *api.Cluster, registry *api.Registry) error {
	klog.V(3).Infof("Creating cluster with config:\n%+v\n---\n", desired)
	if registry != nil {
		klog.V(3).Infof("Initializing cluster with registry config:\n%+v\n---\n", registry)
	}
	if len(desired.RegistryAuths) > 0 {
		return fmt.Errorf("ctlptl currently does not support connecting pull-through registries to k3d")
	}

	k3dV, err := a.version(ctx)
	if err != nil {
		return errors.Wrap(err, "detecting k3d version")
	}

	if desired.K3D != nil {
		if desired.K3D.V1Alpha4Simple != nil && k3dV.LT(v5_3) {
			return fmt.Errorf("k3d v1alpha4 config file only supported on v5.3+")
		}
		if desired.K3D.V1Alpha5Simple != nil && k3dV.LT(v5_5) {
			return fmt.Errorf("k3d v1alpha5 config file only supported on v5.5+")
		}
		if desired.K3D.V1Alpha5Simple != nil && desired.K3D.V1Alpha4Simple != nil {
			return fmt.Errorf("k3d config invalid: only one format allowed, both specified")
		}
	}

	// We generate a cluster config on all versions
	// because it does some useful validation.
	k3dConfig, err := a.clusterConfig(desired, registry, k3dV)
	if err != nil {
		return errors.Wrap(err, "creating k3d cluster")
	}

	// Delete any orphaned cluster resources, ignoring any errors.
	// This can happen if the cluster exists but has been removed from the kubeconfig.
	_ = a.Delete(ctx, desired)

	if k3dV.LT(v5_3) {
		// 5.2 and below
		args := []string{"cluster", "create", k3dConfig.name()}
		if registry != nil {
			args = append(args, "--registry-use", registry.Name)
		}

		err := a.runner.RunIO(ctx,
			genericclioptions.IOStreams{Out: a.iostreams.Out, ErrOut: a.iostreams.ErrOut},
			"k3d", args...)
		if err != nil {
			return errors.Wrap(err, "creating k3d cluster")
		}

		return nil
	}

	// 5.3 and above.
	buf := bytes.NewBuffer(nil)
	encoder := yaml.NewEncoder(buf)
	err = encoder.Encode(k3dConfig.forEncoding())
	if err != nil {
		return errors.Wrap(err, "creating k3d cluster")
	}

	args := []string{"cluster", "create", k3dConfig.name(), "--config", "-"}
	err = a.runner.RunIO(ctx,
		genericclioptions.IOStreams{In: buf, Out: a.iostreams.Out, ErrOut: a.iostreams.ErrOut},
		"k3d", args...)
	if err != nil {
		return errors.Wrap(err, "creating k3d cluster")
	}

	return nil
}

// K3D manages the LocalRegistryHosting config itself :cheers:
func (a *k3dAdmin) LocalRegistryHosting(ctx context.Context, desired *api.Cluster, registry *api.Registry) (*localregistry.LocalRegistryHostingV1, error) {
	return nil, nil
}

func (a *k3dAdmin) Delete(ctx context.Context, config *api.Cluster) error {
	clusterName := config.Name
	if !strings.HasPrefix(clusterName, "k3d-") {
		return fmt.Errorf("all k3d clusters must have a name with the prefix k3d-*")
	}

	k3dName := strings.TrimPrefix(clusterName, "k3d-")
	err := a.runner.RunIO(ctx,
		a.iostreams,
		"k3d", "cluster", "delete", k3dName)
	if err != nil {
		return errors.Wrap(err, "deleting k3d cluster")
	}
	return nil
}

func (a *k3dAdmin) version(ctx context.Context) (semver.Version, error) {
	out := bytes.NewBuffer(nil)
	err := a.runner.RunIO(ctx,
		genericclioptions.IOStreams{Out: out, ErrOut: a.iostreams.ErrOut},
		"k3d", "version")
	if err != nil {
		return semver.Version{}, fmt.Errorf("k3d version: %v", err)
	}

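	// The first line of output looks like "k3d version v5.6.0" (see the fake
	// runner in admin_k3d_test.go), so strip that prefix and parse the rest.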
	v := strings.TrimPrefix(strings.Split(out.String(), "\n")[0], "k3d version ")
	result, err := semver.ParseTolerant(v)
	if err != nil {
		return semver.Version{}, fmt.Errorf("k3d version: %v", err)
	}
	return result, nil
}

func (a *k3dAdmin) clusterConfig(desired *api.Cluster, registry *api.Registry, k3dv semver.Version) (*k3dClusterConfig, error) {
	var v4 *k3dv1alpha4.SimpleConfig
	var v5 *k3dv1alpha5.SimpleConfig
	if desired.K3D != nil && desired.K3D.V1Alpha5Simple != nil {
		v5 = desired.K3D.V1Alpha5Simple.DeepCopy()
	} else if desired.K3D != nil && desired.K3D.V1Alpha4Simple != nil {
		v4 = desired.K3D.V1Alpha4Simple.DeepCopy()
	} else if !k3dv.LT(v5_5) {
		v5 = &k3dv1alpha5.SimpleConfig{}
	} else {
		v4 = &k3dv1alpha4.SimpleConfig{}
	}

	if v5 != nil {
		v5.Kind = "Simple"
		v5.APIVersion = "k3d.io/v1alpha5"
	} else {
		v4.Kind = "Simple"
		v4.APIVersion = "k3d.io/v1alpha4"
	}

	clusterName := desired.Name
	if !strings.HasPrefix(clusterName, "k3d-") {
		return nil, fmt.Errorf("all k3d clusters must have a name with the prefix k3d-*")
	}

	if v5 != nil {
		v5.Name = strings.TrimPrefix(clusterName, "k3d-")
		if registry != nil {
			v5.Registries.Use = append(v5.Registries.Use, registry.Name)
		}
	} else {
		v4.Name = strings.TrimPrefix(clusterName, "k3d-")
		if registry != nil {
			v4.Registries.Use = append(v4.Registries.Use, registry.Name)
		}
	}
	return &k3dClusterConfig{
		v1Alpha5: v5,
		v1Alpha4: v4,
	}, nil
}

// Helper struct for serializing different file formats.
type k3dClusterConfig struct {
	v1Alpha5 *k3dv1alpha5.SimpleConfig
	v1Alpha4 *k3dv1alpha4.SimpleConfig
}

func (c *k3dClusterConfig) forEncoding() interface{} {
	if c.v1Alpha5 != nil {
		return c.v1Alpha5
	}
	if c.v1Alpha4 != nil {
		return c.v1Alpha4
	}
	return nil
}

func (c *k3dClusterConfig) name() string {
	if c.v1Alpha5 != nil {
		return c.v1Alpha5.Name
	}
	if c.v1Alpha4 != nil {
		return c.v1Alpha4.Name
	}
	return ""
}
07070100000058000081A400000000000000000000000168AFB0EA00000C93000000000000000000000000000000000000002C00000000ctlptl-0.8.43/pkg/cluster/admin_k3d_test.gopackage cluster

import (
	"context"
	"fmt"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"k8s.io/cli-runtime/pkg/genericclioptions"

	"github.com/tilt-dev/ctlptl/internal/exec"
	"github.com/tilt-dev/ctlptl/pkg/api"
	"github.com/tilt-dev/ctlptl/pkg/api/k3dv1alpha4"
	"github.com/tilt-dev/ctlptl/pkg/api/k3dv1alpha5"
)

func TestK3DStartFlagsV4(t *testing.T) {
	f := newK3DFixture()
	f.version = "v4.0.0"

	ctx := context.Background()
	v, err := f.a.version(ctx)
	require.NoError(t, err)
	assert.Equal(t, "4.0.0", v.String())

	err = f.a.Create(ctx, &api.Cluster{
		Name: "k3d-my-cluster",
	}, &api.Registry{Name: "my-reg"})
	assert.NoError(t, err)
	assert.Equal(t, []string{
		"k3d", "cluster", "create", "my-cluster",
		"--registry-use", "my-reg",
	}, f.runner.LastArgs)
}

func TestK3DStartFlagsV5(t *testing.T) {
	f := newK3DFixture()

	ctx := context.Background()
	v, err := f.a.version(ctx)
	require.NoError(t, err)
	assert.Equal(t, "5.6.0", v.String())

	err = f.a.Create(ctx, &api.Cluster{
		Name: "k3d-my-cluster",
		K3D: &api.K3DCluster{
			V1Alpha4Simple: &k3dv1alpha4.SimpleConfig{
				Network: "bar",
			},
		},
	}, &api.Registry{Name: "my-reg"})
	require.NoError(t, err)
	assert.Equal(t, []string{
		"k3d", "cluster", "create", "my-cluster",
		"--config", "-",
	}, f.runner.LastArgs)
	assert.Equal(t, f.runner.LastStdin, `kind: Simple
apiVersion: k3d.io/v1alpha4
metadata:
    name: my-cluster
network: bar
registries:
    use:
        - my-reg
`)
}

func TestK3DV1alpha5File(t *testing.T) {
	f := newK3DFixture()

	ctx := context.Background()
	v, err := f.a.version(ctx)
	require.NoError(t, err)
	assert.Equal(t, "5.6.0", v.String())

	err = f.a.Create(ctx, &api.Cluster{
		Name: "k3d-my-cluster",
		K3D: &api.K3DCluster{
			V1Alpha5Simple: &k3dv1alpha5.SimpleConfig{
				Network: "bar",
			},
		},
	}, &api.Registry{Name: "my-reg"})
	require.NoError(t, err)
	assert.Equal(t, []string{
		"k3d", "cluster", "create", "my-cluster",
		"--config", "-",
	}, f.runner.LastArgs)
	assert.Equal(t, f.runner.LastStdin, `kind: Simple
apiVersion: k3d.io/v1alpha5
metadata:
    name: my-cluster
network: bar
registries:
    use:
        - my-reg
`)
}

func TestK3DV1alpha4FileOnOldVersions(t *testing.T) {
	f := newK3DFixture()
	f.version = "v5.4.0"

	ctx := context.Background()
	err := f.a.Create(ctx, &api.Cluster{
		Name: "k3d-my-cluster",
	}, &api.Registry{Name: "my-reg"})
	require.NoError(t, err)
	assert.Equal(t, []string{
		"k3d", "cluster", "create", "my-cluster",
		"--config", "-",
	}, f.runner.LastArgs)
	assert.Equal(t, f.runner.LastStdin, `kind: Simple
apiVersion: k3d.io/v1alpha4
metadata:
    name: my-cluster
registries:
    use:
        - my-reg
`)
}

type k3dFixture struct {
	runner  *exec.FakeCmdRunner
	a       *k3dAdmin
	version string
}

func newK3DFixture() *k3dFixture {
	iostreams := genericclioptions.IOStreams{Out: os.Stdout, ErrOut: os.Stderr}
	f := &k3dFixture{
		version: "v5.6.0",
	}
	f.runner = exec.NewFakeCmdRunner(func(argv []string) string {
		if argv[1] == "version" {
			return fmt.Sprintf(`k3d version %s
k3s version v1.24.4-k3s1 (default)
`, f.version)
		}
		return ""
	})
	f.a = newK3DAdmin(iostreams, f.runner)
	return f
}
07070100000059000081A400000000000000000000000168AFB0EA00007943000000000000000000000000000000000000002800000000ctlptl-0.8.43/pkg/cluster/admin_kind.gopackage cluster

import (
	"bufio"
	"bytes"
	"context"
	"fmt"
	"net/url"
	"os"
	"os/exec"
	"strings"

	"github.com/blang/semver/v4"
	"github.com/docker/docker/errdefs"
	"github.com/pkg/errors"
	"gopkg.in/yaml.v3"
	"k8s.io/cli-runtime/pkg/genericclioptions"
	"k8s.io/klog/v2"
	"sigs.k8s.io/kind/pkg/apis/config/v1alpha4"

	"github.com/tilt-dev/localregistry-go"

	"github.com/tilt-dev/ctlptl/internal/dctr"
	cexec "github.com/tilt-dev/ctlptl/internal/exec"
	"github.com/tilt-dev/ctlptl/pkg/api"
)

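// kindNetworkName returns the name of the Docker network that kind attaches
// its node containers to: "kind" by default, or the value of the
// KIND_EXPERIMENTAL_DOCKER_NETWORK environment variable when it's set.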
func kindNetworkName() string {
	networkName := "kind"
	if n := os.Getenv("KIND_EXPERIMENTAL_DOCKER_NETWORK"); n != "" {
		networkName = n
	}
	return networkName
}

// kindAdmin uses the kind CLI to manipulate a kind cluster,
// once the underlying machine has been setup.
type kindAdmin struct {
	iostreams    genericclioptions.IOStreams
	runner       cexec.CmdRunner
	dockerClient dctr.Client
}

func newKindAdmin(iostreams genericclioptions.IOStreams,
	runner cexec.CmdRunner, dockerClient dctr.Client) *kindAdmin {
	return &kindAdmin{
		iostreams:    iostreams,
		runner:       runner,
		dockerClient: dockerClient,
	}
}

func (a *kindAdmin) EnsureInstalled(ctx context.Context) error {
	_, err := exec.LookPath("kind")
	if err != nil {
		return fmt.Errorf("kind not installed. Please install kind with these instructions: https://kind.sigs.k8s.io/")
	}
	return nil
}

func (a *kindAdmin) kindClusterConfig(desired *api.Cluster, registry *api.Registry, registryAPI containerdRegistryAPI) (*v1alpha4.Cluster, error) {
	kindConfig := desired.KindV1Alpha4Cluster
	if kindConfig == nil {
		kindConfig = &v1alpha4.Cluster{}
	} else {
		kindConfig = kindConfig.DeepCopy()
	}
	kindConfig.Kind = "Cluster"
	kindConfig.APIVersion = "kind.x-k8s.io/v1alpha4"

	if registry != nil {
		if registryAPI == containerdRegistryV2 && len(desired.RegistryAuths) == 0 {
			// Point to the registry config path.
			// We'll add these files post-creation.
			patch := `[plugins."io.containerd.grpc.v1.cri".registry]
    config_path = "/etc/containerd/certs.d"
`
			kindConfig.ContainerdConfigPatches = append(kindConfig.ContainerdConfigPatches, patch)
		} else {
			patch := fmt.Sprintf(`[plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:%d"]
  endpoint = ["http://%s:%d"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."%s:%d"]
  endpoint = ["http://%s:%d"]
`, registry.Status.HostPort, registry.Name, registry.Status.ContainerPort,
				registry.Name, registry.Status.ContainerPort, registry.Name, registry.Status.ContainerPort)
			kindConfig.ContainerdConfigPatches = append(kindConfig.ContainerdConfigPatches, patch)
		}
	}

	for _, reg := range desired.RegistryAuths {
		// Parse the endpoint
		parsedEndpoint, err := url.Parse(reg.Endpoint)
		if err != nil {
			return nil, errors.Wrapf(err, "Error parsing registry endpoint: %s", reg.Endpoint)
		}

		// Add the registry to the list of mirrors.
		patch := fmt.Sprintf(`[plugins."io.containerd.grpc.v1.cri".registry.mirrors."%s"]
  endpoint = ["%s"]
`, reg.Host, reg.Endpoint)
		kindConfig.ContainerdConfigPatches = append(kindConfig.ContainerdConfigPatches, patch)

		// Specify the auth for the registry, if provided.
		if reg.Username != "" || reg.Password != "" {
			usernameValue := os.ExpandEnv(reg.Username)
			passwordValue := os.ExpandEnv(reg.Password)

			patch := fmt.Sprintf(`[plugins."io.containerd.grpc.v1.cri".registry.configs."%s".auth]
  username = "%s"
  password = "%s"
`, parsedEndpoint.Host, usernameValue, passwordValue)
			kindConfig.ContainerdConfigPatches = append(kindConfig.ContainerdConfigPatches, patch)
		}
	}

	return kindConfig, nil
}

func (a *kindAdmin) Create(ctx context.Context, desired *api.Cluster, registry *api.Registry) error {
	klog.V(3).Infof("Creating cluster with config:\n%+v\n---\n", desired)
	if registry != nil {
		klog.V(3).Infof("Initializing cluster with registry config:\n%+v\n---\n", registry)
	}

	clusterName := desired.Name
	if !strings.HasPrefix(clusterName, "kind-") {
		return fmt.Errorf("all kind clusters must have a name with the prefix kind-*")
	}

	kindName := strings.TrimPrefix(clusterName, "kind-")

	// If a cluster has been registered with Kind, but deleted from our kubeconfig,
	// Kind will refuse to create a new cluster. The only way to salvage it is
	// to delete and recreate.
	exists, err := a.clusterExists(ctx, kindName)
	if err != nil {
		return err
	}

	if exists {
		klog.V(3).Infof("Deleting orphaned KIND cluster: %s", kindName)
		err := a.runner.RunIO(ctx, a.iostreams, "kind", "delete", "cluster", "--name", kindName)
		if err != nil {
			return errors.Wrap(err, "deleting orphaned kind cluster")
		}
	}

	kindVersion, err := a.getKindVersion(ctx)
	if err != nil {
		return errors.Wrap(err, "creating cluster")
	}

	registryAPI := containerdRegistryV2
	kindVersionSemver, err := semver.ParseTolerant(kindVersion)
	if err != nil {
		return errors.Wrap(err, "parsing kind version")
	}
	if kindVersionSemver.LT(semver.MustParse("0.20.0")) {
		registryAPI = containerdRegistryV1
	}

	args := []string{"create", "cluster", "--name", kindName}
	if desired.KubernetesVersion != "" {
		node, err := a.getNodeImage(ctx, kindVersion, desired.KubernetesVersion)
		if err != nil {
			return errors.Wrap(err, "creating cluster")
		}
		args = append(args, "--image", node)
	}

	kindConfig, err := a.kindClusterConfig(desired, registry, registryAPI)
	if err != nil {
		return errors.Wrap(err, "generating kind config")
	}

	buf := bytes.NewBuffer(nil)
	encoder := yaml.NewEncoder(buf)
	err = encoder.Encode(kindConfig)
	if err != nil {
		return errors.Wrap(err, "creating kind cluster")
	}

	args = append(args, desired.KindExtraCreateArguments...)
	args = append(args, "--config", "-")

	iostreams := a.iostreams
	iostreams.In = buf
	err = a.runner.RunIO(ctx, iostreams, "kind", args...)
	if err != nil {
		return errors.Wrap(err, "creating kind cluster")
	}

	networkName := kindNetworkName()

	if registry != nil {
		if !a.inKindNetwork(registry, networkName) {
			_, _ = fmt.Fprintf(a.iostreams.ErrOut, "   Connecting kind to registry %s\n", registry.Name)
			err := a.dockerClient.NetworkConnect(ctx, networkName, registry.Name, nil)
			if err != nil {
				return errors.Wrap(err, "connecting registry")
			}
		}

		if registryAPI == containerdRegistryV2 && len(desired.RegistryAuths) == 0 {
			err = a.applyContainerdPatchRegistryAPIV2(ctx, desired, registry)
			if err != nil {
				return err
			}
		}
	}

	return nil
}

// We want to make sure that the image is pullable from either:
// localhost:[registry-port] or
// [registry-name]:5000
// by configuring containerd to rewrite the url.
func (a *kindAdmin) applyContainerdPatchRegistryAPIV2(ctx context.Context, desired *api.Cluster, registry *api.Registry) error {
	nodes, err := a.getNodes(ctx, desired.Name)
	if err != nil {
		return errors.Wrap(err, "configuring registry")
	}
	filtered := []string{}
	for _, node := range nodes {
		if strings.HasSuffix(node, "external-load-balancer") {
			// Ignore the external load balancers.
			// These load-balance traffic to the control plane nodes.
			// They don't need registry configuration.
			continue
		}
		filtered = append(filtered, node)
	}

	return applyContainerdPatchRegistryAPIV2(ctx, a.runner, a.iostreams,
		filtered, desired, registry)
}

func (a *kindAdmin) getNodes(ctx context.Context, cluster string) ([]string, error) {
	kindName := strings.TrimPrefix(cluster, "kind-")
	buf := bytes.NewBuffer(nil)
	iostreams := a.iostreams
	iostreams.Out = buf
	err := a.runner.RunIO(ctx, iostreams,
		"kind", "get", "nodes", "--name", kindName)
	if err != nil {
		return nil, errors.Wrap(err, "kind get nodes")
	}

	scanner := bufio.NewScanner(buf)
	result := []string{}
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line != "" {
			result = append(result, line)
		}
	}
	return result, nil
}

func (a *kindAdmin) clusterExists(ctx context.Context, cluster string) (bool, error) {
	buf := bytes.NewBuffer(nil)
	iostreams := a.iostreams
	iostreams.Out = buf
	err := a.runner.RunIO(ctx, iostreams, "kind", "get", "clusters")
	if err != nil {
		return false, errors.Wrap(err, "kind get clusters")
	}

	scanner := bufio.NewScanner(buf)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line == cluster {
			return true, nil
		}
	}
	return false, nil
}

func (a *kindAdmin) inKindNetwork(registry *api.Registry, networkName string) bool {
	for _, n := range registry.Status.Networks {
		if n == networkName {
			return true
		}
	}
	return false
}

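// LocalRegistryHosting reports how the local registry can be reached:
// "localhost:<hostPort>" from the host machine, and
// "<registry-name>:<containerPort>" from inside the kind Docker network.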
func (a *kindAdmin) LocalRegistryHosting(ctx context.Context, desired *api.Cluster, registry *api.Registry) (*localregistry.LocalRegistryHostingV1, error) {
	return &localregistry.LocalRegistryHostingV1{
		Host:                   fmt.Sprintf("localhost:%d", registry.Status.HostPort),
		HostFromClusterNetwork: fmt.Sprintf("%s:%d", registry.Name, registry.Status.ContainerPort),
		Help:                   "https://github.com/tilt-dev/ctlptl",
	}, nil
}

func (a *kindAdmin) Delete(ctx context.Context, config *api.Cluster) error {
	clusterName := config.Name
	if !strings.HasPrefix(clusterName, "kind-") {
		return fmt.Errorf("all kind clusters must have a name with the prefix kind-*")
	}

	kindName := strings.TrimPrefix(clusterName, "kind-")
	err := a.runner.RunIO(ctx, a.iostreams, "kind", "delete", "cluster", "--name", kindName)
	if err != nil {
		return errors.Wrap(err, "deleting kind cluster")
	}
	return nil
}

func (a *kindAdmin) ModifyConfigInContainer(ctx context.Context, cluster *api.Cluster, containerID string, dockerClient dctr.Client, configWriter configWriter) error {
	err := dockerClient.NetworkConnect(ctx, kindNetworkName(), containerID, nil)
	if err != nil {
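		// Docker typically reports a "forbidden" error containing "already exists"
		// when the container is already attached to the network; treat that as success.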
		if !errdefs.IsForbidden(err) || !strings.Contains(err.Error(), "already exists") {
			return fmt.Errorf("error connecting to cluster network: %w", err)
		}
	}

	kindName := strings.TrimPrefix(cluster.Name, "kind-")
	return configWriter.SetConfig(
		fmt.Sprintf("clusters.%s.server", cluster.Name),
		fmt.Sprintf("https://%s-control-plane:6443", kindName),
	)
}

func (a *kindAdmin) getNodeImage(ctx context.Context, kindVersion, k8sVersion string) (string, error) {
	nodeTable, ok := kindK8sNodeTable[kindVersion]
	if !ok {
		return "", fmt.Errorf("unsupported Kind version %s.\n"+
			"To set up a specific Kubernetes version in Kind, ctlptl needs an official Kubernetes image.\n"+
			"If you're running an unofficial version of Kind, remove 'kubernetesVersion' from your cluster config to use the default image.\n"+
			"If you're running a newly released version of Kind, please file an issue: https://github.com/tilt-dev/ctlptl/issues/new", kindVersion)
	}

	// Kind doesn't maintain Kubernetes node images for every patch version, so
	// just match on the major/minor version.
	k8sVersionParsed, err := semver.ParseTolerant(k8sVersion)
	if err != nil {
		return "", fmt.Errorf("parsing kubernetesVersion: %v", err)
	}

	simplifiedK8sVersion := fmt.Sprintf("%d.%d", k8sVersionParsed.Major, k8sVersionParsed.Minor)
	node, ok := nodeTable[simplifiedK8sVersion]
	if !ok {
		return "", fmt.Errorf("Kind %s does not support Kubernetes v%s", kindVersion, simplifiedK8sVersion)
	}
	return node, nil
}

func (a *kindAdmin) getKindVersion(ctx context.Context) (string, error) {
	out := bytes.NewBuffer(nil)
	iostreams := a.iostreams
	iostreams.Out = out
	err := a.runner.RunIO(ctx, iostreams, "kind", "version")
	if err != nil {
		return "", errors.Wrap(err, "kind version")
	}

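	// Output resembles "kind v0.30.0 go1.24.0 linux/amd64" (illustrative), so
	// the second whitespace-separated field is the version string.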
	parts := strings.Split(out.String(), " ")
	if len(parts) < 2 {
		return "", fmt.Errorf("parsing kind version output: %s", out.String())
	}

	return parts[1], nil
}

// This table must be built up manually from the Kind release notes each
// time a new Kind version is released :\
var kindK8sNodeTable = map[string]map[string]string{
	"v0.30.0": {
		"1.34": "kindest/node:v1.34.0@sha256:7416a61b42b1662ca6ca89f02028ac133a309a2a30ba309614e8ec94d976dc5a",
		"1.33": "kindest/node:v1.33.4@sha256:25a6018e48dfcaee478f4a59af81157a437f15e6e140bf103f85a2e7cd0cbbf2",
		"1.32": "kindest/node:v1.32.8@sha256:abd489f042d2b644e2d033f5c2d900bc707798d075e8186cb65e3f1367a9d5a1",
		"1.31": "kindest/node:v1.31.12@sha256:0f5cc49c5e73c0c2bb6e2df56e7df189240d83cf94edfa30946482eb08ec57d2",
	},
	"v0.29.0": {
		"1.33": "kindest/node:v1.33.1@sha256:050072256b9a903bd914c0b2866828150cb229cea0efe5892e2b644d5dd3b34f",
		"1.32": "kindest/node:v1.32.5@sha256:e3b2327e3a5ab8c76f5ece68936e4cafaa82edf58486b769727ab0b3b97a5b0d",
		"1.31": "kindest/node:v1.31.9@sha256:b94a3a6c06198d17f59cca8c6f486236fa05e2fb359cbd75dabbfc348a10b211",
		"1.30": "kindest/node:v1.30.13@sha256:397209b3d947d154f6641f2d0ce8d473732bd91c87d9575ade99049aa33cd648",
	},
	"v0.28.0": {
		"1.33": "kindest/node:v1.33.1@sha256:8d866994839cd096b3590681c55a6fa4a071fdaf33be7b9660e5697d2ed13002",
		"1.32": "kindest/node:v1.32.2@sha256:36187f6c542fa9b78d2d499de4c857249c5a0ac8cc2241bef2ccd92729a7a259",
		"1.31": "kindest/node:v1.31.6@sha256:156da58ab617d0cb4f56bbdb4b493f4dc89725505347a4babde9e9544888bb92",
		"1.30": "kindest/node:v1.30.10@sha256:8673291894dc400e0fb4f57243f5fdc6e355ceaa765505e0e73941aa1b6e0b80",
	},
	"v0.27.0": {
		"1.32": "kindest/node:v1.32.2@sha256:f226345927d7e348497136874b6d207e0b32cc52154ad8323129352923a3142f",
		"1.31": "kindest/node:v1.30.6@sha256:28b7cbb993dfe093c76641a0c95807637213c9109b761f1d422c2400e22b8e87",
		"1.30": "kindest/node:v1.30.10@sha256:4de75d0e82481ea846c0ed1de86328d821c1e6a6a91ac37bf804e5313670e507",
		"1.29": "kindest/node:v1.29.14@sha256:8703bd94ee24e51b778d5556ae310c6c0fa67d761fae6379c8e0bb480e6fea29",
	},
	"v0.26.0": {
		"1.32": "kindest/node:v1.32.0@sha256:c48c62eac5da28cdadcf560d1d8616cfa6783b58f0d94cf63ad1bf49600cb027",
		"1.31": "kindest/node:v1.31.4@sha256:2cb39f7295fe7eafee0842b1052a599a4fb0f8bcf3f83d96c7f4864c357c6c30",
		"1.30": "kindest/node:v1.30.8@sha256:17cd608b3971338d9180b00776cb766c50d0a0b6b904ab4ff52fd3fc5c6369bf",
		"1.29": "kindest/node:v1.29.12@sha256:62c0672ba99a4afd7396512848d6fc382906b8f33349ae68fb1dbfe549f70dec",
	},
	"v0.25.0": {
		"1.31": "kindest/node:v1.31.2@sha256:18fbefc20a7113353c7b75b5c869d7145a6abd6269154825872dc59c1329912e",
		"1.30": "kindest/node:v1.30.6@sha256:b6d08db72079ba5ae1f4a88a09025c0a904af3b52387643c285442afb05ab994",
		"1.29": "kindest/node:v1.29.10@sha256:3b2d8c31753e6c8069d4fc4517264cd20e86fd36220671fb7d0a5855103aa84b",
		"1.28": "kindest/node:v1.28.15@sha256:a7c05c7ae043a0b8c818f5a06188bc2c4098f6cb59ca7d1856df00375d839251",
		"1.27": "kindest/node:v1.27.16@sha256:2d21a61643eafc439905e18705b8186f3296384750a835ad7a005dceb9546d20",
		"1.26": "kindest/node:v1.26.15@sha256:c79602a44b4056d7e48dc20f7504350f1e87530fe953428b792def00bc1076dd",
	},
	"v0.24.0": {
		"1.31": "kindest/node:v1.31.0@sha256:53df588e04085fd41ae12de0c3fe4c72f7013bba32a20e7325357a1ac94ba865",
		"1.30": "kindest/node:v1.30.4@sha256:976ea815844d5fa93be213437e3ff5754cd599b040946b5cca43ca45c2047114",
		"1.29": "kindest/node:v1.29.8@sha256:d46b7aa29567e93b27f7531d258c372e829d7224b25e3fc6ffdefed12476d3aa",
		"1.28": "kindest/node:v1.28.13@sha256:45d319897776e11167e4698f6b14938eb4d52eb381d9e3d7a9086c16c69a8110",
		"1.27": "kindest/node:v1.27.17@sha256:3fd82731af34efe19cd54ea5c25e882985bafa2c9baefe14f8deab1737d9fabe",
		"1.26": "kindest/node:v1.26.15@sha256:1cc15d7b1edd2126ef051e359bf864f37bbcf1568e61be4d2ed1df7a3e87b354",
		"1.25": "kindest/node:v1.25.16@sha256:6110314339b3b44d10da7d27881849a87e092124afab5956f2e10ecdb463b025",
	},
	"v0.23.0": {
		"1.30": "kindest/node:v1.30.0@sha256:047357ac0cfea04663786a612ba1eaba9702bef25227a794b52890dd8bcd692e",
		"1.29": "kindest/node:v1.29.4@sha256:3abb816a5b1061fb15c6e9e60856ec40d56b7b52bcea5f5f1350bc6e2320b6f8",
		"1.28": "kindest/node:v1.28.9@sha256:dca54bc6a6079dd34699d53d7d4ffa2e853e46a20cd12d619a09207e35300bd0",
		"1.27": "kindest/node:v1.27.13@sha256:17439fa5b32290e3ead39ead1250dca1d822d94a10d26f1981756cd51b24b9d8",
		"1.26": "kindest/node:v1.26.15@sha256:84333e26cae1d70361bb7339efb568df1871419f2019c80f9a12b7e2d485fe19",
		"1.25": "kindest/node:v1.25.16@sha256:5da57dfc290ac3599e775e63b8b6c49c0c85d3fec771cd7d55b45fae14b38d3b",
	},
	"v0.22.0": {
		"1.29": "kindest/node:v1.29.2@sha256:51a1434a5397193442f0be2a297b488b6c919ce8a3931be0ce822606ea5ca245",
		"1.28": "kindest/node:v1.28.7@sha256:9bc6c451a289cf96ad0bbaf33d416901de6fd632415b076ab05f5fa7e4f65c58",
		"1.27": "kindest/node:v1.27.11@sha256:681253009e68069b8e01aad36a1e0fa8cf18bb0ab3e5c4069b2e65cafdd70843",
		"1.26": "kindest/node:v1.26.14@sha256:5d548739ddef37b9318c70cb977f57bf3e5015e4552be4e27e57280a8cbb8e4f",
		"1.25": "kindest/node:v1.25.16@sha256:e8b50f8e06b44bb65a93678a65a26248fae585b3d3c2a669e5ca6c90c69dc519",
		"1.24": "kindest/node:v1.24.17@sha256:bad10f9b98d54586cba05a7eaa1b61c6b90bfc4ee174fdc43a7b75ca75c95e51",
		"1.23": "kindest/node:v1.23.17@sha256:14d0a9a892b943866d7e6be119a06871291c517d279aedb816a4b4bc0ec0a5b3",
	},
	"v0.21.0": {
		"1.29": "kindest/node:v1.29.1@sha256:a0cc28af37cf39b019e2b448c54d1a3f789de32536cb5a5db61a49623e527144",
		"1.28": "kindest/node:v1.28.6@sha256:b7e1cf6b2b729f604133c667a6be8aab6f4dde5bb042c1891ae248d9154f665b",
		"1.27": "kindest/node:v1.27.10@sha256:3700c811144e24a6c6181065265f69b9bf0b437c45741017182d7c82b908918f",
		"1.26": "kindest/node:v1.26.13@sha256:15ae92d507b7d4aec6e8920d358fc63d3b980493db191d7327541fbaaed1f789",
		"1.25": "kindest/node:v1.25.16@sha256:9d0a62b55d4fe1e262953be8d406689b947668626a357b5f9d0cfbddbebbc727",
		"1.24": "kindest/node:v1.24.17@sha256:ea292d57ec5dd0e2f3f5a2d77efa246ac883c051ff80e887109fabefbd3125c7",
		"1.23": "kindest/node:v1.23.17@sha256:fbb92ac580fce498473762419df27fa8664dbaa1c5a361b5957e123b4035bdcf",
	},
	"v0.20.0": {
		"1.28": "kindest/node:v1.28.0@sha256:b7a4cad12c197af3ba43202d3efe03246b3f0793f162afb40a33c923952d5b31",
		"1.27": "kindest/node:v1.27.3@sha256:3966ac761ae0136263ffdb6cfd4db23ef8a83cba8a463690e98317add2c9ba72",
		"1.26": "kindest/node:v1.26.6@sha256:6e2d8b28a5b601defe327b98bd1c2d1930b49e5d8c512e1895099e4504007adb",
		"1.25": "kindest/node:v1.25.11@sha256:227fa11ce74ea76a0474eeefb84cb75d8dad1b08638371ecf0e86259b35be0c8",
		"1.24": "kindest/node:v1.24.15@sha256:7db4f8bea3e14b82d12e044e25e34bd53754b7f2b0e9d56df21774e6f66a70ab",
		"1.23": "kindest/node:v1.23.17@sha256:59c989ff8a517a93127d4a536e7014d28e235fb3529d9fba91b3951d461edfdb",
		"1.22": "kindest/node:v1.22.17@sha256:f5b2e5698c6c9d6d0adc419c0deae21a425c07d81bbf3b6a6834042f25d4fba2",
		"1.21": "kindest/node:v1.21.14@sha256:8a4e9bb3f415d2bb81629ce33ef9c76ba514c14d707f9797a01e3216376ba093",
	},
	"v0.19.0": {
		"1.27": "kindest/node:v1.27.1@sha256:b7d12ed662b873bd8510879c1846e87c7e676a79fefc93e17b2a52989d3ff42b",
		"1.26": "kindest/node:v1.26.4@sha256:f4c0d87be03d6bea69f5e5dc0adb678bb498a190ee5c38422bf751541cebe92e",
		"1.25": "kindest/node:v1.25.9@sha256:c08d6c52820aa42e533b70bce0c2901183326d86dcdcbedecc9343681db45161",
		"1.24": "kindest/node:v1.24.13@sha256:cea86276e698af043af20143f4bf0509e730ec34ed3b7fa790cc0bea091bc5dd",
		"1.23": "kindest/node:v1.23.17@sha256:f77f8cf0b30430ca4128cc7cfafece0c274a118cd0cdb251049664ace0dee4ff",
		"1.22": "kindest/node:v1.22.17@sha256:9af784f45a584f6b28bce2af84c494d947a05bd709151466489008f80a9ce9d5",
		"1.21": "kindest/node:v1.21.14@sha256:220cfafdf6e3915fbce50e13d1655425558cb98872c53f802605aa2fb2d569cf",
	},
	"v0.18.0": {
		"1.27": "kindest/node:v1.27.1@sha256:9915f5629ef4d29f35b478e819249e89cfaffcbfeebda4324e5c01d53d937b09",
		"1.26": "kindest/node:v1.26.3@sha256:61b92f38dff6ccc29969e7aa154d34e38b89443af1a2c14e6cfbd2df6419c66f",
		"1.25": "kindest/node:v1.25.8@sha256:00d3f5314cc35327706776e95b2f8e504198ce59ac545d0200a89e69fce10b7f",
		"1.24": "kindest/node:v1.24.12@sha256:1e12918b8bc3d4253bc08f640a231bb0d3b2c5a9b28aa3f2ca1aee93e1e8db16",
		"1.23": "kindest/node:v1.23.17@sha256:e5fd1d9cd7a9a50939f9c005684df5a6d145e8d695e78463637b79464292e66c",
		"1.22": "kindest/node:v1.22.17@sha256:c8a828709a53c25cbdc0790c8afe12f25538617c7be879083248981945c38693",
		"1.21": "kindest/node:v1.21.14@sha256:27ef72ea623ee879a25fe6f9982690a3e370c68286f4356bf643467c552a3888",
	},
	"v0.17.0": {
		"1.26": "kindest/node:v1.26.0@sha256:691e24bd2417609db7e589e1a479b902d2e209892a10ce375fab60a8407c7352",
		"1.25": "kindest/node:v1.25.3@sha256:f52781bc0d7a19fb6c405c2af83abfeb311f130707a0e219175677e366cc45d1",
		"1.24": "kindest/node:v1.24.7@sha256:577c630ce8e509131eab1aea12c022190978dd2f745aac5eb1fe65c0807eb315",
		"1.23": "kindest/node:v1.23.13@sha256:ef453bb7c79f0e3caba88d2067d4196f427794086a7d0df8df4f019d5e336b61",
		"1.22": "kindest/node:v1.22.15@sha256:7d9708c4b0873f0fe2e171e2b1b7f45ae89482617778c1c875f1053d4cef2e41",
		"1.21": "kindest/node:v1.21.14@sha256:9d9eb5fb26b4fbc0c6d95fa8c790414f9750dd583f5d7cee45d92e8c26670aa1",
		"1.20": "kindest/node:v1.20.15@sha256:a32bf55309294120616886b5338f95dd98a2f7231519c7dedcec32ba29699394",
		"1.19": "kindest/node:v1.19.16@sha256:476cb3269232888437b61deca013832fee41f9f074f9bed79f57e4280f7c48b7",
	},
	"v0.16.0": {
		"1.25": "kindest/node:v1.25.2@sha256:9be91e9e9cdf116809841fc77ebdb8845443c4c72fe5218f3ae9eb57fdb4bace",
		"1.24": "kindest/node:v1.24.6@sha256:97e8d00bc37a7598a0b32d1fabd155a96355c49fa0d4d4790aab0f161bf31be1",
		"1.23": "kindest/node:v1.23.12@sha256:9402cf1330bbd3a0d097d2033fa489b2abe40d479cc5ef47d0b6a6960613148a",
		"1.22": "kindest/node:v1.22.15@sha256:bfd5eaae36849bfb3c1e3b9442f3da17d730718248939d9d547e86bbac5da586",
		"1.21": "kindest/node:v1.21.14@sha256:ad5b7446dd8332439f22a1efdac73670f0da158c00f0a70b45716e7ef3fae20b",
		"1.20": "kindest/node:v1.20.15@sha256:45d0194a8069c46483a0e509088ab9249302af561ebee76a1281a1f08ecb4ed3",
		"1.19": "kindest/node:v1.19.16@sha256:a146f9819fece706b337d34125bbd5cb8ae4d25558427bf2fa3ee8ad231236f2",
	},
	"v0.15.0": {
		"1.25": "kindest/node:v1.25.0@sha256:428aaa17ec82ccde0131cb2d1ca6547d13cf5fdabcc0bbecf749baa935387cbf",
		"1.24": "kindest/node:v1.24.4@sha256:adfaebada924a26c2c9308edd53c6e33b3d4e453782c0063dc0028bdebaddf98",
		"1.23": "kindest/node:v1.23.10@sha256:f047448af6a656fae7bc909e2fab360c18c487ef3edc93f06d78cdfd864b2d12",
		"1.22": "kindest/node:v1.22.13@sha256:4904eda4d6e64b402169797805b8ec01f50133960ad6c19af45173a27eadf959",
		"1.21": "kindest/node:v1.21.14@sha256:f9b4d3d1112f24a7254d2ee296f177f628f9b4c1b32f0006567af11b91c1f301",
		"1.20": "kindest/node:v1.20.15@sha256:d67de8f84143adebe80a07672f370365ec7d23f93dc86866f0e29fa29ce026fe",
		"1.19": "kindest/node:v1.19.16@sha256:707469aac7e6805e52c3bde2a8a8050ce2b15decff60db6c5077ba9975d28b98",
		"1.18": "kindest/node:v1.18.20@sha256:61c9e1698c1cb19c3b1d8151a9135b379657aee23c59bde4a8d87923fcb43a91",
	},
	"v0.14.0": {
		"1.24": "kindest/node:v1.24.0@sha256:0866296e693efe1fed79d5e6c7af8df71fc73ae45e3679af05342239cdc5bc8e",
		"1.23": "kindest/node:v1.23.6@sha256:b1fa224cc6c7ff32455e0b1fd9cbfd3d3bc87ecaa8fcb06961ed1afb3db0f9ae",
		"1.22": "kindest/node:v1.22.9@sha256:8135260b959dfe320206eb36b3aeda9cffcb262f4b44cda6b33f7bb73f453105",
		"1.21": "kindest/node:v1.21.12@sha256:f316b33dd88f8196379f38feb80545ef3ed44d9197dca1bfd48bcb1583210207",
		"1.20": "kindest/node:v1.20.15@sha256:6f2d011dffe182bad80b85f6c00e8ca9d86b5b8922cdf433d53575c4c5212248",
		"1.19": "kindest/node:v1.19.16@sha256:d9c819e8668de8d5030708e484a9fdff44d95ec4675d136ef0a0a584e587f65c",
		"1.18": "kindest/node:v1.18.20@sha256:738cdc23ed4be6cc0b7ea277a2ebcc454c8373d7d8fb991a7fcdbd126188e6d7",
	},
	"v0.13.0": {
		"1.24": "kindest/node:v1.24.0@sha256:406fd86d48eaf4c04c7280cd1d2ca1d61e7d0d61ddef0125cb097bc7b82ed6a1",
		"1.23": "kindest/node:v1.23.6@sha256:1af0f1bee4c3c0fe9b07de5e5d3fafeb2eec7b4e1b268ae89fcab96ec67e8355",
		"1.22": "kindest/node:v1.22.9@sha256:6e57a6b0c493c7d7183a1151acff0bfa44bf37eb668826bf00da5637c55b6d5e",
		"1.21": "kindest/node:v1.21.12@sha256:ae05d44cc636ee961068399ea5123ae421790f472c309900c151a44ee35c3e3e",
		"1.20": "kindest/node:v1.20.15@sha256:a6ce604504db064c5e25921c6c0fffea64507109a1f2a512b1b562ac37d652f3",
		"1.19": "kindest/node:v1.19.16@sha256:dec41184d10deca01a08ea548197b77dc99eeacb56ff3e371af3193c86ca99f4",
		"1.18": "kindest/node:v1.18.20@sha256:38a8726ece5d7867fb0ede63d718d27ce2d41af519ce68be5ae7fcca563537ed",
	},
	"v0.12.0": {
		"1.23": "kindest/node:v1.23.4@sha256:0e34f0d0fd448aa2f2819cfd74e99fe5793a6e4938b328f657c8e3f81ee0dfb9",
		"1.22": "kindest/node:v1.22.7@sha256:1dfd72d193bf7da64765fd2f2898f78663b9ba366c2aa74be1fd7498a1873166",
		"1.21": "kindest/node:v1.21.10@sha256:84709f09756ba4f863769bdcabe5edafc2ada72d3c8c44d6515fc581b66b029c",
		"1.20": "kindest/node:v1.20.15@sha256:393bb9096c6c4d723bb17bceb0896407d7db581532d11ea2839c80b28e5d8deb",
		"1.19": "kindest/node:v1.19.16@sha256:81f552397c1e6c1f293f967ecb1344d8857613fb978f963c30e907c32f598467",
		"1.18": "kindest/node:v1.18.20@sha256:e3dca5e16116d11363e31639640042a9b1bd2c90f85717a7fc66be34089a8169",
		"1.17": "kindest/node:v1.17.17@sha256:e477ee64df5731aa4ef4deabbafc34e8d9a686b49178f726563598344a3898d5",
		"1.16": "kindest/node:v1.16.15@sha256:64bac16b83b6adfd04ea3fbcf6c9b5b893277120f2b2cbf9f5fa3e5d4c2260cc",
		"1.15": "kindest/node:v1.15.12@sha256:9dfc13db6d3fd5e5b275f8c4657ee6a62ef9cb405546664f2de2eabcfd6db778",
		"1.14": "kindest/node:v1.14.10@sha256:b693339da2a927949025869425e20daf80111ccabf020d4021a23c00bae29d82",
	},
	"v0.11.1": {
		"1.23": "kindest/node:v1.23.0@sha256:49824ab1727c04e56a21a5d8372a402fcd32ea51ac96a2706a12af38934f81ac",
		"1.22": "kindest/node:v1.22.0@sha256:b8bda84bb3a190e6e028b1760d277454a72267a5454b57db34437c34a588d047",
		"1.21": "kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6",
		"1.20": "kindest/node:v1.20.7@sha256:cbeaf907fc78ac97ce7b625e4bf0de16e3ea725daf6b04f930bd14c67c671ff9",
		"1.19": "kindest/node:v1.19.11@sha256:07db187ae84b4b7de440a73886f008cf903fcf5764ba8106a9fd5243d6f32729",
		"1.18": "kindest/node:v1.18.19@sha256:7af1492e19b3192a79f606e43c35fb741e520d195f96399284515f077b3b622c",
		"1.17": "kindest/node:v1.17.17@sha256:66f1d0d91a88b8a001811e2f1054af60eef3b669a9a74f9b6db871f2f1eeed00",
		"1.16": "kindest/node:v1.16.15@sha256:83067ed51bf2a3395b24687094e283a7c7c865ccc12a8b1d7aa673ba0c5e8861",
		"1.15": "kindest/node:v1.15.12@sha256:b920920e1eda689d9936dfcf7332701e80be12566999152626b2c9d730397a95",
		"1.14": "kindest/node:v1.14.10@sha256:f8a66ef82822ab4f7569e91a5bccaf27bceee135c1457c512e54de8c6f7219f8",
	},
	"v0.11.0": {
		"1.21": "kindest/node:v1.21.1@sha256:fae9a58f17f18f06aeac9772ca8b5ac680ebbed985e266f711d936e91d113bad",
		"1.20": "kindest/node:v1.20.7@sha256:e645428988191fc824529fd0bb5c94244c12401cf5f5ea3bd875eb0a787f0fe9",
		"1.19": "kindest/node:v1.19.11@sha256:7664f21f9cb6ba2264437de0eb3fe99f201db7a3ac72329547ec4373ba5f5911",
		"1.18": "kindest/node:v1.18.19@sha256:530378628c7c518503ade70b1df698b5de5585dcdba4f349328d986b8849b1ee",
		"1.17": "kindest/node:v1.17.17@sha256:c581fbf67f720f70aaabc74b44c2332cc753df262b6c0bca5d26338492470c17",
		"1.16": "kindest/node:v1.16.15@sha256:430c03034cd856c1f1415d3e37faf35a3ea9c5aaa2812117b79e6903d1fc9651",
		"1.15": "kindest/node:v1.15.12@sha256:8d575f056493c7778935dd855ded0e95c48cb2fab90825792e8fc9af61536bf9",
		"1.14": "kindest/node:v1.14.10@sha256:6033e04bcfca7c5f2a9c4ce77551e1abf385bcd2709932ec2f6a9c8c0aff6d4f",
	},
	"v0.10.0": {
		"1.20": "kindest/node:v1.20.2@sha256:8f7ea6e7642c0da54f04a7ee10431549c0257315b3a634f6ef2fecaaedb19bab",
		"1.19": "kindest/node:v1.19.7@sha256:a70639454e97a4b733f9d9b67e12c01f6b0297449d5b9cbbef87473458e26dca",
		"1.18": "kindest/node:v1.18.15@sha256:5c1b980c4d0e0e8e7eb9f36f7df525d079a96169c8a8f20d8bd108c0d0889cc4",
		"1.17": "kindest/node:v1.17.17@sha256:7b6369d27eee99c7a85c48ffd60e11412dc3f373658bc59b7f4d530b7056823e",
		"1.16": "kindest/node:v1.16.15@sha256:c10a63a5bda231c0a379bf91aebf8ad3c79146daca59db816fb963f731852a99",
		"1.15": "kindest/node:v1.15.12@sha256:67181f94f0b3072fb56509107b380e38c55e23bf60e6f052fbd8052d26052fb5",
		"1.14": "kindest/node:v1.14.10@sha256:3fbed72bcac108055e46e7b4091eb6858ad628ec51bf693c21f5ec34578f6180",
	},
	"v0.9.0": {
		"1.19": "kindest/node:v1.19.1@sha256:98cf5288864662e37115e362b23e4369c8c4a408f99cbc06e58ac30ddc721600",
		"1.18": "kindest/node:v1.18.8@sha256:f4bcc97a0ad6e7abaf3f643d890add7efe6ee4ab90baeb374b4f41a4c95567eb",
		"1.17": "kindest/node:v1.17.11@sha256:5240a7a2c34bf241afb54ac05669f8a46661912eab05705d660971eeb12f6555",
		"1.16": "kindest/node:v1.16.15@sha256:a89c771f7de234e6547d43695c7ab047809ffc71a0c3b65aa54eda051c45ed20",
		"1.15": "kindest/node:v1.15.12@sha256:d9b939055c1e852fe3d86955ee24976cab46cba518abcb8b13ba70917e6547a6",
		"1.14": "kindest/node:v1.14.10@sha256:ce4355398a704fca68006f8a29f37aafb49f8fc2f64ede3ccd0d9198da910146",
		"1.13": "kindest/node:v1.13.12@sha256:1c1a48c2bfcbae4d5f4fa4310b5ed10756facad0b7a2ca93c7a4b5bae5db29f5",
	},
	"v0.8.1": {
		"1.18": "kindest/node:v1.18.2@sha256:7b27a6d0f2517ff88ba444025beae41491b016bc6af573ba467b70c5e8e0d85f",
		"1.17": "kindest/node:v1.17.5@sha256:ab3f9e6ec5ad8840eeb1f76c89bb7948c77bbf76bcebe1a8b59790b8ae9a283a",
		"1.16": "kindest/node:v1.16.9@sha256:7175872357bc85847ec4b1aba46ed1d12fa054c83ac7a8a11f5c268957fd5765",
		"1.15": "kindest/node:v1.15.11@sha256:6cc31f3533deb138792db2c7d1ffc36f7456a06f1db5556ad3b6927641016f50",
		"1.14": "kindest/node:v1.14.10@sha256:6cd43ff41ae9f02bb46c8f455d5323819aec858b99534a290517ebc181b443c6",
		"1.13": "kindest/node:v1.13.12@sha256:214476f1514e47fe3f6f54d0f9e24cfb1e4cda449529791286c7161b7f9c08e7",
		"1.12": "kindest/node:v1.12.10@sha256:faeb82453af2f9373447bb63f50bae02b8020968e0889c7fa308e19b348916cb",
	},
	"v0.8.0": {
		"1.18": "kindest/node:v1.18.2@sha256:7b27a6d0f2517ff88ba444025beae41491b016bc6af573ba467b70c5e8e0d85f",
		"1.17": "kindest/node:v1.17.5@sha256:ab3f9e6ec5ad8840eeb1f76c89bb7948c77bbf76bcebe1a8b59790b8ae9a283a",
		"1.16": "kindest/node:v1.16.9@sha256:7175872357bc85847ec4b1aba46ed1d12fa054c83ac7a8a11f5c268957fd5765",
		"1.15": "kindest/node:v1.15.11@sha256:6cc31f3533deb138792db2c7d1ffc36f7456a06f1db5556ad3b6927641016f50",
		"1.14": "kindest/node:v1.14.10@sha256:6cd43ff41ae9f02bb46c8f455d5323819aec858b99534a290517ebc181b443c6",
		"1.13": "kindest/node:v1.13.12@sha256:214476f1514e47fe3f6f54d0f9e24cfb1e4cda449529791286c7161b7f9c08e7",
		"1.12": "kindest/node:v1.12.10@sha256:faeb82453af2f9373447bb63f50bae02b8020968e0889c7fa308e19b348916cb",
	},
	"v0.7.0": {
		"1.18": "kindest/node:v1.18.0@sha256:0e20578828edd939d25eb98496a685c76c98d54084932f76069f886ec315d694",
		"1.17": "kindest/node:v1.17.0@sha256:9512edae126da271b66b990b6fff768fbb7cd786c7d39e86bdf55906352fdf62",
		"1.16": "kindest/node:v1.16.4@sha256:b91a2c2317a000f3a783489dfb755064177dbc3a0b2f4147d50f04825d016f55",
		"1.15": "kindest/node:v1.15.7@sha256:e2df133f80ef633c53c0200114fce2ed5e1f6947477dbc83261a6a921169488d",
		"1.14": "kindest/node:v1.14.10@sha256:81ae5a3237c779efc4dda43cc81c696f88a194abcc4f8fa34f86cf674aa14977",
		"1.13": "kindest/node:v1.13.12@sha256:5e8ae1a4e39f3d151d420ef912e18368745a2ede6d20ea87506920cd947a7e3a",
		"1.12": "kindest/node:v1.12.10@sha256:68a6581f64b54994b824708286fafc37f1227b7b54cbb8865182ce1e036ed1cc",
		"1.11": "kindest/node:v1.11.10@sha256:e6f3dade95b7cb74081c5b9f3291aaaa6026a90a977e0b990778b6adc9ea6248",
	},
}
0707010000005A000081A400000000000000000000000168AFB0EA00000DC9000000000000000000000000000000000000002D00000000ctlptl-0.8.43/pkg/cluster/admin_kind_test.gopackage cluster

import (
	"context"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
	"k8s.io/cli-runtime/pkg/genericclioptions"

	"github.com/tilt-dev/ctlptl/internal/exec"
	"github.com/tilt-dev/ctlptl/pkg/api"
)

func TestNodeImage(t *testing.T) {
	runner := exec.NewFakeCmdRunner(func(argv []string) string {
		return ""
	})
	iostreams := genericclioptions.IOStreams{
		In:     os.Stdin,
		Out:    os.Stdout,
		ErrOut: os.Stderr,
	}
	a := newKindAdmin(iostreams, runner, &fakeDockerClient{})
	ctx := context.Background()

	img, err := a.getNodeImage(ctx, "v0.9.0", "v1.19")
	assert.NoError(t, err)
	assert.Equal(t, "kindest/node:v1.19.1@sha256:98cf5288864662e37115e362b23e4369c8c4a408f99cbc06e58ac30ddc721600", img)

	img, err = a.getNodeImage(ctx, "v0.9.0", "v1.19.3")
	assert.NoError(t, err)
	assert.Equal(t, "kindest/node:v1.19.1@sha256:98cf5288864662e37115e362b23e4369c8c4a408f99cbc06e58ac30ddc721600", img)

	img, err = a.getNodeImage(ctx, "v0.10.0", "v1.20")
	assert.NoError(t, err)
	assert.Equal(t, "kindest/node:v1.20.2@sha256:8f7ea6e7642c0da54f04a7ee10431549c0257315b3a634f6ef2fecaaedb19bab", img)

	img, err = a.getNodeImage(ctx, "v0.11.1", "v1.23")
	assert.NoError(t, err)
	assert.Equal(t, "kindest/node:v1.23.0@sha256:49824ab1727c04e56a21a5d8372a402fcd32ea51ac96a2706a12af38934f81ac", img)

	img, err = a.getNodeImage(ctx, "v0.8.1", "v1.16.1")
	assert.NoError(t, err)
	assert.Equal(t, "kindest/node:v1.16.9@sha256:7175872357bc85847ec4b1aba46ed1d12fa054c83ac7a8a11f5c268957fd5765", img)
}

func TestPatchRegistryConfig(t *testing.T) {
	nodeExec := []string{}
	runner := exec.NewFakeCmdRunner(func(argv []string) string {
		if argv[0] == "kind" && argv[1] == "get" && argv[2] == "nodes" {
			return `kind-external-load-balancer
kind-control-plane
kind-control-plane2
`
		}
		if argv[0] == "docker" && argv[1] == "exec" && argv[2] == "-i" {
			nodeExec = append(nodeExec, argv[3])
		}
		return ""
	})
	iostreams := genericclioptions.IOStreams{
		In:     os.Stdin,
		Out:    os.Stdout,
		ErrOut: os.Stderr,
	}
	a := newKindAdmin(iostreams, runner, &fakeDockerClient{})
	ctx := context.Background()

	err := a.applyContainerdPatchRegistryAPIV2(
		ctx,
		&api.Cluster{Name: "test-cluster"},
		&api.Registry{Name: "test-registry"})
	assert.NoError(t, err)

	// Assert that we only executed commands
	// in the control plane nodes, not the LB.
	assert.Equal(t, []string{
		"kind-control-plane",
		"kind-control-plane",
		"kind-control-plane2",
		"kind-control-plane2",
	}, nodeExec)
}

func TestKindClusterConfigWithPullThroughRegistries(t *testing.T) {
	iostreams := genericclioptions.IOStreams{
		In:     os.Stdin,
		Out:    os.Stdout,
		ErrOut: os.Stderr,
	}
	runner := exec.NewFakeCmdRunner(func(argv []string) string {
		return ""
	})
	a := newKindAdmin(iostreams, runner, &fakeDockerClient{})

	desired := &api.Cluster{
		RegistryAuths: []api.RegistryAuth{
			{
				Host:     "example.com",
				Endpoint: "http://example.com:5000",
				Username: "user",
				Password: "pass",
			},
		},
	}

	kindConfig, err := a.kindClusterConfig(desired, nil, containerdRegistryV2)
	assert.NoError(t, err)

	expectedMirror := `[plugins."io.containerd.grpc.v1.cri".registry.mirrors."example.com"]
  endpoint = ["http://example.com:5000"]
`
	expectedAuth := `[plugins."io.containerd.grpc.v1.cri".registry.configs."example.com:5000".auth]
  username = "user"
  password = "pass"
`

	assert.Contains(t, kindConfig.ContainerdConfigPatches, expectedMirror)
	assert.Contains(t, kindConfig.ContainerdConfigPatches, expectedAuth)
}
0707010000005B000081A400000000000000000000000168AFB0EA00002C2F000000000000000000000000000000000000002C00000000ctlptl-0.8.43/pkg/cluster/admin_minikube.gopackage cluster

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"os/exec"
	"strings"

	"github.com/blang/semver/v4"
	"github.com/docker/docker/api/types/container"
	"github.com/pkg/errors"
	"k8s.io/cli-runtime/pkg/genericclioptions"
	"k8s.io/klog/v2"

	"github.com/tilt-dev/localregistry-go"

	"github.com/tilt-dev/ctlptl/internal/dctr"
	cexec "github.com/tilt-dev/ctlptl/internal/exec"
	"github.com/tilt-dev/ctlptl/pkg/api"
)

// minikube v1.26 completely changed the API for configuring registries.
var v1_26 = semver.MustParse("1.26.0")
var v1_27 = semver.MustParse("1.27.0")

// minikubeAdmin uses the minikube CLI to manipulate a minikube cluster,
// once the underlying machine has been setup.
type minikubeAdmin struct {
	iostreams    genericclioptions.IOStreams
	runner       cexec.CmdRunner
	dockerClient dctr.Client
}

func newMinikubeAdmin(iostreams genericclioptions.IOStreams, dockerClient dctr.Client, runner cexec.CmdRunner) *minikubeAdmin {
	return &minikubeAdmin{
		iostreams:    iostreams,
		dockerClient: dockerClient,
		runner:       runner,
	}
}

func (a *minikubeAdmin) EnsureInstalled(ctx context.Context) error {
	_, err := exec.LookPath("minikube")
	if err != nil {
		return fmt.Errorf("minikube not installed. Please install minikube with these instructions: https://minikube.sigs.k8s.io/")
	}
	return nil
}

type minikubeVersionResponse struct {
	MinikubeVersion string `json:"minikubeVersion"`
}

func (a *minikubeAdmin) version(ctx context.Context) (semver.Version, error) {
	out := bytes.NewBuffer(nil)
	err := a.runner.RunIO(ctx,
		genericclioptions.IOStreams{Out: out, ErrOut: a.iostreams.ErrOut},
		"minikube", "version", "-o", "json")
	if err != nil {
		return semver.Version{}, fmt.Errorf("minikube version: %v", err)
	}

	decoder := json.NewDecoder(out)
	response := minikubeVersionResponse{}
	err = decoder.Decode(&response)
	if err != nil {
		return semver.Version{}, fmt.Errorf("minikube version: %v", err)
	}
	v := response.MinikubeVersion
	if v == "" {
		return semver.Version{}, fmt.Errorf("minikube version not found")
	}
	result, err := semver.ParseTolerant(v)
	if err != nil {
		return semver.Version{}, fmt.Errorf("minikube version: %v", err)
	}
	return result, nil
}

func (a *minikubeAdmin) Create(ctx context.Context, desired *api.Cluster, registry *api.Registry) error {
	klog.V(3).Infof("Creating cluster with config:\n%+v\n---\n", desired)
	if registry != nil {
		klog.V(3).Infof("Initializing cluster with registry config:\n%+v\n---\n", registry)
	}
	if len(desired.RegistryAuths) > 0 {
		return fmt.Errorf("ctlptl currently does not support connecting pull-through registries to minikube")
	}

	v, err := a.version(ctx)
	if err != nil {
		return err
	}
	registryAPI := containerdRegistryV1
	if v.GTE(v1_26) && v.LT(v1_27) {
		registryAPI = containerdRegistryBroken
	} else if v.GTE(v1_27) {
		registryAPI = containerdRegistryV2
	}

	clusterName := desired.Name
	if registry != nil {
		// Assume the network name is the same as the cluster name,
		// which is true in minikube 0.15+. It's OK if it isn't,
		// because we double-check whether the registry is in the network.
		err := a.ensureRegistryDisconnected(ctx, registry, container.NetworkMode(clusterName))
		if err != nil {
			return err
		}
	}

	containerRuntime := "containerd"
	if desired.Minikube != nil && desired.Minikube.ContainerRuntime != "" {
		containerRuntime = desired.Minikube.ContainerRuntime
	}

	extraConfigs := []string{"kubelet.max-pods=500"}
	if desired.Minikube != nil && len(desired.Minikube.ExtraConfigs) > 0 {
		extraConfigs = desired.Minikube.ExtraConfigs
	}

	args := []string{
		"start",
	}

	if desired.Minikube != nil {
		args = append(args, desired.Minikube.StartFlags...)
	}

	args = append(args,
		"-p", clusterName,
		"--driver=docker",
		fmt.Sprintf("--container-runtime=%s", containerRuntime),
	)

	for _, c := range extraConfigs {
		args = append(args, fmt.Sprintf("--extra-config=%s", c))
	}

	if desired.MinCPUs != 0 {
		args = append(args, fmt.Sprintf("--cpus=%d", desired.MinCPUs))
	}
	if desired.KubernetesVersion != "" {
		args = append(args, "--kubernetes-version", desired.KubernetesVersion)
	}

	// https://github.com/tilt-dev/ctlptl/issues/239
	if registry != nil {
		if registryAPI == containerdRegistryBroken {
			return fmt.Errorf(
				"Error: Local registries are broken in minikube v1.26.\n" +
					"See: https://github.com/kubernetes/minikube/issues/14480 .\n" +
					"Please upgrade to minikube v1.27.")
		}
		args = append(args, "--insecure-registry", fmt.Sprintf("%s:%d", registry.Name, registry.Status.ContainerPort))
	}
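	// At this point the invocation looks roughly like (illustrative):
	//
	//   minikube start -p <name> --driver=docker --container-runtime=containerd \
	//     --extra-config=kubelet.max-pods=500 --insecure-registry <registry>:5000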

	in := strings.NewReader("")

	err = a.runner.RunIO(ctx,
		genericclioptions.IOStreams{In: in, Out: a.iostreams.Out, ErrOut: a.iostreams.ErrOut},
		"minikube", args...)
	if err != nil {
		return errors.Wrap(err, "creating minikube cluster")
	}

	if registry != nil {
		container, err := a.dockerClient.ContainerInspect(ctx, clusterName)
		if err != nil {
			return errors.Wrap(err, "inspecting minikube cluster")
		}
		networkMode := container.HostConfig.NetworkMode
		err = a.ensureRegistryConnected(ctx, registry, networkMode)
		if err != nil {
			return err
		}

		if registryAPI == containerdRegistryV2 {
			err = a.applyContainerdPatchRegistryAPIV2(ctx, desired, registry, networkMode)
			if err != nil {
				return err
			}
		} else {
			err = a.applyContainerdPatchRegistryAPIV1(ctx, desired, registry, networkMode)
			if err != nil {
				return err
			}
		}
	}

	return nil
}

// Minikube v0.15.0+ creates a unique network for each minikube cluster.
func (a *minikubeAdmin) ensureRegistryConnected(ctx context.Context, registry *api.Registry, networkMode container.NetworkMode) error {
	if networkMode.IsUserDefined() && !a.inRegistryNetwork(registry, networkMode) {
		err := a.dockerClient.NetworkConnect(ctx, networkMode.UserDefined(), registry.Name, nil)
		if err != nil {
			return errors.Wrap(err, "connecting registry")
		}
	}
	return nil
}

// Minikube hard-codes IP addresses in the cluster network.
// So make sure the registry is disconnected from the network before running
// "minikube start".
//
// https://github.com/tilt-dev/ctlptl/issues/144
func (a *minikubeAdmin) ensureRegistryDisconnected(ctx context.Context, registry *api.Registry, networkMode container.NetworkMode) error {
	if networkMode.IsUserDefined() && a.inRegistryNetwork(registry, networkMode) {
		err := a.dockerClient.NetworkDisconnect(ctx, networkMode.UserDefined(), registry.Name, false)
		if err != nil {
			return errors.Wrap(err, "disconnecting registry")
		}

		// Remove the network from the current set of networks attached to the
		// registry. This allows the registry to be reconnected after a cluster
		// delete-and-create cycle without removing the registry itself.
		networks := []string{}
		for _, n := range registry.Status.Networks {
			if n != networkMode.UserDefined() {
				networks = append(networks, n)
			}
		}
		registry.Status.Networks = networks
	}
	return nil
}

func (a *minikubeAdmin) getNodes(ctx context.Context, name string) ([]string, error) {
	nodeOutput := bytes.NewBuffer(nil)
	err := a.runner.RunIO(ctx,
		genericclioptions.IOStreams{Out: nodeOutput, ErrOut: a.iostreams.ErrOut},
		"minikube", "-p", name, "node", "list")
	if err != nil {
		return nil, errors.Wrap(err, "listing minikube nodes")
	}

	nodes := []string{}
	nodeOutputSplit := strings.Split(nodeOutput.String(), "\n")
	for _, line := range nodeOutputSplit {
		fields := strings.Fields(line)
		if len(fields) == 0 {
			continue
		}
		node := strings.TrimSpace(fields[0])
		if node == "" {
			continue
		}
		nodes = append(nodes, node)
	}
	return nodes, nil
}

// We want to make sure that the image is pullable from either:
// localhost:[registry-port] or
// [registry-name]:5000
// by cloning the registry config created by minikube's --insecure-registry.
func (a *minikubeAdmin) applyContainerdPatchRegistryAPIV2(ctx context.Context, desired *api.Cluster, registry *api.Registry, networkMode container.NetworkMode) error {
	nodes, err := a.getNodes(ctx, desired.Name)
	if err != nil {
		return errors.Wrap(err, "configuring minikube registry")
	}

	return applyContainerdPatchRegistryAPIV2(ctx, a.runner, a.iostreams, nodes, desired, registry)
}

// We still patch containerd so that the user can push/pull from localhost.
// But note that this will NOT survive across minikube stop and start.
// See https://github.com/tilt-dev/ctlptl/issues/180
func (a *minikubeAdmin) applyContainerdPatchRegistryAPIV1(ctx context.Context, desired *api.Cluster, registry *api.Registry, networkMode container.NetworkMode) error {
	configPath := "/etc/containerd/config.toml"

	nodes, err := a.getNodes(ctx, desired.Name)
	if err != nil {
		return errors.Wrap(err, "configuring minikube registry")
	}

	for _, node := range nodes {
		networkHost := registry.Status.IPAddress
		if networkMode.IsUserDefined() {
			networkHost = registry.Name
		}

		// This is the most annoying sed expression we've ever had to write.
		// Minikube does not give us great primitives for writing files on the host
		// machine, so we have to hack around the shell escaping of its interactive shell.
		err := a.runner.RunIO(ctx,
			a.iostreams,
			"minikube", "-p", desired.Name, "--node", node,
			"ssh", "sudo", "sed", `\-i`,
			fmt.Sprintf(
				`s,\\\[plugins.\\\(\\\"\\\?.*cri\\\"\\\?\\\).registry.mirrors\\\],[plugins.\\\1.registry.mirrors]\\\n`+
					`\ \ \ \ \ \ \ \ [plugins.\\\1.registry.mirrors.\\\"localhost:%d\\\"]\\\n`+
					`\ \ \ \ \ \ \ \ \ \ endpoint\ =\ [\\\"http://%s:%d\\\"],`,
				registry.Status.HostPort, networkHost, registry.Status.ContainerPort),
			configPath)
		if err != nil {
			return errors.Wrap(err, "configuring minikube registry")
		}

		err = a.runner.RunIO(ctx, a.iostreams, "minikube", "-p", desired.Name, "--node", node,
			"ssh", "sudo", "systemctl", "restart", "containerd")
		if err != nil {
			return errors.Wrap(err, "configuring minikube registry")
		}
	}
	return nil
}

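// inRegistryNetwork reports whether the registry is already recorded as
// connected to the cluster's user-defined Docker network.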
func (a *minikubeAdmin) inRegistryNetwork(registry *api.Registry, networkMode container.NetworkMode) bool {
	for _, n := range registry.Status.Networks {
		if n == networkMode.UserDefined() {
			return true
		}
	}
	return false
}

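// LocalRegistryHosting returns the registry endpoints to advertise to the
// cluster: localhost:<host-port> for the host machine, and
// <registry-name-or-ip>:<container-port> for the cluster network and the
// container runtime.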
func (a *minikubeAdmin) LocalRegistryHosting(ctx context.Context, desired *api.Cluster, registry *api.Registry) (*localregistry.LocalRegistryHostingV1, error) {
	container, err := a.dockerClient.ContainerInspect(ctx, desired.Name)
	if err != nil {
		return nil, errors.Wrap(err, "inspecting minikube cluster")
	}
	networkMode := container.HostConfig.NetworkMode
	networkHost := registry.Status.IPAddress
	if networkMode.IsUserDefined() {
		networkHost = registry.Name
	}

	return &localregistry.LocalRegistryHostingV1{
		Host:                     fmt.Sprintf("localhost:%d", registry.Status.HostPort),
		HostFromClusterNetwork:   fmt.Sprintf("%s:%d", networkHost, registry.Status.ContainerPort),
		HostFromContainerRuntime: fmt.Sprintf("%s:%d", networkHost, registry.Status.ContainerPort),
		Help:                     "https://github.com/tilt-dev/ctlptl",
	}, nil
}

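// Delete tears down the minikube cluster by running `minikube delete -p <name>`.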
func (a *minikubeAdmin) Delete(ctx context.Context, config *api.Cluster) error {
	err := a.runner.RunIO(ctx, a.iostreams, "minikube", "delete", "-p", config.Name)
	if err != nil {
		return errors.Wrap(err, "deleting minikube cluster")
	}
	return nil
}
0707010000005C000081A400000000000000000000000168AFB0EA00000500000000000000000000000000000000000000003100000000ctlptl-0.8.43/pkg/cluster/admin_minikube_test.gopackage cluster

import (
	"context"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"k8s.io/cli-runtime/pkg/genericclioptions"

	"github.com/tilt-dev/ctlptl/internal/exec"
	"github.com/tilt-dev/ctlptl/pkg/api"
)

func TestMinikubeStartFlags(t *testing.T) {
	f := newMinikubeFixture()
	ctx := context.Background()
	err := f.a.Create(ctx, &api.Cluster{Name: "minikube", Minikube: &api.MinikubeCluster{StartFlags: []string{"--foo"}}}, nil)
	require.NoError(t, err)
	assert.Equal(t, []string{
		"minikube", "start",
		"--foo",
		"-p", "minikube",
		"--driver=docker",
		"--container-runtime=containerd",
		"--extra-config=kubelet.max-pods=500",
	}, f.runner.LastArgs)
}

type minikubeFixture struct {
	runner *exec.FakeCmdRunner
	a      *minikubeAdmin
}

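// newMinikubeFixture builds a minikubeAdmin backed by a fake Docker client and
// a fake command runner that reports minikube v1.25.2.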
func newMinikubeFixture() *minikubeFixture {
	dockerClient := &fakeDockerClient{ncpu: 1}
	iostreams := genericclioptions.IOStreams{Out: os.Stdout, ErrOut: os.Stderr}
	runner := exec.NewFakeCmdRunner(func(argv []string) string {
		if argv[1] == "version" {
			return `{"commit":"62e108c3dfdec8029a890ad6d8ef96b6461426dc","minikubeVersion":"v1.25.2"}`
		}
		return ""
	})
	return &minikubeFixture{
		runner: runner,
		a:      newMinikubeAdmin(iostreams, dockerClient, runner),
	}
}
0707010000005D000081A400000000000000000000000168AFB0EA000080A6000000000000000000000000000000000000002500000000ctlptl-0.8.43/pkg/cluster/cluster.gopackage cluster

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/blang/semver/v4"
	"github.com/google/go-cmp/cmp"
	"github.com/pkg/errors"
	"github.com/tilt-dev/clusterid"
	"github.com/tilt-dev/localregistry-go"
	"golang.org/x/sync/errgroup"
	"gopkg.in/yaml.v3"
	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/duration"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apimachinery/pkg/version"
	"k8s.io/cli-runtime/pkg/genericclioptions"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
	"k8s.io/klog/v2"

	"github.com/tilt-dev/ctlptl/internal/dctr"
	"github.com/tilt-dev/ctlptl/internal/exec"
	"github.com/tilt-dev/ctlptl/internal/socat"
	"github.com/tilt-dev/ctlptl/pkg/api"
	"github.com/tilt-dev/ctlptl/pkg/docker"
	"github.com/tilt-dev/ctlptl/pkg/registry"

	// Client auth plugins! They will auto-init if we import them.
	_ "k8s.io/client-go/plugin/pkg/client/auth"
)

const clusterSpecConfigMap = "ctlptl-cluster-spec"

var typeMeta = api.TypeMeta{APIVersion: "ctlptl.dev/v1alpha1", Kind: "Cluster"}
var listTypeMeta = api.TypeMeta{APIVersion: "ctlptl.dev/v1alpha1", Kind: "ClusterList"}
var groupResource = schema.GroupResource{Group: "ctlptl.dev", Resource: "clusters"}

// Due to the way the Kubernetes apiserver works, there's no easy way to
// distinguish between "server is taking a long time to respond because it's
// gone" and "server is taking a long time to respond because it has a slow auth
// plugin".
//
// So our health check timeout is a bit longer than we'd like.
// Fortunately, ctlptl is mostly used for local clusters.
const healthCheckTimeout = 3 * time.Second

const waitForKubeConfigTimeout = time.Minute
const waitForClusterCreateTimeout = 5 * time.Minute

func TypeMeta() api.TypeMeta {
	return typeMeta
}
func ListTypeMeta() api.TypeMeta {
	return listTypeMeta
}

type configLoader func() (clientcmdapi.Config, error)

type registryController interface {
	Apply(ctx context.Context, r *api.Registry) (*api.Registry, error)
	List(ctx context.Context, options registry.ListOptions) (*api.RegistryList, error)
}

type clientLoader func(*rest.Config) (kubernetes.Interface, error)

type socatController interface {
	ConnectRemoteDockerPort(ctx context.Context, port int) error
}

type Controller struct {
	iostreams                   genericclioptions.IOStreams
	runner                      exec.CmdRunner
	config                      clientcmdapi.Config
	clients                     map[string]kubernetes.Interface
	admins                      map[clusterid.Product]Admin
	dockerCLI                   dctr.CLI
	dmachine                    *dockerMachine
	configLoader                configLoader
	configWriter                configWriter
	registryCtl                 registryController
	clientLoader                clientLoader
	socat                       socatController
	waitForKubeConfigTimeout    time.Duration
	waitForClusterCreateTimeout time.Duration
	os                          string

	// TODO(nick): I deeply regret making this struct use goroutines. It makes
	// everything so much more complex.
	//
	// We should try to split this up into two structs - the part that needs
	// concurrency for performance, and the part that is fine being
	// single-threaded.
	mu sync.Mutex
}

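// DefaultController builds a Controller wired to the real kubeconfig loader,
// kubeconfig writer, Kubernetes client factory, and command runner.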
func DefaultController(iostreams genericclioptions.IOStreams) (*Controller, error) {
	configLoader := configLoader(func() (clientcmdapi.Config, error) {
		rules := clientcmd.NewDefaultClientConfigLoadingRules()
		rules.DefaultClientConfig = &clientcmd.DefaultClientConfig

		overrides := &clientcmd.ConfigOverrides{}
		loader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides)
		return loader.RawConfig()
	})

	configWriter := kubeconfigWriter{iostreams: iostreams}

	clientLoader := clientLoader(func(restConfig *rest.Config) (kubernetes.Interface, error) {
		return kubernetes.NewForConfig(restConfig)
	})

	config, err := configLoader()
	if err != nil {
		return nil, err
	}

	return &Controller{
		iostreams:                   iostreams,
		runner:                      exec.RealCmdRunner{},
		config:                      config,
		configWriter:                configWriter,
		clients:                     make(map[string]kubernetes.Interface),
		admins:                      make(map[clusterid.Product]Admin),
		configLoader:                configLoader,
		clientLoader:                clientLoader,
		waitForKubeConfigTimeout:    waitForKubeConfigTimeout,
		waitForClusterCreateTimeout: waitForClusterCreateTimeout,
		os:                          runtime.GOOS,
	}, nil
}

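// getSocatController lazily initializes the socat controller used to forward
// the apiserver port when Docker is running on a remote host.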
func (c *Controller) getSocatController(ctx context.Context) (socatController, error) {
	dcli, err := c.getDockerCLI(ctx)
	if err != nil {
		return nil, err
	}

	c.mu.Lock()
	defer c.mu.Unlock()

	if c.socat == nil {
		c.socat = socat.NewController(dcli)
	}

	return c.socat, nil
}

func (c *Controller) getDockerCLI(ctx context.Context) (dctr.CLI, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.dockerCLI != nil {
		return c.dockerCLI, nil
	}

	cli, err := dctr.NewCLI(c.iostreams)
	if err != nil {
		return nil, err
	}

	c.dockerCLI = cli
	return cli, nil
}

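// machine returns the Machine driver for the given product: the machine
// manages the Docker engine or VM that the cluster runs on.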
func (c *Controller) machine(ctx context.Context, name string, product clusterid.Product) (Machine, error) {
	dockerCLI, err := c.getDockerCLI(ctx)
	if err != nil {
		return nil, err
	}

	c.mu.Lock()
	defer c.mu.Unlock()

	switch product {
	case clusterid.ProductDockerDesktop, clusterid.ProductKIND, clusterid.ProductK3D:
		if c.dmachine == nil {
			machine, err := NewDockerMachine(ctx, dockerCLI.Client(), c.iostreams)
			if err != nil {
				return nil, err
			}
			c.dmachine = machine
		}
		return c.dmachine, nil

	case clusterid.ProductMinikube:
		if c.dmachine == nil {
			machine, err := NewDockerMachine(ctx, dockerCLI.Client(), c.iostreams)
			if err != nil {
				return nil, err
			}
			c.dmachine = machine
		}
		return newMinikubeMachine(c.iostreams, c.runner, name, c.dmachine), nil
	}

	return unknownMachine{product: product}, nil
}

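// registryController lazily initializes the controller used to create and list
// local registry containers.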
func (c *Controller) registryController(ctx context.Context) (registryController, error) {
	dockerCLI, err := c.getDockerCLI(ctx)
	if err != nil {
		return nil, err
	}

	c.mu.Lock()
	defer c.mu.Unlock()

	result := c.registryCtl
	if result == nil {
		result = registry.NewController(c.iostreams, dockerCLI)
		c.registryCtl = result
	}
	return result, nil
}

// A cluster admin provides the basic start/stop functionality of a cluster,
// independent of the configuration of the machine it's running on.
func (c *Controller) admin(ctx context.Context, product clusterid.Product) (Admin, error) {
	dockerCLI, err := c.getDockerCLI(ctx)
	if err != nil {
		return nil, err
	}

	c.mu.Lock()
	defer c.mu.Unlock()

	admin, ok := c.admins[product]
	if ok {
		return admin, nil
	}

	switch product {
	case clusterid.ProductDockerDesktop:
		if !docker.IsLocalDockerDesktop(dockerCLI.Client().DaemonHost(), c.os) {
			return nil, fmt.Errorf("Detected remote DOCKER_HOST. Remote Docker engines do not support Docker Desktop clusters: %s",
				dockerCLI.Client().DaemonHost())
		}

		admin = newDockerDesktopAdmin(dockerCLI.Client().DaemonHost(), c.os, c.dmachine.d4m)
	case clusterid.ProductKIND:
		admin = newKindAdmin(c.iostreams, c.runner, dockerCLI.Client())
	case clusterid.ProductK3D:
		admin = newK3DAdmin(c.iostreams, c.runner)
	case clusterid.ProductMinikube:
		admin = newMinikubeAdmin(c.iostreams, dockerCLI.Client(), c.runner)
	}

	if product == "" {
		return nil, fmt.Errorf("you must specify a 'product' field in your cluster config")
	}
	if admin == nil {
		return nil, fmt.Errorf("ctlptl doesn't know how to set up clusters for product: %s", product)
	}
	c.admins[product] = admin
	return admin, nil
}

func (c *Controller) configCopy() *clientcmdapi.Config {
	c.mu.Lock()
	defer c.mu.Unlock()

	return c.config.DeepCopy()
}

// Gets the port of the current API server.
func (c *Controller) currentAPIServerPort() int {
	c.mu.Lock()
	defer c.mu.Unlock()

	current := c.config.CurrentContext
	context, ok := c.config.Contexts[current]
	if !ok {
		return 0
	}

	cluster, ok := c.config.Clusters[context.Cluster]
	if !ok {
		return 0
	}

	parts := strings.Split(cluster.Server, ":")
	port, err := strconv.Atoi(parts[len(parts)-1])
	if err != nil {
		return 0
	}
	return port
}

func (c *Controller) configCurrent() string {
	c.mu.Lock()
	defer c.mu.Unlock()

	return c.config.CurrentContext
}

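// client returns a Kubernetes client for the given kubeconfig context, caching
// it for reuse.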
func (c *Controller) client(name string) (kubernetes.Interface, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	client, ok := c.clients[name]
	if ok {
		return client, nil
	}

	restConfig, err := clientcmd.NewDefaultClientConfig(
		c.config, &clientcmd.ConfigOverrides{CurrentContext: name}).ClientConfig()
	if err != nil {
		return nil, err
	}

	client, err = c.clientLoader(restConfig)
	if err != nil {
		return nil, err
	}
	c.clients[name] = client
	return client, nil
}

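// populateCreationTimestamp sets the cluster's creation timestamp to the
// creation time of its oldest node.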
func (c *Controller) populateCreationTimestamp(ctx context.Context, cluster *api.Cluster, client kubernetes.Interface) error {
	nodes, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}

	minTime := metav1.Time{}
	for _, node := range nodes.Items {
		cTime := node.CreationTimestamp
		if minTime.Time.IsZero() || cTime.Time.Before(minTime.Time) {
			minTime = cTime
		}
	}

	cluster.Status.CreationTimestamp = minTime

	return nil
}

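// populateLocalRegistryHosting reads the cluster's local registry hosting
// config and, if the advertised host points at a local port, records the name
// of the matching registry on the cluster.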
func (c *Controller) populateLocalRegistryHosting(ctx context.Context, cluster *api.Cluster, client kubernetes.Interface) error {
	hosting, err := localregistry.Discover(ctx, client.CoreV1())
	if err != nil {
		return err
	}

	cluster.Status.LocalRegistryHosting = &hosting

	if hosting.Host == "" {
		return nil
	}

	// Let's try to find the registry corresponding to this cluster.
	var port int
	for _, pattern := range []string{"localhost:%d", "127.0.0.1:%d"} {
		_, _ = fmt.Sscanf(hosting.Host, pattern, &port)
		if port != 0 {
			break
		}
	}

	if port == 0 {
		return nil
	}

	registryCtl, err := c.registryController(ctx)
	if err != nil {
		return err
	}

	registryList, err := registryCtl.List(ctx, registry.ListOptions{FieldSelector: fmt.Sprintf("port=%d", port)})
	if err != nil {
		return err
	}

	if len(registryList.Items) == 0 {
		return nil
	}

	cluster.Registry = registryList.Items[0].Name

	return nil
}

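// populateMachineStatus fills in machine-level status, currently the CPU count.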
func (c *Controller) populateMachineStatus(ctx context.Context, cluster *api.Cluster) error {
	machine, err := c.machine(ctx, cluster.Name, clusterid.Product(cluster.Product))
	if err != nil {
		return err
	}

	cpu, err := machine.CPUs(ctx)
	if err != nil {
		return err
	}
	cluster.Status.CPUs = cpu
	return nil
}

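// populateClusterSpec reads the spec that ctlptl stored in the kube-public
// namespace at creation time, so we can report how the cluster was configured.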
func (c *Controller) populateClusterSpec(ctx context.Context, cluster *api.Cluster, client kubernetes.Interface) error {
	cMap, err := client.CoreV1().ConfigMaps("kube-public").Get(ctx, clusterSpecConfigMap, metav1.GetOptions{})
	if err != nil {
		if apierrors.IsNotFound(err) || apierrors.IsForbidden(err) {
			return nil
		}
		return err
	}

	spec := api.Cluster{}
	err = yaml.Unmarshal([]byte(cMap.Data["cluster.v1alpha1"]), &spec)
	if err != nil {
		return err
	}

	cluster.KubernetesVersion = spec.KubernetesVersion
	cluster.MinCPUs = spec.MinCPUs
	cluster.KindV1Alpha4Cluster = spec.KindV1Alpha4Cluster
	cluster.Minikube = spec.Minikube
	cluster.K3D = spec.K3D
	return nil
}

// If you have dead clusters in your kubeconfig, it's common for the requests to
// hang indefinitely. So we do a quick health check with a short timeout.
func (c *Controller) healthCheckCluster(ctx context.Context, client kubernetes.Interface) (*version.Info, error) {
	ctx, cancel := context.WithTimeout(ctx, healthCheckTimeout)
	defer cancel()

	return c.serverVersion(ctx, client)
}

// A fork of DiscoveryClient ServerVersion that obeys Context timeouts.
func (c *Controller) serverVersion(ctx context.Context, client kubernetes.Interface) (*version.Info, error) {
	restClient := client.Discovery().RESTClient()
	if restClient == nil {
		return client.Discovery().ServerVersion()
	}

	body, err := restClient.Get().AbsPath("/version").Do(ctx).Raw()
	if err != nil {
		return nil, err
	}
	var info version.Info
	err = json.Unmarshal(body, &info)
	if err != nil {
		return nil, fmt.Errorf("unable to parse the server version: %v", err)
	}
	return &info, nil
}

// Query the cluster for its attributes and populate the given object.
func (c *Controller) populateCluster(ctx context.Context, cluster *api.Cluster) {
	// When setting up clusters on remote Docker, we set up a socat
	// tunnel. But sometimes that socat tunnel dies! This makes it impossible
	// to populate the cluster attributes because we can't even talk to the cluster.
	//
	// If this looks like it might be running on a remote Docker instance,
	// ensure the socat tunnel is running. It's semantically odd that 'ctlptl get'
	// creates a persistent tunnel, but it's probably closer to what users expect.
	name := cluster.Name
	product := clusterid.Product(cluster.Product)
	if product == clusterid.ProductKIND || product == clusterid.ProductK3D || product == clusterid.ProductMinikube {
		err := c.maybeCreateForwarderForCurrentCluster(ctx, io.Discard)
		if err != nil {
			// If creating the forwarder fails, that's OK. We may still be able to populate things.
			klog.V(4).Infof("WARNING: connecting socat tunnel to cluster %s: %v\n", name, err)
		}
	}

	client, err := c.client(cluster.Name)
	if err != nil {
		klog.V(4).Infof("WARNING: creating cluster %s client: %v\n", name, err)
		return
	}

	cluster.Status.Current = c.configCurrent() == cluster.Name

	v, err := c.healthCheckCluster(ctx, client)
	if err != nil {
		cluster.Status.Error = fmt.Sprintf("healthcheck: %s", err.Error())

		// If the cluster isn't reachable, don't try updating the rest
		// of the fields.
		return
	}

	cluster.Status.KubernetesVersion = v.GitVersion

	g, ctx := errgroup.WithContext(ctx)
	g.Go(func() error {
		err := c.populateCreationTimestamp(ctx, cluster, client)
		if err != nil {
			klog.V(4).Infof("WARNING: reading cluster %s creation time: %v\n", name, err)
		}
		return err
	})

	g.Go(func() error {
		err := c.populateLocalRegistryHosting(ctx, cluster, client)
		if err != nil {
			klog.V(4).Infof("WARNING: reading cluster %s registry: %v\n", name, err)
		}
		return err
	})

	g.Go(func() error {
		err := c.populateMachineStatus(ctx, cluster)
		if err != nil {
			klog.V(4).Infof("WARNING: reading cluster %s machine: %v\n", name, err)
		}
		return err
	})

	g.Go(func() error {
		err := c.populateClusterSpec(ctx, cluster, client)
		if err != nil {
			klog.V(4).Infof("WARNING: reading cluster %s spec: %v\n", name, err)
		}
		return err
	})

	err = g.Wait()
	if err != nil {
		cluster.Status.Error = fmt.Sprintf("reading status: %s", err.Error())
	}
}

func FillDefaults(cluster *api.Cluster) {
	// If the name is in the Kind config, but not in the main config,
	// lift it up to the main config.
	if cluster.KindV1Alpha4Cluster != nil && cluster.Name == "" && cluster.KindV1Alpha4Cluster.Name != "" {
		cluster.Name = fmt.Sprintf("kind-%s", cluster.KindV1Alpha4Cluster.Name)
	}

	// Create a default name if one isn't in the YAML.
	// The default name is determined by the underlying product.
	if cluster.Name == "" {
		cluster.Name = clusterid.Product(cluster.Product).DefaultClusterName()
	}

	// Override the Kind config if necessary.
	if cluster.KindV1Alpha4Cluster != nil {
		cluster.KindV1Alpha4Cluster.Name = strings.TrimPrefix(cluster.Name, "kind-")
	}
}

// TODO(nick): Add more registry-supporting clusters.
func supportsRegistry(product clusterid.Product) bool {
	return product == clusterid.ProductKIND || product == clusterid.ProductMinikube || product == clusterid.ProductK3D
}

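// supportsKubernetesVersion reports whether ctlptl can pin a specific
// Kubernetes version for the given product.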
func supportsKubernetesVersion(product clusterid.Product, version string) bool {
	return product == clusterid.ProductKIND || product == clusterid.ProductMinikube
}

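// canReconcileK8sVersion reports whether the existing cluster satisfies the
// desired Kubernetes version without being recreated. On KIND, only the major
// and minor versions need to match.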
func (c *Controller) canReconcileK8sVersion(ctx context.Context, desired, existing *api.Cluster) bool {
	if desired.KubernetesVersion == "" {
		return true
	}

	if desired.KubernetesVersion == existing.Status.KubernetesVersion {
		return true
	}

	// On KIND, it's ok if the patch doesn't match.
	if clusterid.Product(desired.Product) == clusterid.ProductKIND {
		dv, err := semver.ParseTolerant(desired.KubernetesVersion)
		if err != nil {
			return false
		}
		ev, err := semver.ParseTolerant(existing.Status.KubernetesVersion)
		if err != nil {
			return false
		}
		return dv.Major == ev.Major && dv.Minor == ev.Minor
	}

	return false
}

func (c *Controller) deleteIfIrreconcilable(ctx context.Context, desired, existing *api.Cluster) error {
	if existing.Name == "" {
		// Nothing to delete
		return nil
	}

	needsDelete := false
	if existing.Product != "" && existing.Product != desired.Product {
		_, _ = fmt.Fprintf(c.iostreams.ErrOut, "Deleting cluster %s to change admin from %s to %s\n",
			desired.Name, existing.Product, desired.Product)
		needsDelete = true
	} else if desired.Registry != "" && desired.Registry != existing.Registry {
		// TODO(nick): Ideally, we should be able to patch a cluster
		// with a registry, but it gets a little hairy.
		_, _ = fmt.Fprintf(c.iostreams.ErrOut, "Deleting cluster %s to initialize with registry %s\n",
			desired.Name, desired.Registry)
		needsDelete = true
	} else if !c.canReconcileK8sVersion(ctx, desired, existing) {
		_, _ = fmt.Fprintf(c.iostreams.ErrOut,
			"Deleting cluster %s because desired Kubernetes version (%s) does not match current (%s)\n",
			desired.Name, desired.KubernetesVersion, existing.Status.KubernetesVersion)
		needsDelete = true
	} else if desired.KindV1Alpha4Cluster != nil && !cmp.Equal(existing.KindV1Alpha4Cluster, desired.KindV1Alpha4Cluster) {
		_, _ = fmt.Fprintf(c.iostreams.ErrOut,
			"Deleting cluster %s because desired Kind config does not match current.\nCluster config diff: %s\n",
			desired.Name, cmp.Diff(existing.KindV1Alpha4Cluster, desired.KindV1Alpha4Cluster))
		needsDelete = true
	} else if desired.Minikube != nil && !cmp.Equal(existing.Minikube, desired.Minikube) {
		_, _ = fmt.Fprintf(c.iostreams.ErrOut,
			"Deleting cluster %s because desired Minikube config does not match current.\nCluster config diff: %s\n",
			desired.Name, cmp.Diff(existing.Minikube, desired.Minikube))
		needsDelete = true
	} else if desired.K3D != nil && !cmp.Equal(existing.K3D, desired.K3D) {
		_, _ = fmt.Fprintf(c.iostreams.ErrOut,
			"Deleting cluster %s because desired K3D config does not match current.\nCluster config diff: %s\n",
			desired.Name, cmp.Diff(existing.K3D, desired.K3D))
		needsDelete = true
	}

	if !needsDelete {
		return nil
	}

	err := c.Delete(ctx, desired.Name)
	if err != nil {
		return err
	}
	*existing = api.Cluster{}
	return nil
}

// Checks if a registry exists with the given name, and creates one if it doesn't.
func (c *Controller) ensureRegistryExistsForCluster(ctx context.Context, desired *api.Cluster) (*api.Registry, error) {
	regName := desired.Registry
	if regName == "" {
		return nil, nil
	}

	regLabels := map[string]string{}
	if desired.Product == string(clusterid.ProductK3D) {
		// A K3D cluster will only connect to a registry
		// with these labels.
		regLabels["app"] = "k3d"
		regLabels["k3d.role"] = "registry"
	}

	regCtl, err := c.registryController(ctx)
	if err != nil {
		return nil, err
	}

	return regCtl.Apply(ctx, &api.Registry{
		TypeMeta: registry.TypeMeta(),
		Name:     regName,
		Labels:   regLabels,
	})
}

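// For reference, a cluster config passed to Apply (e.g. via `ctlptl apply -f`)
// looks roughly like this; an illustrative sketch, not the full schema:
//
//	apiVersion: ctlptl.dev/v1alpha1
//	kind: Cluster
//	product: kind
//	registry: ctlptl-registry
//	kubernetesVersion: v1.31.0
//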
// Compare the desired cluster against the existing cluster, and reconcile
// the two to match.
func (c *Controller) Apply(ctx context.Context, desired *api.Cluster) (*api.Cluster, error) {
	if desired.Product == "" {
		return nil, fmt.Errorf("product field must be non-empty")
	}
	if desired.Registry != "" && !supportsRegistry(clusterid.Product(desired.Product)) {
		return nil, fmt.Errorf("product %s does not support a registry", desired.Product)
	}
	if desired.KubernetesVersion != "" && !supportsKubernetesVersion(clusterid.Product(desired.Product), desired.KubernetesVersion) {
		return nil, fmt.Errorf("product %s does not support a custom Kubernetes version", desired.Product)
	}
	if desired.KindV1Alpha4Cluster != nil && clusterid.Product(desired.Product) != clusterid.ProductKIND {
		return nil, fmt.Errorf("kind config may only be set on clusters with product: kind. Actual product: %s", desired.Product)
	}
	if desired.Minikube != nil && clusterid.Product(desired.Product) != clusterid.ProductMinikube {
		return nil, fmt.Errorf("minikube config may only be set on clusters with product: minikube. Actual product: %s", desired.Product)
	}
	if desired.K3D != nil && clusterid.Product(desired.Product) != clusterid.ProductK3D {
		return nil, fmt.Errorf("k3d config may only be set on clusters with product: k3d. Actual product: %s", desired.Product)
	}

	FillDefaults(desired)

	// Fetch the machine driver for this product and cluster name,
	// and use it to apply the constraints to the underlying VM.
	machine, err := c.machine(ctx, desired.Name, clusterid.Product(desired.Product))
	if err != nil {
		return nil, err
	}

	// First, we have to make sure the machine driver has started, so that we can
	// query it at all for the existing configuration.
	err = machine.EnsureExists(ctx)
	if err != nil {
		return nil, err
	}

	// EnsureExists may have to refresh the connection to the apiserver,
	// so refresh our clients.
	err = c.reloadConfigs()
	if err != nil {
		return nil, err
	}

	existingCluster, err := c.Get(ctx, desired.Name)
	if err != nil && !apierrors.IsNotFound(err) {
		return nil, err
	}

	if existingCluster == nil {
		existingCluster = &api.Cluster{}
	}

	// If we can't reconcile the two clusters, delete it now.
	// TODO(nick): Check for a --force flag, and only delete the cluster
	// if there's a --force.

	err = c.deleteIfIrreconcilable(ctx, desired, existingCluster)
	if err != nil {
		return nil, err
	}

	// Fetch the admin driver for this product, for setting up the cluster on top of
	// the machine.
	admin, err := c.admin(ctx, clusterid.Product(desired.Product))
	if err != nil {
		return nil, err
	}

	existingStatus := existingCluster.Status
	needsRestart := existingStatus.CreationTimestamp.Time.IsZero() ||
		existingStatus.CPUs < desired.MinCPUs
	if needsRestart {
		err := machine.Restart(ctx, desired, existingCluster)
		if err != nil {
			return nil, err
		}
	}

	reg, err := c.ensureRegistryExistsForCluster(ctx, desired)
	if err != nil {
		return nil, err
	}

	// Configure the cluster to match what we want.
	needsCreate := existingStatus.CreationTimestamp.Time.IsZero() ||
		desired.Name != existingCluster.Name ||
		desired.Product != existingCluster.Product
	if needsCreate {
		err := admin.Create(ctx, desired, reg)
		if err != nil {
			return nil, err
		}

		err = c.waitForContextCreate(ctx, desired)
		if err != nil {
			return nil, err
		}
	}

	// Update the kubectl context to match this cluster.
	err = c.configWriter.SetContext(desired.Name)
	if err != nil {
		return nil, fmt.Errorf("switching to cluster context %s: %v", desired.Name, err)
	}

	err = c.reloadConfigs()
	if err != nil {
		return nil, err
	}

	if needsCreate {
		// If the cluster apiserver is in a remote docker cluster,
		// set up a portforwarder.
		err := c.maybeCreateForwarderForCurrentCluster(ctx, c.iostreams.ErrOut)
		if err != nil {
			return nil, err
		}

		err = c.maybeFixKubeConfigInsideContainer(ctx, desired)
		if err != nil {
			return nil, err
		}

		err = c.waitForHealthCheckAfterCreate(ctx, desired)
		if err != nil {
			return nil, err
		}

		err = c.writeClusterSpec(ctx, desired)
		if err != nil {
			return nil, errors.Wrap(err, "configuring cluster")
		}

		if desired.Registry != "" {
			err = c.createRegistryHosting(ctx, admin, desired, reg)
			if err != nil {
				return nil, errors.Wrap(err, "configuring cluster registry")
			}
		}
	}

	return c.Get(ctx, desired.Name)
}

// Writes the cluster spec to the cluster itself, so
// we can read it later to determine how the cluster was initialized.
func (c *Controller) writeClusterSpec(ctx context.Context, cluster *api.Cluster) error {
	client, err := c.client(cluster.Name)
	if err != nil {
		return err
	}

	specOnly := cluster.DeepCopy()
	specOnly.Status = api.ClusterStatus{}
	data, err := yaml.Marshal(specOnly)
	if err != nil {
		return err
	}

	err = client.CoreV1().ConfigMaps("kube-public").Delete(ctx, clusterSpecConfigMap, metav1.DeleteOptions{})
	if err != nil && !apierrors.IsNotFound(err) {
		return err
	}

	_, err = client.CoreV1().ConfigMaps("kube-public").Create(ctx, &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      clusterSpecConfigMap,
			Namespace: "kube-public",
		},
		Data: map[string]string{"cluster.v1alpha1": string(data)},
	}, metav1.CreateOptions{})
	return err
}

// Create a configmap on the cluster, so that other tools know that a registry
// has been configured.
func (c *Controller) createRegistryHosting(ctx context.Context, admin Admin, cluster *api.Cluster, reg *api.Registry) error {
	hosting, err := admin.LocalRegistryHosting(ctx, cluster, reg)
	if err != nil {
		return err
	}
	if hosting == nil {
		return nil
	}

	client, err := c.client(cluster.Name)
	if err != nil {
		return err
	}

	data, err := yaml.Marshal(hosting)
	if err != nil {
		return err
	}

	_, err = client.CoreV1().ConfigMaps("kube-public").Create(ctx, &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "local-registry-hosting",
			Namespace: "kube-public",
		},
		Data: map[string]string{"localRegistryHosting.v1": string(data)},
	}, metav1.CreateOptions{})
	if err != nil {
		return err
	}

	_, _ = fmt.Fprintf(c.iostreams.ErrOut, " 🔌 Connected cluster %s to registry %s at %s\n", cluster.Name, reg.Name, hosting.Host)
	_, _ = fmt.Fprintf(c.iostreams.ErrOut, " 👐 Push images to the cluster like 'docker push %s/alpine'\n", hosting.Host)

	return nil
}

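// Delete tears down the named cluster via its product admin and removes the
// corresponding kubectl context if it is still present.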
func (c *Controller) Delete(ctx context.Context, name string) error {
	existing, err := c.Get(ctx, name)
	if err != nil {
		return err
	}

	admin, err := c.admin(ctx, clusterid.Product(existing.Product))
	if err != nil {
		return err
	}

	err = admin.Delete(ctx, existing)
	if err != nil {
		return err
	}

	err = c.reloadConfigs()
	if err != nil {
		return err
	}

	// If the context is still in the configs, delete it.
	_, ok := c.configCopy().Contexts[existing.Name]
	if ok {
		return c.configWriter.DeleteContext(existing.Name)
	}
	return nil
}

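// reloadConfigs re-reads the kubeconfig and drops any cached clients, since
// cluster operations may have rewritten the config on disk.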
func (c *Controller) reloadConfigs() error {
	config, err := c.configLoader()
	if err != nil {
		return err
	}

	c.mu.Lock()
	defer c.mu.Unlock()
	c.config = config
	c.clients = make(map[string]kubernetes.Interface)
	return nil
}

func (c *Controller) Current(ctx context.Context) (*api.Cluster, error) {
	current := c.configCurrent()
	if current == "" {
		return nil, fmt.Errorf("no cluster selected in kubeconfig")
	}
	return c.Get(ctx, current)
}

func (c *Controller) Get(ctx context.Context, name string) (*api.Cluster, error) {
	config := c.configCopy()
	ct, ok := config.Contexts[name]
	if !ok {
		return nil, apierrors.NewNotFound(groupResource, name)
	}

	configCluster, ok := config.Clusters[ct.Cluster]
	if !ok {
		return nil, apierrors.NewNotFound(groupResource, name)
	}

	cluster := &api.Cluster{
		TypeMeta: typeMeta,
		Name:     name,
		Product:  clusterid.ProductFromContext(ct, configCluster).String(),
	}
	c.populateCluster(ctx, cluster)

	return cluster, nil
}

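// List returns every cluster in the kubeconfig that matches the field
// selector, populating their statuses in parallel.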
func (c *Controller) List(ctx context.Context, options ListOptions) (*api.ClusterList, error) {
	selector, err := fields.ParseSelector(options.FieldSelector)
	if err != nil {
		return nil, err
	}

	config := c.configCopy()
	names := make([]string, 0, len(config.Contexts))
	for name, ct := range config.Contexts {
		_, ok := config.Clusters[ct.Cluster]
		if !ok {
			// Filter out malformed contexts.
			continue
		}

		names = append(names, name)
	}
	sort.Strings(names)

	// Listing all clusters can take a long time, so parallelize it.
	all := make([]*api.Cluster, len(names))
	g, ctx := errgroup.WithContext(ctx)

	for i, name := range names {
		ct := config.Contexts[name]
		name := name
		i := i
		g.Go(func() error {
			cluster := &api.Cluster{
				TypeMeta: typeMeta,
				Name:     name,
				Product:  clusterid.ProductFromContext(ct, config.Clusters[ct.Cluster]).String(),
			}
			if !selector.Matches((*clusterFields)(cluster)) {
				return nil
			}
			c.populateCluster(ctx, cluster)
			all[i] = cluster
			return nil
		})
	}

	err = g.Wait()
	if err != nil {
		return nil, err
	}

	result := []api.Cluster{}
	for _, c := range all {
		if c == nil {
			continue
		}
		result = append(result, *c)
	}

	return &api.ClusterList{
		TypeMeta: listTypeMeta,
		Items:    result,
	}, nil
}

// If the current cluster is on a remote docker instance,
// we need a port-forwarder to connect it.
func (c *Controller) maybeCreateForwarderForCurrentCluster(ctx context.Context, errOut io.Writer) error {
	dockerCLI, err := c.getDockerCLI(ctx)
	if err != nil {
		return err
	}

	if docker.IsLocalHost(dockerCLI.Client().DaemonHost()) {
		return nil
	}

	port := c.currentAPIServerPort()
	if port == 0 {
		return nil
	}

	socat, err := c.getSocatController(ctx)
	if err != nil {
		return err
	}

	_, _ = fmt.Fprintf(errOut, " 🎮 Env DOCKER_HOST set. Assuming remote Docker and forwarding apiserver to localhost:%d\n", port)
	return socat.ConnectRemoteDockerPort(ctx, port)
}

// Docker Desktop may be slow to write the Kubernetes context
// back to the kubeconfig, so we have to wait until it appears.
func (c *Controller) waitForContextCreate(ctx context.Context, cluster *api.Cluster) error {
	refreshAndCheckOK := func() error {
		err := c.reloadConfigs()
		if err != nil {
			return err
		}
		_, err = c.client(cluster.Name)
		if err != nil {
			return err
		}
		return nil
	}

	err := refreshAndCheckOK()
	if err == nil {
		return nil
	}

	_, _ = fmt.Fprintf(c.iostreams.ErrOut, "Waiting %s for cluster %q to create kubectl context...\n",
		duration.ShortHumanDuration(c.waitForKubeConfigTimeout), cluster.Name)
	var lastErr error
	err = wait.PollUntilContextTimeout(ctx, time.Second, c.waitForKubeConfigTimeout, true, func(ctx context.Context) (bool, error) {
		err := refreshAndCheckOK()
		lastErr = err
		isSuccess := err == nil
		return isSuccess, nil
	})
	if err != nil {
		return fmt.Errorf("kubernetes context never created: %v", lastErr)
	}
	return nil
}

// Our cluster creation tools aren't super trustworthy.
//
// After the cluster is created, we poll the kubeconfig until
// the cluster context has been created and the cluster becomes healthy.
//
// https://github.com/tilt-dev/ctlptl/issues/87
// https://github.com/tilt-dev/ctlptl/issues/131
func (c *Controller) waitForHealthCheckAfterCreate(ctx context.Context, cluster *api.Cluster) error {
	checkOK := func() error {
		client, err := c.client(cluster.Name)
		if err != nil {
			return err
		}

		// quick apiserver health check.
		_, err = c.healthCheckCluster(ctx, client)
		if err != nil {
			return err
		}

		// make sure the kube-public namespace exists,
		// because this is where ctlptl writes its configs.
		_, err = client.CoreV1().Namespaces().Get(ctx, "kube-public", metav1.GetOptions{})
		if err != nil {
			return err
		}

		return nil
	}

	// If the tool properly waited for the cluster to init,
	// return immediately.
	err := checkOK()
	if err == nil {
		return nil
	}

	_, _ = fmt.Fprintf(c.iostreams.ErrOut, "Waiting %s for Kubernetes cluster %q to start...\n",
		duration.ShortHumanDuration(c.waitForClusterCreateTimeout), cluster.Name)
	var lastErr error
	err = wait.PollUntilContextTimeout(ctx, time.Second, c.waitForClusterCreateTimeout, true, func(ctx context.Context) (bool, error) {
		err := checkOK()
		lastErr = err
		isSuccess := err == nil
		return isSuccess, nil
	})
	if err != nil {
		return fmt.Errorf("timed out waiting for cluster to start: %v", lastErr)
	}
	return nil
}

// maybeFixKubeConfigInsideContainer modifies the kubeconfig to allow access to
// the cluster from a container attached to the same network as the cluster, if
// currently running inside a container and the cluster admin object supports
// the modifications.
func (c *Controller) maybeFixKubeConfigInsideContainer(ctx context.Context, cluster *api.Cluster) error {
	containerID := insideContainer(ctx, c.dockerCLI.Client())
	if containerID == "" {
		return nil
	}

	admin, err := c.admin(ctx, clusterid.Product(cluster.Product))
	if err != nil {
		return err
	}

	adminInC, ok := admin.(AdminInContainer)
	if !ok {
		return nil
	}

	err = adminInC.ModifyConfigInContainer(ctx, cluster, containerID, c.dockerCLI.Client(), c.configWriter)
	if err != nil {
		return fmt.Errorf("error updating kube config: %w", err)
	}

	return c.reloadConfigs()
}
0707010000005E000081A400000000000000000000000168AFB0EA00005FD2000000000000000000000000000000000000002A00000000ctlptl-0.8.43/pkg/cluster/cluster_test.gopackage cluster

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"os"
	"testing"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/image"
	"github.com/docker/docker/api/types/network"
	registrytypes "github.com/docker/docker/api/types/registry"
	"github.com/docker/docker/api/types/system"
	dockerregistry "github.com/docker/docker/registry"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/version"
	"k8s.io/cli-runtime/pkg/genericclioptions"
	discoveryfake "k8s.io/client-go/discovery/fake"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/rest"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
	"sigs.k8s.io/kind/pkg/apis/config/v1alpha4"

	"github.com/tilt-dev/clusterid"
	"github.com/tilt-dev/localregistry-go"

	"github.com/tilt-dev/ctlptl/internal/dctr"
	"github.com/tilt-dev/ctlptl/internal/exec"
	"github.com/tilt-dev/ctlptl/pkg/api"
	"github.com/tilt-dev/ctlptl/pkg/registry"
)

func TestClusterGet(t *testing.T) {
	c := newFakeController(t)
	cluster, err := c.Get(context.Background(), "microk8s")
	assert.NoError(t, err)
	assert.Equal(t, cluster.Name, "microk8s")
	assert.Equal(t, cluster.Product, "microk8s")
}

func TestClusterCurrent(t *testing.T) {
	c := newFakeController(t)
	cluster, err := c.Current(context.Background())
	assert.NoError(t, err)
	assert.Equal(t, cluster.Name, "microk8s")
	assert.Equal(t, cluster.Product, "microk8s")
}

func TestDeleteClusterContext(t *testing.T) {
	f := newFixture(t)

	admin := f.newFakeAdmin("docker-desktop")

	_, exists := f.config.Contexts["docker-desktop"]
	assert.True(t, exists)
	err := f.controller.Delete(context.Background(), "docker-desktop")
	assert.NoError(t, err)
	assert.Equal(t, "docker-desktop", admin.deleted.Name)

	_, exists = f.config.Contexts["docker-desktop"]
	assert.False(t, exists)
}

func TestClusterList(t *testing.T) {
	c := newFakeController(t)
	clusters, err := c.List(context.Background(), ListOptions{})
	assert.NoError(t, err)
	require.Equal(t, 2, len(clusters.Items))
	assert.Equal(t, "docker-desktop", clusters.Items[0].Name)
	assert.Equal(t, "microk8s", clusters.Items[1].Name)
}

func TestClusterStatusError(t *testing.T) {
	c := newFakeController(t)
	clusters, err := c.List(context.Background(), ListOptions{})
	assert.NoError(t, err)
	require.Equal(t, 2, len(clusters.Items))
	assert.Equal(t, "reading status: not started", clusters.Items[0].Status.Error)
	assert.Equal(t, "", clusters.Items[1].Status.Error)
}

// Make sure that an empty config doesn't confuse ctlptl.
func TestClusterListEmptyConfig(t *testing.T) {
	c := newFakeController(t)
	c.config.Contexts["kind"] = &clientcmdapi.Context{}

	clusters, err := c.List(context.Background(), ListOptions{})
	assert.NoError(t, err)
	require.Equal(t, 2, len(clusters.Items))
	assert.Equal(t, "docker-desktop", clusters.Items[0].Name)
	assert.Equal(t, "microk8s", clusters.Items[1].Name)
}

func TestClusterListSelectorMatch(t *testing.T) {
	c := newFakeController(t)
	clusters, err := c.List(context.Background(), ListOptions{FieldSelector: "product=microk8s"})
	assert.NoError(t, err)
	require.Equal(t, 1, len(clusters.Items))
	assert.Equal(t, "microk8s", clusters.Items[0].Name)
}

func TestClusterListSelectorNoMatch(t *testing.T) {
	c := newFakeController(t)
	clusters, err := c.List(context.Background(), ListOptions{FieldSelector: "product=kind"})
	assert.NoError(t, err)
	assert.Equal(t, 0, len(clusters.Items))
}

func TestClusterGetMissing(t *testing.T) {
	c := newFakeController(t)
	_, err := c.Get(context.Background(), "dunkees")
	if assert.Error(t, err) {
		assert.True(t, errors.IsNotFound(err))
	}
}

func TestClusterApplyKIND(t *testing.T) {
	f := newFixture(t)
	f.setOS("darwin")

	assert.Equal(t, false, f.d4m.started)
	kindAdmin := f.newFakeAdmin(clusterid.ProductKIND)

	result, err := f.controller.Apply(context.Background(), &api.Cluster{
		Product: string(clusterid.ProductKIND),
	})
	require.NoError(t, err)
	assert.Equal(t, true, f.d4m.started)
	assert.Equal(t, "kind-kind", kindAdmin.created.Name)
	assert.Equal(t, "kind-kind", result.Name)
}

// Make sure an empty context doesn't confuse ctlptl.
func TestClusterApplyKINDEmptyConfig(t *testing.T) {
	f := newFixture(t)
	f.setOS("darwin")

	f.config.Contexts["kind"] = &clientcmdapi.Context{}

	assert.Equal(t, false, f.d4m.started)
	kindAdmin := f.newFakeAdmin(clusterid.ProductKIND)

	result, err := f.controller.Apply(context.Background(), &api.Cluster{
		Product: string(clusterid.ProductKIND),
	})
	require.NoError(t, err)
	assert.Equal(t, true, f.d4m.started)
	assert.Equal(t, "kind-kind", kindAdmin.created.Name)
	assert.Equal(t, "kind-kind", result.Name)
}

func TestClusterApplyFailsToStart(t *testing.T) {
	f := newFixture(t)
	f.setOS("darwin")

	out := bytes.NewBuffer(nil)
	f.controller.iostreams.ErrOut = out

	assert.Equal(t, false, f.d4m.started)
	_ = f.newFakeAdmin(clusterid.ProductKIND)

	// Pretend that the kube-public namespace is never created.
	err := f.fakeK8s.CoreV1().Namespaces().Delete(
		context.Background(), "kube-public", metav1.DeleteOptions{})
	require.NoError(t, err)

	_, err = f.controller.Apply(context.Background(), &api.Cluster{
		Product: string(clusterid.ProductKIND),
	})
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), "timed out waiting for cluster to start")
		assert.Contains(t, out.String(), "Waiting 0s for Kubernetes cluster \"kind-kind\" to start")
	}
}

func TestClusterApplyKINDWithCluster(t *testing.T) {
	f := newFixture(t)
	f.setOS("linux")

	f.dockerClient.started = true

	kindAdmin := f.newFakeAdmin(clusterid.ProductKIND)

	result, err := f.controller.Apply(context.Background(), &api.Cluster{
		Product:  string(clusterid.ProductKIND),
		Registry: "kind-registry",
	})
	assert.NoError(t, err)
	assert.Equal(t, "kind-kind", result.Name)
	assert.Equal(t, "kind-registry", result.Registry)
	assert.Equal(t, "kind-registry", kindAdmin.createdRegistry.Name)
	assert.Equal(t, 5000, kindAdmin.createdRegistry.Status.ContainerPort)
	assert.Equal(t, "kind-registry", f.registryCtl.lastApply.Name)
}

func TestClusterApplyDockerDesktop(t *testing.T) {
	f := newFixture(t)
	f.setOS("darwin")

	assert.Equal(t, false, f.d4m.started)
	assert.Equal(t, 1, f.dockerClient.ncpu)
	f.apply(clusterid.ProductDockerDesktop, 3)
	assert.Equal(t, true, f.d4m.started)
	assert.Equal(t, 3, f.dockerClient.ncpu)
}

func TestClusterApplyDockerDesktopLinux(t *testing.T) {
	f := newFixture(t)
	f.setOS("linux")

	assert.Equal(t, false, f.d4m.started)
	assert.Equal(t, 1, f.dockerClient.ncpu)
	f.apply(clusterid.ProductDockerDesktop, 3)
	assert.Equal(t, true, f.d4m.started)
	assert.Equal(t, 3, f.dockerClient.ncpu)
}

func TestClusterApplyDockerDesktopLinuxEngine(t *testing.T) {
	f := newFixture(t)
	f.setOS("linux")
	f.dockerClient.host = "unix:///var/run/docker.sock"

	cluster := &api.Cluster{
		Product: string(clusterid.ProductDockerDesktop),
	}
	_, err := f.controller.Apply(context.Background(), cluster)
	require.Error(f.t, err)
	require.Contains(f.t, err.Error(),
		"Not connected to Docker Engine. Host: \"unix:///var/run/docker.sock\". Error: not started")
}

func TestClusterApplyDockerDesktopCPUOnly(t *testing.T) {
	f := newFixture(t)
	f.setOS("darwin")

	err := f.d4m.Open(context.Background())
	require.NoError(t, err)

	assert.Equal(t, true, f.d4m.started)
	assert.Equal(t, 1, f.dockerClient.ncpu)
	f.apply(clusterid.ProductDockerDesktop, 3)
	assert.Equal(t, true, f.d4m.started)
	assert.Equal(t, 3, f.dockerClient.ncpu)
}

func TestClusterApplyDockerDesktopStartClusterOnly(t *testing.T) {
	f := newFixture(t)
	f.setOS("darwin")

	assert.Equal(t, false, f.d4m.started)
	assert.Equal(t, 1, f.dockerClient.ncpu)
	f.apply(clusterid.ProductDockerDesktop, 0)
	assert.Equal(t, true, f.d4m.started)
	assert.Equal(t, 1, f.dockerClient.ncpu)
}

func TestClusterApplyDockerDesktopNoRestart(t *testing.T) {
	f := newFixture(t)
	f.setOS("darwin")

	assert.Equal(t, 0, f.d4m.settingsWriteCount)

	// Pretend the cluster isn't running.
	err := f.fakeK8s.Tracker().Delete(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "nodes"}, "", "node-1")
	assert.NoError(t, err)
	f.apply(clusterid.ProductDockerDesktop, 0)
	assert.Equal(t, 1, f.d4m.settingsWriteCount)
	assert.Equal(t, 1, f.d4m.resetCount)
	f.apply(clusterid.ProductDockerDesktop, 0)
	assert.Equal(t, 1, f.d4m.settingsWriteCount)
}

func TestClusterApplyMinikubeVersion(t *testing.T) {
	f := newFixture(t)
	f.setOS("darwin")

	assert.Equal(t, false, f.d4m.started)
	minikubeAdmin := f.newFakeAdmin(clusterid.ProductMinikube)

	result, err := f.controller.Apply(context.Background(), &api.Cluster{
		Product:           string(clusterid.ProductMinikube),
		KubernetesVersion: "v1.14.0",
	})
	assert.NoError(t, err)
	assert.Equal(t, true, f.d4m.started)
	assert.Equal(t, "minikube", minikubeAdmin.created.Name)
	assert.Equal(t, "minikube", result.Name)
	assert.Equal(t, "minikube", f.config.CurrentContext)

	minikubeAdmin.created = nil

	_, err = f.controller.Apply(context.Background(), &api.Cluster{
		Product:           string(clusterid.ProductMinikube),
		KubernetesVersion: "v1.14.0",
	})
	assert.NoError(t, err)

	// Make sure we don't recreate the cluster.
	assert.Nil(t, minikubeAdmin.created)

	// Now, change the version and make sure we re-create the cluster.
	out := bytes.NewBuffer(nil)
	f.controller.iostreams.ErrOut = out

	_, err = f.controller.Apply(context.Background(), &api.Cluster{
		Product:           string(clusterid.ProductMinikube),
		KubernetesVersion: "v1.15.0",
	})
	assert.NoError(t, err)

	assert.Equal(t, "minikube", minikubeAdmin.created.Name)
	assert.Contains(t, out.String(),
		"Deleting cluster minikube because desired Kubernetes version (v1.15.0) "+
			"does not match current (v1.14.0)")
}

func TestFillDefaultsKindConfig(t *testing.T) {
	c := &api.Cluster{
		Product: "kind",
		KindV1Alpha4Cluster: &v1alpha4.Cluster{
			Name: "my-cluster",
		},
	}
	FillDefaults(c)
	assert.Equal(t, "kind-my-cluster", c.Name)

	c.KindV1Alpha4Cluster.Name = "your-cluster"
	FillDefaults(c)
	assert.Equal(t, "my-cluster", c.KindV1Alpha4Cluster.Name)
}

func TestClusterApplyKindConfig(t *testing.T) {
	f := newFixture(t)
	f.setOS("darwin")

	assert.Equal(t, false, f.d4m.started)
	kindAdmin := f.newFakeAdmin(clusterid.ProductKIND)

	cluster := &api.Cluster{
		Product: string(clusterid.ProductKIND),
		KindV1Alpha4Cluster: &v1alpha4.Cluster{
			Nodes: []v1alpha4.Node{
				v1alpha4.Node{Role: "control-plane"},
			},
		},
	}
	_, err := f.controller.Apply(context.Background(), cluster)
	assert.NoError(t, err)
	assert.Equal(t, "kind-kind", kindAdmin.created.Name)
	kindAdmin.created = nil

	// Assert that re-applying the same config doesn't create a new cluster.
	_, err = f.controller.Apply(context.Background(), cluster)
	assert.NoError(t, err)
	assert.Nil(t, kindAdmin.created)
	assert.Nil(t, kindAdmin.deleted)

	// Assert that applying a different config deletes and re-creates.
	cluster2 := &api.Cluster{
		Product: string(clusterid.ProductKIND),
		KindV1Alpha4Cluster: &v1alpha4.Cluster{
			Nodes: []v1alpha4.Node{
				v1alpha4.Node{Role: "control-plane"},
				v1alpha4.Node{Role: "worker"},
			},
		},
	}

	f.errOut.Truncate(0)
	_, err = f.controller.Apply(context.Background(), cluster2)
	assert.NoError(t, err)
	assert.Equal(t, "kind-kind", kindAdmin.created.Name)
	assert.Equal(t, "kind-kind", kindAdmin.deleted.Name)
	assert.Contains(t, f.errOut.String(), "desired Kind config does not match current")
}

func TestClusterApplyMinikubeConfig(t *testing.T) {
	f := newFixture(t)
	f.setOS("darwin")

	assert.Equal(t, false, f.d4m.started)
	minikubeAdmin := f.newFakeAdmin(clusterid.ProductMinikube)

	cluster := &api.Cluster{
		Product: string(clusterid.ProductMinikube),
		Minikube: &api.MinikubeCluster{
			ContainerRuntime: "docker",
		},
	}
	_, err := f.controller.Apply(context.Background(), cluster)
	assert.NoError(t, err)
	assert.Equal(t, "minikube", minikubeAdmin.created.Name)
	minikubeAdmin.created = nil

	// Assert that re-applying the same config doesn't create a new cluster.
	_, err = f.controller.Apply(context.Background(), cluster)
	assert.NoError(t, err)
	assert.Nil(t, minikubeAdmin.created)
	assert.Nil(t, minikubeAdmin.deleted)

	// Assert that applying a different config deletes and re-creates.
	cluster2 := &api.Cluster{
		Product: string(clusterid.ProductMinikube),
		Minikube: &api.MinikubeCluster{
			ContainerRuntime: "containerd",
		},
	}

	f.errOut.Truncate(0)
	_, err = f.controller.Apply(context.Background(), cluster2)
	assert.NoError(t, err)
	assert.Equal(t, "minikube", minikubeAdmin.created.Name)
	assert.Equal(t, "minikube", minikubeAdmin.deleted.Name)
	assert.Contains(t, f.errOut.String(), "desired Minikube config does not match current")
}

func TestClusterFixKubeConfigInContainer(t *testing.T) {
	f := newFixture(t)
	ctx := context.Background()

	t.Run("when not in a container", func(t *testing.T) {
		cluster := &api.Cluster{Product: string(clusterid.ProductKIND)}
		assert.NoError(t, f.controller.maybeFixKubeConfigInsideContainer(ctx, cluster))
		assert.Empty(t, f.dockerClient.networks)
		assert.Empty(t, f.configWriter.opts)
	})

	t.Run("with a cluster that doesn't support AdminInContainer", func(t *testing.T) {
		f.dockerClient.containerID = "012345abcdef"
		cluster := &api.Cluster{Product: string(clusterid.ProductK3D)}
		assert.NoError(t, f.controller.maybeFixKubeConfigInsideContainer(ctx, cluster))
		assert.Empty(t, f.dockerClient.networks)
		assert.Empty(t, f.configWriter.opts)
	})

	t.Run("when in a container and using a KIND cluster", func(t *testing.T) {
		f.dockerClient.containerID = "012345abcdef"
		cluster := &api.Cluster{
			Name:    "kind-test",
			Product: string(clusterid.ProductKIND),
		}
		assert.NoError(t, f.controller.maybeFixKubeConfigInsideContainer(ctx, cluster))
		assert.Contains(t, f.dockerClient.networks, kindNetworkName())
		assert.Equal(t, "https://test-control-plane:6443", f.configWriter.opts["clusters.kind-test.server"])
	})
}

type fixture struct {
	t            *testing.T
	errOut       *bytes.Buffer
	controller   *Controller
	dockerClient *fakeDockerClient
	dmachine     *dockerMachine
	d4m          *fakeD4MClient
	config       *clientcmdapi.Config
	configWriter fakeConfigWriter
	registryCtl  *fakeRegistryController
	fakeK8s      *fake.Clientset
}

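// newFixture wires a Controller to fake Docker, Docker Desktop, kubeconfig,
// and Kubernetes clients so cluster operations can be exercised without
// touching the host.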
func newFixture(t *testing.T) *fixture {
	_ = os.Setenv("DOCKER_HOST", "")

	osName := "darwin" // default to macos
	dockerClient := &fakeDockerClient{host: "unix:///home/nick/.docker/desktop/docker.sock", ncpu: 1}
	d4m := &fakeD4MClient{docker: dockerClient}
	dmachine := &dockerMachine{
		dockerClient: dockerClient,
		iostreams:    genericclioptions.IOStreams{Out: os.Stdout, ErrOut: os.Stderr},
		sleep:        func(d time.Duration) {},
		d4m:          d4m,
		os:           osName,
	}
	config := &clientcmdapi.Config{
		CurrentContext: "microk8s",
		Contexts: map[string]*clientcmdapi.Context{
			"microk8s": &clientcmdapi.Context{
				Cluster: "microk8s-cluster",
			},
			"docker-desktop": &clientcmdapi.Context{
				Cluster: "docker-desktop",
			},
		},
		Clusters: map[string]*clientcmdapi.Cluster{
			"microk8s-cluster": &clientcmdapi.Cluster{Server: "http://microk8s.localhost/"},
			"docker-desktop":   &clientcmdapi.Cluster{Server: "http://docker-desktop.localhost/"},
		},
	}
	configLoader := configLoader(func() (clientcmdapi.Config, error) {
		return *config, nil
	})
	configWriter := fakeConfigWriter{config: config, opts: make(map[string]string)}
	iostreams := genericclioptions.IOStreams{
		In:     os.Stdin,
		Out:    bytes.NewBuffer(nil),
		ErrOut: bytes.NewBuffer(nil),
	}
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name:              "node-1",
			CreationTimestamp: metav1.Time{Time: time.Now()},
		},
	}
	ns := &v1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name:              "kube-public",
			CreationTimestamp: metav1.Time{Time: time.Now()},
		},
	}
	fakeK8s := fake.NewSimpleClientset(node, ns)
	clientLoader := clientLoader(func(restConfig *rest.Config) (kubernetes.Interface, error) {
		return fakeK8s, nil
	})

	registryCtl := &fakeRegistryController{}
	controller := &Controller{
		iostreams:                   iostreams,
		runner:                      exec.NewFakeCmdRunner(func(argv []string) string { return "" }),
		admins:                      make(map[clusterid.Product]Admin),
		config:                      *config,
		configWriter:                configWriter,
		dmachine:                    dmachine,
		configLoader:                configLoader,
		clientLoader:                clientLoader,
		clients:                     make(map[string]kubernetes.Interface),
		registryCtl:                 registryCtl,
		waitForKubeConfigTimeout:    time.Millisecond,
		waitForClusterCreateTimeout: time.Millisecond,
		os:                          osName,
		dockerCLI:                   &fakeCLI{client: dockerClient},
	}
	return &fixture{
		t:            t,
		errOut:       iostreams.ErrOut.(*bytes.Buffer),
		controller:   controller,
		dmachine:     dmachine,
		d4m:          d4m,
		dockerClient: dockerClient,
		config:       config,
		configWriter: configWriter,
		registryCtl:  registryCtl,
		fakeK8s:      fakeK8s,
	}
}

func (f *fixture) apply(product clusterid.Product, cpus int) {
	cluster := &api.Cluster{
		Product: string(product),
		MinCPUs: cpus,
	}
	_, err := f.controller.Apply(context.Background(), cluster)
	require.NoError(f.t, err)
}

func (f *fixture) setOS(os string) {
	f.controller.os = os
	f.dmachine.os = os
}

func (f *fixture) newFakeAdmin(p clusterid.Product) *fakeAdmin {
	admin := newFakeAdmin(f.config, f.fakeK8s)
	f.controller.admins[p] = admin
	return admin
}

func newFakeController(t *testing.T) *Controller {
	return newFixture(t).controller
}

type fakeCLI struct {
	client *fakeDockerClient
}

func (c *fakeCLI) Client() dctr.Client {
	return c.client
}

func (c *fakeCLI) AuthInfo(ctx context.Context, repoInfo *dockerregistry.RepositoryInfo, cmdName string) (string, registrytypes.RequestAuthConfig, error) {
	return "", nil, nil
}

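// fakeDockerClient is an in-memory stand-in for the Docker API client. It
// reports a configurable CPU count and daemon host, and records network
// connect/disconnect calls.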
type fakeDockerClient struct {
	started     bool
	ncpu        int
	host        string
	networks    []string
	containerID string
}

func (c *fakeDockerClient) DaemonHost() string {
	return c.host
}

func (c *fakeDockerClient) ServerVersion(ctx context.Context) (types.Version, error) {
	if !c.started {
		return types.Version{}, fmt.Errorf("not started")
	}

	return types.Version{}, nil
}

func (c *fakeDockerClient) Info(ctx context.Context) (system.Info, error) {
	if !c.started {
		return system.Info{}, fmt.Errorf("not started")
	}

	return system.Info{NCPU: c.ncpu}, nil
}

func (c *fakeDockerClient) ContainerInspect(ctx context.Context, id string) (container.InspectResponse, error) {
	return container.InspectResponse{}, nil
}

func (d *fakeDockerClient) ContainerRemove(ctx context.Context, id string, options container.RemoveOptions) error {
	return nil
}

func (d *fakeDockerClient) ImagePull(ctx context.Context, image string, options image.PullOptions) (io.ReadCloser, error) {
	return nil, nil
}

func (d *fakeDockerClient) ContainerList(ctx context.Context, options container.ListOptions) ([]container.Summary, error) {
	return nil, nil
}

func (d *fakeDockerClient) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.CreateResponse, error) {
	return container.CreateResponse{}, nil
}

func (d *fakeDockerClient) ContainerStart(ctx context.Context, containerID string, options container.StartOptions) error {
	return nil
}

func (d *fakeDockerClient) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error {
	d.networks = append(d.networks, networkID)
	return nil
}

func (d *fakeDockerClient) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error {
	if len(d.networks) == 0 {
		return nil
	}
	networks := []string{}
	for _, n := range d.networks {
		if n != networkID {
			networks = append(networks, n)
		}
	}
	d.networks = networks
	return nil
}

func (d *fakeDockerClient) insideContainer(ctx context.Context) string {
	return d.containerID
}

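// fakeD4MClient fakes the Docker Desktop settings client, tracking settings
// writes and cluster resets so tests can assert on them.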
type fakeD4MClient struct {
	lastSettings       map[string]interface{}
	docker             *fakeDockerClient
	started            bool
	settingsWriteCount int
	resetCount         int
}

func (c *fakeD4MClient) writeSettings(ctx context.Context, settings map[string]interface{}) error {
	c.lastSettings = settings
	c.docker.ncpu = settings["cpu"].(int)
	c.settingsWriteCount++
	return nil
}

func (c *fakeD4MClient) settings(ctx context.Context) (map[string]interface{}, error) {
	if c.lastSettings == nil {
		c.lastSettings = make(map[string]interface{})
	}
	return c.lastSettings, nil
}

func (c *fakeD4MClient) setK8sEnabled(settings map[string]interface{}, desired bool) (bool, error) {
	enabled, ok := settings["k8sEnabled"]
	if ok && enabled.(bool) {
		return false, nil
	}
	settings["k8sEnabled"] = true
	return true, nil
}

func (c *fakeD4MClient) ensureMinCPU(settings map[string]interface{}, desired int) (bool, error) {
	cpu, ok := settings["cpu"]
	if ok && cpu.(int) >= desired {
		return false, nil
	}
	settings["cpu"] = desired
	return true, nil
}

func (c *fakeD4MClient) ResetCluster(ctx context.Context) error {
	c.resetCount++
	return nil
}

func (c *fakeD4MClient) Open(ctx context.Context) error {
	c.lastSettings = map[string]interface{}{
		"cpu": c.docker.ncpu,
	}
	c.started = true
	c.docker.started = true
	return nil
}

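// fakeAdmin fakes a cluster admin. Create registers the cluster in the
// kubeconfig and sets the fake discovery server version; Delete removes the
// cluster's context again.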
type fakeAdmin struct {
	created         *api.Cluster
	createdRegistry *api.Registry
	deleted         *api.Cluster
	config          *clientcmdapi.Config
	fakeK8s         *fake.Clientset
}

func newFakeAdmin(config *clientcmdapi.Config, fakeK8s *fake.Clientset) *fakeAdmin {
	return &fakeAdmin{config: config, fakeK8s: fakeK8s}
}

func (a *fakeAdmin) EnsureInstalled(ctx context.Context) error { return nil }

func (a *fakeAdmin) Create(ctx context.Context, config *api.Cluster, registry *api.Registry) error {
	a.created = config.DeepCopy()
	a.createdRegistry = registry.DeepCopy()
	a.config.Contexts[config.Name] = &clientcmdapi.Context{Cluster: config.Name}
	a.config.Clusters[config.Name] = &clientcmdapi.Cluster{Server: fmt.Sprintf("http://%s.localhost/", config.Name)}

	kVersion := config.KubernetesVersion
	if kVersion == "" {
		kVersion = "v1.19.1"
	}
	a.fakeK8s.Discovery().(*discoveryfake.FakeDiscovery).FakedServerVersion = &version.Info{
		GitVersion: kVersion,
	}
	return nil
}

func (a *fakeAdmin) LocalRegistryHosting(ctx context.Context, cluster *api.Cluster, registry *api.Registry) (*localregistry.LocalRegistryHostingV1, error) {
	return &localregistry.LocalRegistryHostingV1{
		Host: fmt.Sprintf("localhost:%d", registry.Status.HostPort),
		Help: "https://github.com/tilt-dev/ctlptl",
	}, nil
}

func (a *fakeAdmin) Delete(ctx context.Context, config *api.Cluster) error {
	a.deleted = config.DeepCopy()
	delete(a.config.Contexts, config.Name)
	return nil
}

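// fakeRegistryController records the last applied registry and serves it back
// from List with a canned status.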
type fakeRegistryController struct {
	lastApply *api.Registry
}

func (c *fakeRegistryController) List(ctx context.Context, options registry.ListOptions) (*api.RegistryList, error) {
	list := &api.RegistryList{}
	if c.lastApply != nil {
		item := c.lastApply.DeepCopy()
		list.Items = append(list.Items, *item)
	}
	return list, nil
}

func (c *fakeRegistryController) Apply(ctx context.Context, r *api.Registry) (*api.Registry, error) {
	c.lastApply = r.DeepCopy()

	newR := r.DeepCopy()
	newR.Status = api.RegistryStatus{
		ContainerPort: 5000,
		ContainerID:   "fake-container-id",
		HostPort:      5000,
		IPAddress:     "172.0.0.2",
		Networks:      []string{"bridge"},
	}
	return newR, nil
}

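// fakeConfigWriter applies context and config changes to an in-memory
// kubeconfig instead of shelling out to kubectl.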
type fakeConfigWriter struct {
	config *clientcmdapi.Config
	opts   map[string]string
}

func (w fakeConfigWriter) SetContext(name string) error {
	w.config.CurrentContext = name
	return nil
}

func (w fakeConfigWriter) DeleteContext(name string) error {
	if w.config.CurrentContext == name {
		w.config.CurrentContext = ""
	}
	delete(w.config.Contexts, name)
	return nil
}

func (w fakeConfigWriter) SetConfig(name, value string) error {
	w.opts[name] = value
	return nil
}
0707010000005F000081A400000000000000000000000168AFB0EA00000396000000000000000000000000000000000000002400000000ctlptl-0.8.43/pkg/cluster/config.gopackage cluster

import (
	"os/exec"

	"k8s.io/cli-runtime/pkg/genericclioptions"
)

type configWriter interface {
	SetContext(name string) error
	DeleteContext(name string) error
	SetConfig(name, value string) error
}

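// kubeconfigWriter modifies the active kubeconfig by shelling out to
// `kubectl config` commands.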
type kubeconfigWriter struct {
	iostreams genericclioptions.IOStreams
}

func (w kubeconfigWriter) SetContext(name string) error {
	cmd := exec.Command("kubectl", "config", "use-context", name)
	cmd.Stdout = w.iostreams.Out
	cmd.Stderr = w.iostreams.ErrOut
	return cmd.Run()
}

func (w kubeconfigWriter) DeleteContext(name string) error {
	cmd := exec.Command("kubectl", "config", "delete-context", name)
	cmd.Stdout = w.iostreams.Out
	cmd.Stderr = w.iostreams.ErrOut
	return cmd.Run()
}

func (w kubeconfigWriter) SetConfig(name, value string) error {
	cmd := exec.Command("kubectl", "config", "set", name, value)
	cmd.Stdout = w.iostreams.Out
	cmd.Stderr = w.iostreams.ErrOut
	return cmd.Run()
}
07070100000060000081A400000000000000000000000168AFB0EA0000064F000000000000000000000000000000000000002400000000ctlptl-0.8.43/pkg/cluster/docker.gopackage cluster

import (
	"context"
	"os"
	"regexp"

	"github.com/tilt-dev/ctlptl/internal/dctr"
)

const (
	shortLen = 12
)

var (
	validShortID = regexp.MustCompile("^[a-f0-9]{12}$")
)

// isShortID determines if id has the correct format and length for a short ID.
// It checks the ID's length and that it consists of valid characters for IDs (a-f0-9).
func isShortID(id string) bool {
	if len(id) != shortLen {
		return false
	}
	return validShortID.MatchString(id)
}

type detectInContainer interface {
	insideContainer(ctx context.Context) string
}

// insideContainer checks the current host and docker client to see if we are
// running inside a container with a Docker-out-of-Docker-mounted socket. It
// checks if:
//
//   - The effective DOCKER_HOST is `/var/run/docker.sock`
//   - The hostname looks like a container "short id" and is a valid, running
//     container
//
// Returns a non-empty string representing the container ID if inside a container.
func insideContainer(ctx context.Context, client dctr.Client) string {
	// allows fake client to mock the result
	if detect, ok := client.(detectInContainer); ok {
		return detect.insideContainer(ctx)
	}

	if client.DaemonHost() != "unix:///var/run/docker.sock" {
		return ""
	}

	containerID, err := os.Hostname()
	if err != nil {
		return ""
	}

	if !isShortID(containerID) {
		return ""
	}

	container, err := client.ContainerInspect(ctx, containerID)
	if err != nil {
		return ""
	}

	if !container.State.Running {
		return ""
	}

	return containerID
}
07070100000061000081A400000000000000000000000168AFB0EA000033CB000000000000000000000000000000000000002C00000000ctlptl-0.8.43/pkg/cluster/docker_desktop.gopackage cluster

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	"os"
	"os/exec"
	"runtime"
	"strconv"
	"strings"

	"github.com/pkg/errors"
	klog "k8s.io/klog/v2"
)

type ddProtocol int

const (
	// Pre DD 4.12
	ddProtocolV1 ddProtocol = iota

	// Post DD 4.12
	ddProtocolV2
)

type HTTPClient interface {
	Do(req *http.Request) (*http.Response, error)
}

// DockerDesktopClient uses the Docker Desktop GUI+Backend protocols to
// control Docker Desktop.
//
// There isn't an off-the-shelf library or documented protocol we can use
// for this, so we do the best we can.
type DockerDesktopClient struct {
	backendNativeClient HTTPClient
	backendClient       HTTPClient
}

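// NewDockerDesktopClient builds HTTP clients for both the newer Docker Desktop
// backend socket and the older backend-native sockets; requests are tried
// against the newer endpoint first and fall back to the older one.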
func NewDockerDesktopClient() (DockerDesktopClient, error) {
	backendNativeSocketPaths, err := dockerDesktopBackendNativeSocketPaths()
	if err != nil {
		return DockerDesktopClient{}, err
	}

	backendNativeClient := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				var lastErr error

				// Different versions of docker use different socket paths,
				// so return all of them and connect to the first one that
				// accepts a connection.
				for _, socketPath := range backendNativeSocketPaths {
					conn, err := dialDockerDesktop(socketPath)
					if err == nil {
						return conn, nil
					}
					lastErr = err
				}
				return nil, lastErr
			},
		},
	}
	backendClient := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return dialDockerBackend()
			},
		},
	}
	return DockerDesktopClient{
		backendNativeClient: backendNativeClient,
		backendClient:       backendClient,
	}, nil
}

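// Open starts Docker Desktop if it is installed but not running: via `open`
// on macOS and the docker-desktop systemd user unit on Linux. Auto-start is
// not supported on Windows.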
func (c DockerDesktopClient) Open(ctx context.Context) error {
	var err error
	switch runtime.GOOS {

	case "windows":
		return fmt.Errorf("Cannot auto-start Docker Desktop on Windows")

	case "darwin":
		_, err = os.Stat("/Applications/Docker.app")
		if err != nil {
			if os.IsNotExist(err) {
				return fmt.Errorf("Please install Docker for Desktop: https://www.docker.com/products/docker-desktop")
			}
			return err
		}
		cmd := exec.Command("open", "/Applications/Docker.app")
		err = cmd.Run()

	case "linux":
		cmd := exec.Command("systemctl", "--user", "start", "docker-desktop")
		err = cmd.Run()
	}

	if err != nil {
		return errors.Wrap(err, "starting Docker")
	}
	return nil
}

func (c DockerDesktopClient) Quit(ctx context.Context) error {
	var err error
	switch runtime.GOOS {
	case "windows":
		return fmt.Errorf("Cannot quit Docker Desktop on Windows")

	case "darwin":
		cmd := exec.Command("osascript", "-e", `quit app "Docker"`)
		err = cmd.Run()

	case "linux":
		cmd := exec.Command("systemctl", "--user", "stop", "docker-desktop")
		err = cmd.Run()
	}

	if err != nil {
		return errors.Wrap(err, "quitting Docker")
	}
	return nil
}

func (c DockerDesktopClient) ResetCluster(ctx context.Context) error {
	resp, err := c.tryRequests("reset docker-desktop kubernetes", []clientRequest{
		{
			client:  c.backendClient,
			method:  "POST",
			url:     "http://localhost/kubernetes/reset",
			headers: map[string]string{"Content-Type": "application/json"},
		},
		{
			client:  c.backendNativeClient,
			method:  "POST",
			url:     "http://localhost/kubernetes/reset",
			headers: map[string]string{"Content-Type": "application/json"},
		},
	})
	if err != nil {
		return err
	}
	_ = resp.Body.Close()
	return nil
}

func (c DockerDesktopClient) SettingsValues(ctx context.Context) (interface{}, error) {
	s, err := c.settings(ctx)
	if err != nil {
		return nil, err
	}
	return c.settingsForWrite(s, ddProtocolV1), nil
}

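// SetSettingValue updates a single Docker Desktop setting addressed by a
// dotted key path and writes the settings back only if the value actually
// changed. For example:
//
//	err := c.SetSettingValue(ctx, "vm.resources.cpus", "4")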
func (c DockerDesktopClient) SetSettingValue(ctx context.Context, key, newValue string) error {
	settings, err := c.settings(ctx)
	if err != nil {
		return err
	}

	changed, err := c.applySet(settings, key, newValue)
	if err != nil {
		return err
	}
	if !changed {
		return nil
	}
	return c.writeSettings(ctx, settings)
}

// Returns true if the value changed, false if the value is unchanged.
// Returns an error if not able to set.
func (c DockerDesktopClient) applySet(settings map[string]interface{}, key, newValue string) (bool, error) {
	parts := strings.Split(key, ".")
	if len(parts) <= 1 {
		return false, fmt.Errorf("key cannot be set: %s", key)
	}

	parentKey := strings.Join(parts[:len(parts)-1], ".")
	childKey := parts[len(parts)-1]
	parentSpec, err := c.lookupMapAt(settings, parentKey)
	if err != nil {
		return false, err
	}

	// In Docker Desktop, a boolean setting can be stored in one of two formats:
	//
	// {"kubernetes": {"enabled": true}}
	// {"kubernetes": {"enabled": {"value": true}}}
	//
	// To resolve this problem, we create some intermediate variables:
	// v - the value that we're replacing
	// vParent - the map owning the value we're replacing
	// vParentKey - the key where v lives in vParent
	v, ok := parentSpec[childKey]
	if !ok {
		return false, fmt.Errorf("nothing found at DockerDesktop setting %q", key)
	}

	vParent := parentSpec
	vParentKey := childKey
	childMap, isMap := v.(map[string]interface{})
	if isMap {
		v = childMap["value"]
		vParent = childMap
		vParentKey = "value"
	}

	switch v := v.(type) {
	case bool:
		switch newValue {
		case "true":
			vParent[vParentKey] = true
			return !v, nil
		case "false":
			vParent[vParentKey] = false
			return v, nil
		}

		return false, fmt.Errorf("expected bool for setting %q, got: %s", key, newValue)

	case float64:
		newValFloat, err := strconv.ParseFloat(newValue, 64)
		if err != nil {
			return false, fmt.Errorf("expected number for setting %q, got: %s. Error: %v", key, newValue, err)
		}

		max, ok := vParent["max"].(float64)
		if ok && newValFloat > max {
			return false, fmt.Errorf("setting value %q: %s greater than max allowed (%f)", key, newValue, max)
		}
		min, ok := vParent["min"].(float64)
		if ok && newValFloat < min {
			return false, fmt.Errorf("setting value %q: %s less than min allowed (%f)", key, newValue, min)
		}

		if newValFloat != v {
			vParent[vParentKey] = newValFloat
			return true, nil
		}
		return false, nil
	case string:
		if newValue != v {
			vParent[vParentKey] = newValue
			return true, nil
		}
		return false, nil
	default:
		if key == "vm.fileSharing" {
			pathSpec := []map[string]interface{}{}
			paths := strings.Split(newValue, ",")
			for _, path := range paths {
				pathSpec = append(pathSpec, map[string]interface{}{"path": path, "cached": false})
			}

			vParent[vParentKey] = pathSpec

			// Don't bother trying to optimize this.
			return true, nil
		}
	}

	return false, fmt.Errorf("Cannot set key: %q", key)
}

func (c DockerDesktopClient) settingsForWriteJSON(settings map[string]interface{}, v ddProtocol) ([]byte, error) {
	buf := bytes.NewBuffer(nil)
	err := json.NewEncoder(buf).Encode(c.settingsForWrite(settings, v))
	if err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func (c DockerDesktopClient) writeSettings(ctx context.Context, settings map[string]interface{}) error {
	v2Body, err := c.settingsForWriteJSON(settings, ddProtocolV2)
	if err != nil {
		return errors.Wrap(err, "writing docker-desktop settings")
	}
	v1Body, err := c.settingsForWriteJSON(settings, ddProtocolV1)
	if err != nil {
		return errors.Wrap(err, "writing docker-desktop settings")
	}
	resp, err := c.tryRequests("writing docker-desktop settings", []clientRequest{
		{
			client:  c.backendClient,
			method:  "POST",
			url:     "http://localhost/app/settings",
			headers: map[string]string{"Content-Type": "application/json"},
			body:    v2Body,
		},
		{
			client:  c.backendNativeClient,
			method:  "POST",
			url:     "http://localhost/settings",
			headers: map[string]string{"Content-Type": "application/json"},
			body:    v1Body,
		},
	})
	if err != nil {
		return err
	}
	_ = resp.Body.Close()
	return nil
}

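// settings fetches the current Docker Desktop settings, trying the newer
// /app/settings endpoint before falling back to the legacy /settings endpoint.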
func (c DockerDesktopClient) settings(ctx context.Context) (map[string]interface{}, error) {
	resp, err := c.tryRequests("reading docker-desktop settings", []clientRequest{
		{
			client: c.backendClient,
			method: "GET",
			url:    "http://localhost/app/settings?format=grouped",
		},
		{
			client: c.backendNativeClient,
			method: "GET",
			url:    "http://localhost/settings",
		},
	})
	if err != nil {
		return nil, err
	}
	defer func() {
		_ = resp.Body.Close()
	}()

	settings := make(map[string]interface{})
	err = json.NewDecoder(resp.Body).Decode(&settings)
	if err != nil {
		return nil, errors.Wrap(err, "reading docker-desktop settings")
	}
	klog.V(8).Infof("Response body: %+v\n", settings)
	return settings, nil
}

func (c DockerDesktopClient) lookupMapAt(settings map[string]interface{}, key string) (map[string]interface{}, error) {
	parts := strings.Split(key, ".")
	current := settings
	for i, part := range parts {
		var ok bool
		val := current[part]
		current, ok = val.(map[string]interface{})
		if !ok {
			if val == nil {
				return nil, fmt.Errorf("nothing found at DockerDesktop setting %q",
					strings.Join(parts[:i+1], "."))
			}
			return nil, fmt.Errorf("expected map at DockerDesktop setting %q, got: %T",
				strings.Join(parts[:i+1], "."), val)
		}
	}
	return current, nil
}

func (c DockerDesktopClient) setK8sEnabled(settings map[string]interface{}, newVal bool) (changed bool, err error) {
	return c.applySet(settings, "vm.kubernetes.enabled", fmt.Sprintf("%v", newVal))
}

func (c DockerDesktopClient) ensureMinCPU(settings map[string]interface{}, desired int) (changed bool, err error) {
	cpusSetting, err := c.lookupMapAt(settings, "vm.resources.cpus")
	if err != nil {
		return false, err
	}

	value, ok := cpusSetting["value"].(float64)
	if !ok {
		return false, fmt.Errorf("expected number at DockerDesktop setting vm.resources.cpus.value, got: %T",
			cpusSetting["value"])
	}
	max, ok := cpusSetting["max"].(float64)
	if !ok {
		return false, fmt.Errorf("expected number at DockerDesktop setting vm.resources.cpus.max, got: %T",
			cpusSetting["max"])
	}

	if desired > int(max) {
		return false, fmt.Errorf("desired cpus (%d) greater than max allowed (%d)", desired, int(max))
	}

	if desired <= int(value) {
		return false, nil
	}

	cpusSetting["value"] = desired
	return true, nil
}

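// settingsForWrite converts the settings structure returned by the read
// endpoint into the shape the write endpoint expects: the V1 protocol keeps
// only bare values, V2 keeps the {value, locked} wrappers, locked-only entries
// are dropped, and daemon {locks, json} entries collapse to their raw JSON.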
func (c DockerDesktopClient) settingsForWrite(settings interface{}, v ddProtocol) interface{} {
	settingsMap, ok := settings.(map[string]interface{})
	if !ok {
		return settings
	}

	_, hasLocked := settingsMap["locked"]
	value, hasValue := settingsMap["value"]
	if hasLocked && hasValue {
		// In the old protocol, we only sent the value back. In the new protocol,
		// we send the whole struct.
		if v == ddProtocolV1 {
			return value
		} else {
			return settingsMap
		}
	}

	if hasLocked && len(settingsMap) == 1 {
		return nil
	}

	_, hasLocks := settingsMap["locks"]
	json, hasJSON := settingsMap["json"]
	if hasLocks && hasJSON {
		return json
	}

	for key, value := range settingsMap {
		newVal := c.settingsForWrite(value, v)
		if newVal != nil {
			settingsMap[key] = newVal
		} else {
			delete(settingsMap, key)
		}
	}

	return settings
}

type clientRequest struct {
	client  HTTPClient
	method  string
	url     string
	headers map[string]string
	body    []byte
}

func status2xx(resp *http.Response) bool {
	return resp.StatusCode >= 200 && resp.StatusCode <= 204
}

type withStatusCode struct {
	error
	statusCode int
}

func (w withStatusCode) Cause() error { return w.error }

// tryRequest either returns a 2xx response or an error, but not both.
// If a response is returned, the caller must close its body.
func (c DockerDesktopClient) tryRequest(label string, creq clientRequest) (*http.Response, error) {
	klog.V(7).Infof("%s %s\n", creq.method, creq.url)

	body := []byte{}
	if creq.body != nil {
		body = creq.body
		klog.V(8).Infof("Request body: %s\n", string(body))
	}
	req, err := http.NewRequest(creq.method, creq.url, bytes.NewReader(body))
	if err != nil {
		return nil, errors.Wrap(err, label)
	}

	for k, v := range creq.headers {
		req.Header.Add(k, v)
	}

	resp, err := creq.client.Do(req)
	if err != nil {
		return nil, errors.Wrap(err, label)
	}
	if !status2xx(resp) {
		_ = resp.Body.Close()
		return nil, withStatusCode{errors.Errorf("%s: status code %d", label, resp.StatusCode), resp.StatusCode}
	}

	return resp, nil
}

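// errorPriority ranks errors so that transport failures outrank HTTP status
// errors, and 5xx statuses outrank 4xx.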
func errorPriority(err error) int {
	switch e := err.(type) {
	case withStatusCode:
		return e.statusCode / 100
	default: // give actual errors higher priority than non-2xx status codes
		return 10
	}
}

func chooseWorstError(errs []error) error {
	err := errs[0]
	prio := errorPriority(err)
	for _, e := range errs[1:] {
		if p := errorPriority(e); p > prio {
			err = e
			prio = p
		}
	}
	return err
}

// tryRequests returns the first 2xx response for the given requests, in order,
// or the "highest priority" error (based on errorPriority) from response
// errors. If a response is returned, the caller must close its body.
func (c DockerDesktopClient) tryRequests(label string, requests []clientRequest) (*http.Response, error) {
	if len(requests) == 0 {
		panic(fmt.Sprintf("%s: no requests provided", label))
	}

	errs := []error{}
	for _, creq := range requests {
		resp, err := c.tryRequest(label, creq)
		if err == nil {
			return resp, nil
		}
		errs = append(errs, err)
	}
	return nil, chooseWorstError(errs)
}
07070100000062000081A400000000000000000000000168AFB0EA000004CB000000000000000000000000000000000000003100000000ctlptl-0.8.43/pkg/cluster/docker_desktop_dial.go//go:build !windows
// +build !windows

package cluster

import (
	"fmt"
	"net"
	"path/filepath"
	"runtime"

	"github.com/mitchellh/go-homedir"
)

func dockerDesktopBackendNativeSocketPaths() ([]string, error) {
	socketDir, err := dockerDesktopSocketDir()
	if err != nil {
		return nil, err
	}

	return []string{
		// Newer versions of docker desktop use this socket.
		filepath.Join(socketDir, "backend.native.sock"),

		// Older versions of docker desktop use this socket.
		filepath.Join(socketDir, "gui-api.sock"),
	}, nil
}

func dialDockerDesktop(socketPath string) (net.Conn, error) {
	return net.Dial("unix", socketPath)
}

func dialDockerBackend() (net.Conn, error) {
	socketDir, err := dockerDesktopSocketDir()
	if err != nil {
		return nil, err
	}
	return dialDockerDesktop(filepath.Join(socketDir, "backend.sock"))
}

func dockerDesktopSocketDir() (string, error) {
	homedir, err := homedir.Dir()
	if err != nil {
		return "", err
	}

	switch runtime.GOOS {
	case "darwin":
		return filepath.Join(homedir, "Library/Containers/com.docker.docker/Data"), nil
	case "linux":
		return filepath.Join(homedir, ".docker/desktop"), nil
	}
	return "", fmt.Errorf("Cannot find docker desktop directory on %s", runtime.GOOS)
}
07070100000063000081A400000000000000000000000168AFB0EA00000306000000000000000000000000000000000000003900000000ctlptl-0.8.43/pkg/cluster/docker_desktop_dial_windows.go//go:build windows
// +build windows

package cluster

import (
	"net"
	"os"
	"time"

	"gopkg.in/natefinch/npipe.v2"
)

func dockerDesktopBackendNativeSocketPaths() ([]string, error) {
	return []string{
		`\\.\pipe\dockerBackendNativeApiServer`,
		`\\.\pipe\dockerWebApiServer`,
	}, nil
}

// Use npipe.Dial to create a connection.
//
// npipe.Dial will wait if the socket doesn't exist. Stat it first and
// dial on a timeout.
//
// https://github.com/natefinch/npipe#func-dial
func dialDockerDesktop(socketPath string) (net.Conn, error) {
	_, err := os.Stat(socketPath)
	if err != nil {
		return nil, err
	}
	return npipe.DialTimeout(socketPath, 2*time.Second)
}

func dialDockerBackend() (net.Conn, error) {
	return dialDockerDesktop(`\\.\pipe\dockerBackendApiServer`)
}
07070100000064000081A400000000000000000000000168AFB0EA00003DF9000000000000000000000000000000000000003100000000ctlptl-0.8.43/pkg/cluster/docker_desktop_test.gopackage cluster

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strconv"
	"strings"
	"testing"

	"github.com/pkg/errors"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestNoOp(t *testing.T) {
	f := newD4MFixture(t)
	defer f.TearDown()

	ctx := context.Background()
	settings, err := f.d4m.settings(ctx)
	require.NoError(t, err)

	err = f.d4m.writeSettings(ctx, settings)
	require.NoError(t, err)

	assert.Equal(t,
		f.postSettings,
		f.readerToMap(strings.NewReader(postSettingsJSONV2)))
}

func TestEnableKubernetesV1(t *testing.T) {
	f := newD4MFixture(t)
	defer f.TearDown()

	f.v = ddProtocolV1

	ctx := context.Background()
	settings, err := f.d4m.settings(ctx)
	require.NoError(t, err)

	changed, err := f.d4m.setK8sEnabled(settings, true)
	assert.True(t, changed)
	require.NoError(t, err)

	err = f.d4m.writeSettings(ctx, settings)
	require.NoError(t, err)

	expected := strings.Replace(postSettingsJSONV1,
		`"kubernetes":{"enabled":false`,
		`"kubernetes":{"enabled":true`, 1)
	assert.Equal(t,
		f.postSettings,
		f.readerToMap(strings.NewReader(expected)))
}

func TestEnableKubernetesV2(t *testing.T) {
	f := newD4MFixture(t)
	defer f.TearDown()

	ctx := context.Background()
	settings, err := f.d4m.settings(ctx)
	require.NoError(t, err)

	changed, err := f.d4m.setK8sEnabled(settings, true)
	assert.True(t, changed)
	require.NoError(t, err)

	err = f.d4m.writeSettings(ctx, settings)
	require.NoError(t, err)

	expected := strings.Replace(postSettingsJSONV2,
		`"kubernetes":{"enabled":{"locked":false,"value":false`,
		`"kubernetes":{"enabled":{"locked":false,"value":true`, 1)
	assert.Equal(t,
		f.postSettings,
		f.readerToMap(strings.NewReader(expected)))
}

func TestMinCPUs(t *testing.T) {
	f := newD4MFixture(t)
	defer f.TearDown()

	ctx := context.Background()
	settings, err := f.d4m.settings(ctx)
	require.NoError(t, err)

	changed, err := f.d4m.ensureMinCPU(settings, 4)
	assert.True(t, changed)
	require.NoError(t, err)

	err = f.d4m.writeSettings(ctx, settings)
	require.NoError(t, err)

	expected := strings.Replace(postSettingsJSONV2,
		`"cpus":{"max":8,"min":1,"value":2}`,
		`"cpus":{"max":8,"min":1,"value":4}`, 1)
	assert.Equal(t,
		f.postSettings,
		f.readerToMap(strings.NewReader(expected)))
}

func TestMaxCPUs(t *testing.T) {
	f := newD4MFixture(t)
	defer f.TearDown()

	ctx := context.Background()
	settings, err := f.d4m.settings(ctx)
	require.NoError(t, err)

	changed, err := f.d4m.ensureMinCPU(settings, 40)
	assert.False(t, changed)
	if assert.Error(t, err) {
		assert.Equal(t, err.Error(), "desired cpus (40) greater than max allowed (8)")
	}
}

func TestLookupMap(t *testing.T) {
	f := newD4MFixture(t)
	defer f.TearDown()

	ctx := context.Background()
	settings, err := f.d4m.settings(ctx)
	require.NoError(t, err)

	_, err = f.d4m.lookupMapAt(settings, "vm.kubernetes.honk")
	if assert.Error(t, err) {
		assert.Equal(t, err.Error(), `nothing found at DockerDesktop setting "vm.kubernetes.honk"`)
	}
}

func TestSetSettingValueInvalidKey(t *testing.T) {
	f := newD4MFixture(t)
	defer f.TearDown()

	ctx := context.Background()
	err := f.d4m.SetSettingValue(ctx, "vm.doesNotExist", "4")
	if assert.Error(t, err) {
		assert.Equal(t, err.Error(), `nothing found at DockerDesktop setting "vm.doesNotExist"`)
	}
}

func TestSetSettingValueInvalidSet(t *testing.T) {
	f := newD4MFixture(t)
	defer f.TearDown()

	ctx := context.Background()
	err := f.d4m.SetSettingValue(ctx, "vm.resources.cpus.value.doesNotExist", "4")
	if assert.Error(t, err) {
		assert.Equal(t, err.Error(), `expected map at DockerDesktop setting "vm.resources.cpus.value", got: float64`)
	}
}

func TestSetSettingValueFloat(t *testing.T) {
	f := newD4MFixture(t)
	defer f.TearDown()

	ctx := context.Background()
	err := f.d4m.SetSettingValue(ctx, "vm.resources.cpus", "4")
	require.NoError(t, err)

	expected := strings.Replace(postSettingsJSONV2,
		`"cpus":{"max":8,"min":1,"value":2}`,
		`"cpus":{"max":8,"min":1,"value":4}`, 1)
	assert.Equal(t,
		f.postSettings,
		f.readerToMap(strings.NewReader(expected)))

	f.postSettings = nil
	err = f.d4m.SetSettingValue(ctx, "vm.resources.cpus", "2")
	require.NoError(t, err)
	assert.Nil(t, f.postSettings)
}

func TestSetSettingValueFloatLimit(t *testing.T) {
	f := newD4MFixture(t)
	defer f.TearDown()

	ctx := context.Background()
	err := f.d4m.SetSettingValue(ctx, "vm.resources.cpus", "100")
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), `setting value "vm.resources.cpus": 100 greater than max allowed`)
	}
	err = f.d4m.SetSettingValue(ctx, "vm.resources.cpus", "0")
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), `setting value "vm.resources.cpus": 0 less than min allowed`)
	}
}

func TestSetSettingValueBoolV1(t *testing.T) {
	f := newD4MFixture(t)
	defer f.TearDown()
	f.v = ddProtocolV1

	ctx := context.Background()
	err := f.d4m.SetSettingValue(ctx, "vm.kubernetes.enabled", "true")
	require.NoError(t, err)

	expected := strings.Replace(postSettingsJSONV1,
		`"enabled":false,`,
		`"enabled":true,`, 1)
	assert.Equal(t,
		f.postSettings,
		f.readerToMap(strings.NewReader(expected)))

	f.postSettings = nil
	err = f.d4m.SetSettingValue(ctx, "vm.kubernetes.enabled", "false")
	require.NoError(t, err)
	assert.Nil(t, f.postSettings)
}

func TestSetSettingValueBoolV2(t *testing.T) {
	f := newD4MFixture(t)
	defer f.TearDown()

	ctx := context.Background()
	err := f.d4m.SetSettingValue(ctx, "vm.kubernetes.enabled", "true")
	require.NoError(t, err)

	expected := strings.Replace(postSettingsJSONV2,
		`"kubernetes":{"enabled":{"locked":false,"value":false`,
		`"kubernetes":{"enabled":{"locked":false,"value":true`, 1)
	assert.Equal(t,
		f.postSettings,
		f.readerToMap(strings.NewReader(expected)))

	f.postSettings = nil
	err = f.d4m.SetSettingValue(ctx, "vm.kubernetes.enabled", "false")
	require.NoError(t, err)
	assert.Nil(t, f.postSettings)
}

func TestSetSettingValueFileSharing(t *testing.T) {
	f := newD4MFixture(t)
	defer f.TearDown()

	ctx := context.Background()
	err := f.d4m.SetSettingValue(ctx, "vm.fileSharing", "/x,/y")
	require.NoError(t, err)

	expected := strings.Replace(postSettingsJSONV2,
		`"fileSharing":[{"cached":false,"path":"/home"}]`,
		`"fileSharing":[{"cached":false,"path":"/x"}, {"cached":false,"path":"/y"}]`, 1)
	assert.Equal(t,
		f.postSettings,
		f.readerToMap(strings.NewReader(expected)))
}

func TestChooseWorstError(t *testing.T) {
	tt := []struct {
		expected string
		errors   []error
	}{
		{
			"connection error",
			[]error{
				errors.Wrap(errors.New("connection error"), ""),
				withStatusCode{errors.New("404 error"), 404},
			},
		},
		{
			"500 error",
			[]error{
				withStatusCode{errors.New("500 error"), 500},
				withStatusCode{errors.New("404 error"), 404},
			},
		},
		{
			"first error",
			[]error{
				errors.Wrap(errors.New("first error"), ""),
				errors.Wrap(errors.New("second error"), ""),
			},
		},
	}

	for i, tc := range tt {
		t.Run(strconv.Itoa(i)+" "+tc.expected, func(t *testing.T) {
			err := chooseWorstError(tc.errors)
			assert.EqualError(t, errors.Cause(err), tc.expected)
		})
	}
}

// Pre DD 4.12
var getSettingsJSONV1 = `{"vm":{"proxy":{"exclude":{"value":"","locked":false},"http":{"value":"","locked":false},"https":{"value":"","locked":false},"mode":{"value":"system","locked":false}},"daemon":{"locks":[],"json":"{\"debug\":true,\"experimental\":false}"},"resources":{"cpus":{"value":2,"min":1,"locked":false,"max":8},"memoryMiB":{"value":8192,"min":1024,"locked":false,"max":16384},"diskSizeMiB":{"value":61035,"used":18486,"locked":false},"dataFolder":{"value":"\/Users\/nick\/Library\/Containers\/com.docker.docker\/Data\/vms\/0\/data","locked":false},"swapMiB":{"value":1024,"min":512,"locked":false,"max":4096}},"fileSharing":{"value":[{"path":"\/Users","cached":false},{"path":"\/Volumes","cached":false},{"path":"\/private","cached":false},{"path":"\/tmp","cached":false}],"locked":false},"kubernetes":{"enabled":{"value":false,"locked":false},"stackOrchestrator":{"value":false,"locked":false},"showSystemContainers":{"value":false,"locked":false}},"network":{"dns":{"locked":false},"vpnkitCIDR":{"value":"192.168.65.0\/24","locked":false},"automaticDNS":{"value":true,"locked":false}}},"desktop":{"exportInsecureDaemon":{"value":false,"locked":false},"useGrpcfuse":{"value":true,"locked":false},"backupData":{"value":false,"locked":false},"checkForUpdates":{"value":true,"locked":false},"useCredentialHelper":{"value":true,"locked":false},"autoStart":{"value":false,"locked":false},"analyticsEnabled":{"value":true,"locked":false}},"wslIntegration":{"distros":{"value":[],"locked":false},"enableIntegrationWithDefaultWslDistro":{"value":false,"locked":false}},"cli":{"useCloudCli":{"value":true,"locked":false},"experimental":{"value":true,"locked":false}}}`

// Post DD 4.12
var getSettingsJSONV2 = `{"vm":{"proxy":{"exclude":{"value":"","locked":false},"http":{"value":"","locked":false},"https":{"value":"","locked":false},"mode":{"value":"system","locked":false}},"daemon":{"value":"{\"builder\":{\"gc\":{\"defaultKeepStorage\":\"20GB\",\"enabled\":true}},\"experimental\":false,\"features\":{\"buildkit\":true}}","locked":false},"resources":{"cpus":{"value":2,"min":1,"max":8},"memoryMiB":{"value":5120,"min":1024,"max":15627},"diskSizeMiB":{"value":65536},"dataFolder":"/home/nick/.docker/desktop/vms/0/data","swapMiB":{"value":1536,"min":512,"max":4096}},"fileSharing":[{"path":"/home","cached":false}],"kubernetes":{"enabled":{"value":false,"locked":false},"showSystemContainers":{"value":false,"locked":false},"installed":true},"network":{"automaticDNS":false,"DNS":"","socksProxyPort":0,"vpnkitCIDR":{"value":"192.168.65.0/24","locked":false}}},"desktop":{"autoStart":false,"tipLastId":30,"exportInsecureDaemon":{"value":false,"locked":false},"disableTips":true,"analyticsEnabled":{"value":true,"locked":false},"enhancedContainerIsolation":{"value":false,"locked":false},"backupData":false,"tipLastViewedTime":1667005050000,"useVirtualizationFrameworkVirtioFS":true,"useVirtualizationFramework":false,"canUseVirtualizationFrameworkVirtioFS":false,"canUseVirtualizationFramework":false,"mustDisplayVirtualizationFrameworkSwitch":false,"disableHardwareAcceleration":false,"disableUpdate":{"value":false,"locked":false},"autoDownloadUpdates":{"value":false,"locked":false},"useNightlyBuildUpdates":{"value":false,"locked":false},"useVpnkit":true,"openUIOnStartupDisabled":false,"updateAvailableTime":0,"updateInstallTime":0,"useCredentialHelper":true,"displayedTutorial":true,"themeSource":"system","containerTerminal":"integrated","useContainerdSnapshotter":false,"allowExperimentalFeatures":true,"enableSegmentDebug":false,"wslEngineEnabled":{"value":false,"locked":false},"wslEnableGrpcfuse":false,"wslPreconditionMessage":"","noWindowsContainers":false,"useBackgroundIndexing":true},"cli":{"useComposeV2":false,"useGrpcfuse":true},"vpnkit":{"maxConnections":0,"maxPortIdleTime":300,"MTU":0,"allowedBindAddresses":"","transparentProxy":false},"extensions":{"enabled":true,"onlyMarketplaceExtensions":false,"showSystemContainers":true},"wslIntegration":{"distros":[],"enableIntegrationWithDefaultWslDistro":false}}`

// Pre DD 4.12
var postSettingsJSONV1 = `{"desktop":{"exportInsecureDaemon":false,"useGrpcfuse":true,"backupData":false,"checkForUpdates":true,"useCredentialHelper":true,"autoStart":false,"analyticsEnabled":true},"cli":{"useCloudCli":true,"experimental":true},"vm":{"daemon":"{\"debug\":true,\"experimental\":false}","fileSharing":[{"path":"/Users","cached":false},{"path":"/Volumes","cached":false},{"path":"/private","cached":false},{"path":"/tmp","cached":false}],"kubernetes":{"enabled":false,"stackOrchestrator":false,"showSystemContainers":false},"network":{"vpnkitCIDR":"192.168.65.0/24","automaticDNS":true},"proxy":{"exclude":"","http":"","https":"","mode":"system"},"resources":{"cpus":2,"memoryMiB":8192,"diskSizeMiB":61035,"dataFolder":"/Users/nick/Library/Containers/com.docker.docker/Data/vms/0/data","swapMiB":1024}},"wslIntegration":{"distros":[],"enableIntegrationWithDefaultWslDistro":false}}`

// Post DD 4.12
var postSettingsJSONV2 = `{"cli":{"useComposeV2":false,"useGrpcfuse":true},"desktop":{"allowExperimentalFeatures":true,"analyticsEnabled":{"locked":false,"value":true},"autoDownloadUpdates":{"locked":false,"value":false},"autoStart":false,"backupData":false,"canUseVirtualizationFramework":false,"canUseVirtualizationFrameworkVirtioFS":false,"containerTerminal":"integrated","disableHardwareAcceleration":false,"disableTips":true,"disableUpdate":{"locked":false,"value":false},"displayedTutorial":true,"enableSegmentDebug":false,"enhancedContainerIsolation":{"locked":false,"value":false},"exportInsecureDaemon":{"locked":false,"value":false},"mustDisplayVirtualizationFrameworkSwitch":false,"noWindowsContainers":false,"openUIOnStartupDisabled":false,"themeSource":"system","tipLastId":30,"tipLastViewedTime":1667005050000,"updateAvailableTime":0,"updateInstallTime":0,"useBackgroundIndexing":true,"useContainerdSnapshotter":false,"useCredentialHelper":true,"useNightlyBuildUpdates":{"locked":false,"value":false},"useVirtualizationFramework":false,"useVirtualizationFrameworkVirtioFS":true,"useVpnkit":true,"wslEnableGrpcfuse":false,"wslEngineEnabled":{"locked":false,"value":false},"wslPreconditionMessage":""},"extensions":{"enabled":true,"onlyMarketplaceExtensions":false,"showSystemContainers":true},"vm":{"daemon":{"locked":false,"value":"{\"builder\":{\"gc\":{\"defaultKeepStorage\":\"20GB\",\"enabled\":true}},\"experimental\":false,\"features\":{\"buildkit\":true}}"},"fileSharing":[{"cached":false,"path":"/home"}],"kubernetes":{"enabled":{"locked":false,"value":false},"installed":true,"showSystemContainers":{"locked":false,"value":false}},"network":{"DNS":"","automaticDNS":false,"socksProxyPort":0,"vpnkitCIDR":{"locked":false,"value":"192.168.65.0/24"}},"proxy":{"exclude":{"locked":false,"value":""},"http":{"locked":false,"value":""},"https":{"locked":false,"value":""},"mode":{"locked":false,"value":"system"}},"resources":{"cpus":{"max":8,"min":1,"value":2},"dataFolder":"/home/nick/.docker/desktop/vms/0/data","diskSizeMiB":{"value":65536},"memoryMiB":{"max":15627,"min":1024,"value":5120},"swapMiB":{"max":4096,"min":512,"value":1536}}},"vpnkit":{"MTU":0,"allowedBindAddresses":"","maxConnections":0,"maxPortIdleTime":300,"transparentProxy":false},"wslIntegration":{"distros":[],"enableIntegrationWithDefaultWslDistro":false}}`

type d4mFixture struct {
	t            *testing.T
	d4m          *DockerDesktopClient
	postSettings map[string]interface{}
	v            ddProtocol
}

func newD4MFixture(t *testing.T) *d4mFixture {
	f := &d4mFixture{t: t}
	f.v = ddProtocolV2
	f.d4m = &DockerDesktopClient{backendNativeClient: f, backendClient: f}
	return f
}

func (f *d4mFixture) readerToMap(r io.Reader) map[string]interface{} {
	result := make(map[string]interface{})
	err := json.NewDecoder(r).Decode(&result)
	require.NoError(f.t, err)
	return result
}

func (f *d4mFixture) Do(r *http.Request) (*http.Response, error) {
	settings := getSettingsJSONV2
	if f.v == ddProtocolV2 {
		require.Equal(f.t, r.URL.Path, "/app/settings")
	} else {
		if r.URL.Path == "/app/settings" {
			// Simulate an error so that we try the old endpoint.
			return nil, fmt.Errorf("Mock using V1, /app/settings endpoint doesn't exist")
		}
		require.Equal(f.t, r.URL.Path, "/settings")
		settings = getSettingsJSONV1
	}
	if r.Method == "POST" {
		f.postSettings = f.readerToMap(r.Body)

		return &http.Response{
			StatusCode: http.StatusCreated,
			Body:       closeReader{strings.NewReader("")},
		}, nil
	}

	return &http.Response{
		StatusCode: http.StatusOK,
		Body:       closeReader{strings.NewReader(settings)},
	}, nil
}

func (f *d4mFixture) TearDown() {
}

type closeReader struct {
	io.Reader
}

func (c closeReader) Close() error { return nil }
07070100000065000081A400000000000000000000000168AFB0EA00001E3A000000000000000000000000000000000000002500000000ctlptl-0.8.43/pkg/cluster/machine.gopackage cluster

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"time"

	"github.com/mitchellh/go-homedir"
	"github.com/pkg/errors"
	"k8s.io/apimachinery/pkg/util/duration"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/cli-runtime/pkg/genericclioptions"
	klog "k8s.io/klog/v2"

	"github.com/tilt-dev/clusterid"

	"github.com/tilt-dev/ctlptl/internal/dctr"
	cexec "github.com/tilt-dev/ctlptl/internal/exec"
	"github.com/tilt-dev/ctlptl/pkg/api"
	"github.com/tilt-dev/ctlptl/pkg/docker"
)

type Machine interface {
	CPUs(ctx context.Context) (int, error)
	EnsureExists(ctx context.Context) error
	Restart(ctx context.Context, desired, existing *api.Cluster) error
}

type unknownMachine struct {
	product clusterid.Product
}

func (m unknownMachine) EnsureExists(ctx context.Context) error {
	return fmt.Errorf("cluster type %s not configurable", m.product)
}

func (m unknownMachine) CPUs(ctx context.Context) (int, error) {
	return 0, nil
}

func (m unknownMachine) Restart(ctx context.Context, desired, existing *api.Cluster) error {
	return fmt.Errorf("cluster type %s not configurable", desired.Product)
}

type sleeper func(dur time.Duration)

type d4mClient interface {
	writeSettings(ctx context.Context, settings map[string]interface{}) error
	settings(ctx context.Context) (map[string]interface{}, error)
	ResetCluster(ctx context.Context) error
	setK8sEnabled(settings map[string]interface{}, desired bool) (bool, error)
	ensureMinCPU(settings map[string]interface{}, desired int) (bool, error)
	Open(ctx context.Context) error
}

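// dockerMachine manages the Docker Desktop VM backing a cluster: it can start
// Docker Desktop, report its CPU count, and adjust its settings through the
// d4m client.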
type dockerMachine struct {
	iostreams    genericclioptions.IOStreams
	dockerClient dctr.Client
	sleep        sleeper
	d4m          d4mClient
	os           string
}

func NewDockerMachine(ctx context.Context, client dctr.Client, iostreams genericclioptions.IOStreams) (*dockerMachine, error) {
	d4m, err := NewDockerDesktopClient()
	if err != nil {
		return nil, err
	}

	return &dockerMachine{
		dockerClient: client,
		iostreams:    iostreams,
		sleep:        time.Sleep,
		d4m:          d4m,
		os:           runtime.GOOS,
	}, nil
}

func (m *dockerMachine) CPUs(ctx context.Context) (int, error) {
	info, err := m.dockerClient.Info(ctx)
	if err != nil {
		return 0, err
	}
	return info.NCPU, nil
}

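// EnsureExists checks that the Docker daemon is reachable. If the host is a
// local Docker Desktop install, it attempts to start Docker Desktop and waits
// up to a minute for it to boot.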
func (m *dockerMachine) EnsureExists(ctx context.Context) error {
	_, err := m.dockerClient.ServerVersion(ctx)
	if err == nil {
		return nil
	}

	host := m.dockerClient.DaemonHost()

	// If we are connecting to local desktop, we can try to start it.
	// Otherwise, we just error.
	if !docker.IsLocalDockerDesktop(host, m.os) {
		return fmt.Errorf("Not connected to Docker Engine. Host: %q. Error: %v",
			host, err)
	}

	klog.V(2).Infoln("No Docker Desktop running. Attempting to start Docker.")
	err = m.d4m.Open(ctx)
	if err != nil {
		return err
	}

	dur := 60 * time.Second
	_, _ = fmt.Fprintf(m.iostreams.ErrOut, "Waiting %s for Docker Desktop to boot...\n", duration.ShortHumanDuration(dur))
	err = wait.PollUntilContextTimeout(ctx, time.Second, dur, true, func(ctx context.Context) (bool, error) {
		_, err := m.dockerClient.ServerVersion(ctx)
		isSuccess := err == nil
		return isSuccess, nil
	})
	if err != nil {
		return fmt.Errorf("timed out waiting for Docker to start")
	}
	klog.V(2).Infoln("Docker started successfully")
	return nil
}

func (m *dockerMachine) Restart(ctx context.Context, desired, existing *api.Cluster) error {
	canChangeCPUs := false
	isLocalDockerDesktop := false
	if docker.IsLocalDockerDesktop(m.dockerClient.DaemonHost(), m.os) {
		canChangeCPUs = true // DockerForMac and DockerForWindows can change the CPU on the VM
		isLocalDockerDesktop = true
	} else if clusterid.Product(desired.Product) == clusterid.ProductMinikube {
		// Minikube can change the CPU on the VM or on the container itself
		canChangeCPUs = true
	}

	if existing.Status.CPUs < desired.MinCPUs && !canChangeCPUs {
		return fmt.Errorf("Cannot automatically set minimum CPU to %d on this platform", desired.MinCPUs)
	}

	if isLocalDockerDesktop {
		settings, err := m.d4m.settings(ctx)
		if err != nil {
			return err
		}

		k8sChanged := false
		if desired.Product == string(clusterid.ProductDockerDesktop) {
			k8sChanged, err = m.d4m.setK8sEnabled(settings, true)
			if err != nil {
				return err
			}
		}

		cpuChanged, err := m.d4m.ensureMinCPU(settings, desired.MinCPUs)
		if err != nil {
			return err
		}

		if k8sChanged || cpuChanged {
			err := m.d4m.writeSettings(ctx, settings)
			if err != nil {
				return err
			}

			dur := 120 * time.Second
			_, _ = fmt.Fprintf(m.iostreams.ErrOut,
				"Applied new Docker Desktop settings. Waiting %s for Docker Desktop to restart...\n",
				duration.ShortHumanDuration(dur))

			// Sleep for a short time to ensure the write takes effect.
			m.sleep(2 * time.Second)

			err = wait.PollUntilContextTimeout(ctx, time.Second, dur, true, func(ctx context.Context) (bool, error) {
				_, err := m.dockerClient.ServerVersion(ctx)
				isSuccess := err == nil
				return isSuccess, nil
			})
			if err != nil {
				return errors.Wrap(err, "Docker Desktop restart timeout")
			}
		}
	}

	return nil
}

// Currently, our Minikube admin only supports Minikube on Docker,
// so we delegate to the dockerMachine driver.
type minikubeMachine struct {
	iostreams genericclioptions.IOStreams
	runner    cexec.CmdRunner
	dm        *dockerMachine
	name      string
}

func newMinikubeMachine(iostreams genericclioptions.IOStreams, runner cexec.CmdRunner, name string, dm *dockerMachine) *minikubeMachine {
	return &minikubeMachine{
		iostreams: iostreams,
		runner:    runner,
		name:      name,
		dm:        dm,
	}
}

type minikubeSettings struct {
	CPUs int
}

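// CPUs reads the configured CPU count from the minikube profile's config.json
// under ~/.minikube/profiles/<name>.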
func (m *minikubeMachine) CPUs(ctx context.Context) (int, error) {
	homedir, err := homedir.Dir()
	if err != nil {
		return 0, err
	}
	configPath := filepath.Join(homedir, ".minikube", "profiles", m.name, "config.json")
	f, err := os.Open(configPath)
	if err != nil {
		return 0, err
	}
	defer func() {
		_ = f.Close()
	}()

	decoder := json.NewDecoder(f)
	settings := minikubeSettings{}
	err = decoder.Decode(&settings)
	if err != nil {
		return 0, err
	}
	return settings.CPUs, nil
}

func (m *minikubeMachine) EnsureExists(ctx context.Context) error {
	err := m.dm.EnsureExists(ctx)
	if err != nil {
		return err
	}

	m.startIfStopped(ctx)
	return nil
}

func (m *minikubeMachine) Restart(ctx context.Context, desired, existing *api.Cluster) error {
	return m.dm.Restart(ctx, desired, existing)
}

// Minikube is special because the "machine" can be stopped temporarily.
// Check to see if there's a stopped machine, and start it.
// Never return an error - if we can't proceed, we'll just restart from scratch.
func (m *minikubeMachine) startIfStopped(ctx context.Context) {
	out := bytes.NewBuffer(nil)

	// Ignore errors. `minikube status` returns a non-zero exit code when
	// the container has been stopped.
	_ = m.runner.RunIO(ctx, genericclioptions.IOStreams{Out: out, ErrOut: m.iostreams.ErrOut},
		"minikube", "status", "-p", m.name, "-o", "json")

	status := minikubeStatus{}
	decoder := json.NewDecoder(out)
	err := decoder.Decode(&status)
	if err != nil {
		return
	}

	// Handle 'minikube stop'
	if status.Host == "Stopped" {
		_, _ = fmt.Fprintf(m.iostreams.ErrOut, "Cluster %q exists but is stopped. Starting...\n", m.name)
		_ = m.runner.RunIO(ctx, m.iostreams, "minikube", "start", "-p", m.name)
		return
	}

	// Handle 'minikube pause'
	if status.APIServer == "Stopped" {
		_, _ = fmt.Fprintf(m.iostreams.ErrOut, "Cluster %q exists but is paused. Starting...\n", m.name)
		_ = m.runner.RunIO(ctx, m.iostreams, "minikube", "unpause", "-p", m.name)
		return
	}
}

type minikubeStatus struct {
	Host      string
	APIServer string
}
07070100000066000081A400000000000000000000000168AFB0EA000001FF000000000000000000000000000000000000002500000000ctlptl-0.8.43/pkg/cluster/options.gopackage cluster

import (
	"k8s.io/apimachinery/pkg/fields"

	"github.com/tilt-dev/ctlptl/pkg/api"
)

type ListOptions struct {
	FieldSelector string
}

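// clusterFields adapts an api.Cluster to the fields.Fields interface so
// clusters can be filtered with field selectors on "name" and "product".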
type clusterFields api.Cluster

func (cf *clusterFields) Has(field string) bool {
	return field == "name" || field == "product"
}

func (cf *clusterFields) Get(field string) string {
	if field == "name" {
		return (*api.Cluster)(cf).Name
	}
	if field == "product" {
		return (*api.Cluster)(cf).Product
	}
	return ""
}

var _ fields.Fields = &clusterFields{}
07070100000067000041ED00000000000000000000000268AFB0EA00000000000000000000000000000000000000000000001600000000ctlptl-0.8.43/pkg/cmd07070100000068000081A400000000000000000000000168AFB0EA000001F7000000000000000000000000000000000000002300000000ctlptl-0.8.43/pkg/cmd/analytics.gopackage cmd

import (
	"runtime"

	"github.com/tilt-dev/wmclient/pkg/analytics"
)

var Version string

func newAnalytics() (analytics.Analytics, error) {
	return analytics.NewRemoteAnalytics(
		"ctlptl",
		analytics.WithLogger(discardLogger{}),
		analytics.WithGlobalTags(globalTags()))
}

func globalTags() map[string]string {
	return map[string]string{
		"version": Version,
		"os":      runtime.GOOS,
	}
}

type discardLogger struct{}

func (dl discardLogger) Printf(fmt string, v ...interface{}) {}
07070100000069000081A400000000000000000000000168AFB0EA00000AC0000000000000000000000000000000000000001F00000000ctlptl-0.8.43/pkg/cmd/apply.gopackage cmd

import (
	"context"
	"fmt"
	"os"
	"time"

	"github.com/spf13/cobra"
	"k8s.io/cli-runtime/pkg/genericclioptions"

	"github.com/tilt-dev/ctlptl/pkg/api"
	"github.com/tilt-dev/ctlptl/pkg/cluster"
	"github.com/tilt-dev/ctlptl/pkg/registry"
	"github.com/tilt-dev/ctlptl/pkg/visitor"
)

type ApplyOptions struct {
	*genericclioptions.PrintFlags
	*genericclioptions.FileNameFlags
	genericclioptions.IOStreams

	Filenames []string
}

func NewApplyOptions() *ApplyOptions {
	o := &ApplyOptions{
		PrintFlags: genericclioptions.NewPrintFlags("created"),
		IOStreams:  genericclioptions.IOStreams{Out: os.Stdout, ErrOut: os.Stderr, In: os.Stdin},
	}
	o.FileNameFlags = &genericclioptions.FileNameFlags{Filenames: &o.Filenames}
	return o
}

func (o *ApplyOptions) Command() *cobra.Command {
	var cmd = &cobra.Command{
		Use:   "apply -f FILENAME",
		Short: "Apply a cluster config to the currently running clusters",
		Example: "  ctlptl apply -f cluster.yaml\n" +
			"  cat cluster.yaml | ctlptl apply -f -",
		Run: o.Run,
	}

	cmd.SetOut(o.Out)
	cmd.SetErr(o.ErrOut)
	o.FileNameFlags.AddFlags(cmd.Flags())
	o.PrintFlags.AddFlags(cmd)

	return cmd
}

func (o *ApplyOptions) Run(cmd *cobra.Command, args []string) {
	if len(o.Filenames) == 0 {
		_, _ = fmt.Fprintf(o.ErrOut, "Expected source files with -f")
		os.Exit(1)
	}

	err := o.run()
	if err != nil {
		_, _ = fmt.Fprintf(o.ErrOut, "%v\n", err)
		os.Exit(1)
	}
}

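// run decodes the given files into API objects and applies registries before
// clusters, so a cluster created in the same apply can connect to a registry
// it references.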
func (o *ApplyOptions) run() error {
	a, err := newAnalytics()
	if err != nil {
		return err
	}
	a.Incr("cmd.apply", nil)
	defer a.Flush(time.Second)

	ctx := context.TODO()

	printer, err := o.ToPrinter()
	if err != nil {
		return err
	}

	visitors, err := visitor.FromStrings(o.Filenames, o.In)
	if err != nil {
		return err
	}

	objects, err := visitor.DecodeAll(visitors)
	if err != nil {
		return err
	}

	var cc *cluster.Controller
	var rc *registry.Controller
	for _, obj := range objects {
		switch obj := obj.(type) {
		case *api.Registry:
			if rc == nil {
				rc, err = registry.DefaultController(o.IOStreams)
				if err != nil {
					return err
				}
			}

			newObj, err := rc.Apply(ctx, obj)
			if err != nil {
				return err
			}

			err = printer.PrintObj(newObj, o.Out)
			if err != nil {
				return err
			}
		}
	}

	for _, obj := range objects {
		switch obj := obj.(type) {
		case *api.Cluster:
			if cc == nil {
				cc, err = cluster.DefaultController(o.IOStreams)
				if err != nil {
					return err
				}
			}

			newObj, err := cc.Apply(ctx, obj)
			if err != nil {
				return err
			}

			err = printer.PrintObj(newObj, o.Out)
			if err != nil {
				return err
			}

		case *api.Registry:
			// Handled above
			continue

		default:
			return fmt.Errorf("unrecognized type: %T", obj)
		}
	}
	return nil
}
0707010000006A000081A400000000000000000000000168AFB0EA00000374000000000000000000000000000000000000002000000000ctlptl-0.8.43/pkg/cmd/create.gopackage cmd

import (
	"os"

	"github.com/spf13/cobra"
	"k8s.io/cli-runtime/pkg/genericclioptions"
)

type CreateOptions struct {
	genericclioptions.IOStreams
}

func NewCreateOptions() *CreateOptions {
	o := &CreateOptions{
		IOStreams: genericclioptions.IOStreams{Out: os.Stdout, ErrOut: os.Stderr, In: os.Stdin},
	}
	return o
}

func (o *CreateOptions) Command() *cobra.Command {
	var cmd = &cobra.Command{
		Use:   "create [cluster|registry]",
		Short: "Create a cluster or registry",
		Example: "  ctlptl create cluster docker-desktop\n" +
			"  ctlptl create cluster kind --registry=ctlptl-registry",
		Run: o.Run,
	}

	cmd.SetOut(o.Out)
	cmd.SetErr(o.ErrOut)
	cmd.AddCommand(NewCreateClusterOptions().Command())
	cmd.AddCommand(NewCreateRegistryOptions().Command())

	return cmd
}

func (o *CreateOptions) Run(cmd *cobra.Command, args []string) {
	_ = cmd.Help()
	os.Exit(1)
}
0707010000006B000081A400000000000000000000000168AFB0EA00000ED4000000000000000000000000000000000000002800000000ctlptl-0.8.43/pkg/cmd/create_cluster.gopackage cmd

import (
	"context"
	"fmt"
	"os"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/spf13/cobra"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/cli-runtime/pkg/genericclioptions"

	"github.com/tilt-dev/clusterid"

	"github.com/tilt-dev/ctlptl/pkg/api"
	"github.com/tilt-dev/ctlptl/pkg/cluster"
)

type CreateClusterOptions struct {
	*genericclioptions.PrintFlags
	genericclioptions.IOStreams

	Cluster *api.Cluster
}

func NewCreateClusterOptions() *CreateClusterOptions {
	o := &CreateClusterOptions{
		PrintFlags: genericclioptions.NewPrintFlags("created"),
		IOStreams:  genericclioptions.IOStreams{Out: os.Stdout, ErrOut: os.Stderr, In: os.Stdin},
		Cluster: &api.Cluster{
			TypeMeta: cluster.TypeMeta(),
			Minikube: &api.MinikubeCluster{},
		},
	}
	return o
}

func (o *CreateClusterOptions) Command() *cobra.Command {
	var cmd = &cobra.Command{
		Use:   "cluster [product]",
		Short: "Create a cluster with the given local Kubernetes product",
		Example: "  ctlptl create cluster docker-desktop\n" +
			"  ctlptl create cluster kind --registry=ctlptl-registry",
		Run:  o.Run,
		Args: cobra.ExactArgs(1),
	}

	cmd.SetOut(o.Out)
	cmd.SetErr(o.ErrOut)
	o.AddFlags(cmd)
	cmd.Flags().StringVar(&o.Cluster.Registry, "registry",
		o.Cluster.Registry, "Connect the cluster to the named registry")
	cmd.Flags().StringVar(&o.Cluster.Name, "name",
		o.Cluster.Name, "Names the context. If not specified, uses the default cluster name for this Kubernetes product")
	cmd.Flags().IntVar(&o.Cluster.MinCPUs, "min-cpus",
		o.Cluster.MinCPUs, "Sets the minimum CPUs for the cluster")
	cmd.Flags().StringVar(&o.Cluster.KubernetesVersion, "kubernetes-version",
		o.Cluster.KubernetesVersion, "Sets the kubernetes version for the cluster, if possible")
	cmd.Flags().StringSliceVar(&o.Cluster.Minikube.StartFlags, "minikube-start-flags",
		o.Cluster.Minikube.StartFlags, "Minikube extra start flags (only applicable to a minikube cluster)")
	cmd.Flags().StringSliceVar(&o.Cluster.Minikube.ExtraConfigs, "minikube-extra-configs",
		o.Cluster.Minikube.ExtraConfigs, "Minikube extra configs (only applicable to a minikube cluster)")
	cmd.Flags().StringVar(&o.Cluster.Minikube.ContainerRuntime, "minikube-container-runtime",
		o.Cluster.Minikube.ContainerRuntime, "Minikube container runtime (only applicable to a minikube cluster)")

	return cmd
}

func (o *CreateClusterOptions) Run(cmd *cobra.Command, args []string) {
	controller, err := cluster.DefaultController(o.IOStreams)
	if err != nil {
		_, _ = fmt.Fprintf(o.ErrOut, "%v\n", err)
		os.Exit(1)
	}

	err = o.run(controller, args[0])
	if err != nil {
		_, _ = fmt.Fprintf(o.ErrOut, "%v\n", err)
		os.Exit(1)
	}
}

type clusterCreator interface {
	Apply(ctx context.Context, cluster *api.Cluster) (*api.Cluster, error)
	Get(ctx context.Context, name string) (*api.Cluster, error)
}

func (o *CreateClusterOptions) run(controller clusterCreator, product string) error {
	a, err := newAnalytics()
	if err != nil {
		return err
	}
	a.Incr("cmd.create.cluster", nil)
	defer a.Flush(time.Second)

	o.Cluster.Product = product

	// Zero out the minikube config if not used.
	if product != string(clusterid.ProductMinikube) || cmp.Equal(o.Cluster.Minikube, &api.MinikubeCluster{}) {
		o.Cluster.Minikube = nil
	}

	cluster.FillDefaults(o.Cluster)

	ctx := context.Background()
	_, err = controller.Get(ctx, o.Cluster.Name)
	if err == nil {
		return fmt.Errorf("Cannot create cluster: already exists")
	} else if err != nil && !errors.IsNotFound(err) {
		return fmt.Errorf("Cannot check cluster: %v", err)
	}

	applied, err := controller.Apply(ctx, o.Cluster)
	if err != nil {
		return err
	}

	printer, err := o.ToPrinter()
	if err != nil {
		return err
	}

	return printer.PrintObj(applied, o.Out)
}
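
// For reference (an illustrative sketch, not part of the upstream source):
// `ctlptl create cluster kind --registry=ctlptl-registry` builds roughly the
// same api.Cluster as applying this YAML with `ctlptl apply -f`:
//
//	apiVersion: ctlptl.dev/v1alpha1
//	kind: Cluster
//	product: kind
//	registry: ctlptl-registry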
0707010000006C000081A400000000000000000000000168AFB0EA00000614000000000000000000000000000000000000002D00000000ctlptl-0.8.43/pkg/cmd/create_cluster_test.gopackage cmd

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/cli-runtime/pkg/genericclioptions"

	"github.com/tilt-dev/ctlptl/pkg/api"
)

func TestCreateCluster(t *testing.T) {
	streams, _, out, _ := genericclioptions.NewTestIOStreams()
	o := NewCreateClusterOptions()
	o.IOStreams = streams

	fcc := &fakeClusterController{}
	err := o.run(fcc, "kind")
	require.NoError(t, err)
	assert.Equal(t, "cluster.ctlptl.dev/kind-kind created\n", out.String())
	assert.Equal(t, "kind-kind", fcc.lastApplyName)
}

type fakeClusterController struct {
	clusters       map[string]*api.Cluster
	lastApplyName  string
	lastDeleteName string
	nextError      error
}

func (cd *fakeClusterController) Delete(ctx context.Context, name string) error {
	if cd.nextError != nil {
		return cd.nextError
	}
	cd.lastDeleteName = name
	delete(cd.clusters, name)
	return nil
}

func (cd *fakeClusterController) Apply(ctx context.Context, cluster *api.Cluster) (*api.Cluster, error) {
	cd.lastApplyName = cluster.Name
	if cd.clusters == nil {
		cd.clusters = make(map[string]*api.Cluster)
	}
	cd.clusters[cluster.Name] = cluster
	return cluster, nil
}

func (cd *fakeClusterController) Get(ctx context.Context, name string) (*api.Cluster, error) {
	cluster, ok := cd.clusters[name]
	if ok {
		return cluster, nil
	}
	return nil, apierrors.NewNotFound(schema.GroupResource{Group: "ctlptl.dev", Resource: "clusters"}, name)
}
0707010000006D000081A400000000000000000000000168AFB0EA00000B1D000000000000000000000000000000000000002900000000ctlptl-0.8.43/pkg/cmd/create_registry.gopackage cmd

import (
	"context"
	"fmt"
	"os"
	"time"

	"github.com/spf13/cobra"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/cli-runtime/pkg/genericclioptions"

	"github.com/tilt-dev/ctlptl/pkg/api"
	"github.com/tilt-dev/ctlptl/pkg/registry"
)

type CreateRegistryOptions struct {
	*genericclioptions.PrintFlags
	genericclioptions.IOStreams

	Registry *api.Registry
}

func NewCreateRegistryOptions() *CreateRegistryOptions {
	o := &CreateRegistryOptions{
		PrintFlags: genericclioptions.NewPrintFlags("created"),
		IOStreams:  genericclioptions.IOStreams{Out: os.Stdout, ErrOut: os.Stderr, In: os.Stdin},
		Registry: &api.Registry{
			TypeMeta: registry.TypeMeta(),
		},
	}
	return o
}

func (o *CreateRegistryOptions) Command() *cobra.Command {
	var cmd = &cobra.Command{
		Use:   "registry [name]",
		Short: "Create a registry with the given name",
		Example: "  ctlptl create registry ctlptl-registry\n" +
			"  ctlptl create registry ctlptl-registry --port=5000\n" +
			"  ctlptl create registry ctlptl-registry --port=5000 --listen-address 0.0.0.0",
		Run:  o.Run,
		Args: cobra.ExactArgs(1),
	}

	cmd.SetOut(o.Out)
	cmd.SetErr(o.ErrOut)
	o.AddFlags(cmd)
	cmd.Flags().IntVar(&o.Registry.Port, "port", o.Registry.Port,
		"The port to expose the registry on host. If not specified, chooses a random port")
	cmd.Flags().StringVar(&o.Registry.ListenAddress, "listen-address", o.Registry.ListenAddress,
		"The host's IP address to bind the container to. If not set defaults to 127.0.0.1")
	cmd.Flags().StringVar(&o.Registry.Image, "image", registry.DefaultRegistryImageRef,
		"Registry image to use")

	return cmd
}

func (o *CreateRegistryOptions) Run(cmd *cobra.Command, args []string) {
	controller, err := registry.DefaultController(o.IOStreams)
	if err != nil {
		_, _ = fmt.Fprintf(o.ErrOut, "%v\n", err)
		os.Exit(1)
	}

	err = o.run(controller, args[0])
	if err != nil {
		_, _ = fmt.Fprintf(o.ErrOut, "%v\n", err)
		os.Exit(1)
	}
}

type registryCreator interface {
	Apply(ctx context.Context, registry *api.Registry) (*api.Registry, error)
	Get(ctx context.Context, name string) (*api.Registry, error)
}

func (o *CreateRegistryOptions) run(controller registryCreator, name string) error {
	a, err := newAnalytics()
	if err != nil {
		return err
	}
	a.Incr("cmd.create.registry", nil)
	defer a.Flush(time.Second)

	o.Registry.Name = name
	registry.FillDefaults(o.Registry)

	ctx := context.Background()
	_, err = controller.Get(ctx, o.Registry.Name)
	if err == nil {
		return fmt.Errorf("Cannot create registry: already exists")
	} else if err != nil && !errors.IsNotFound(err) {
		return fmt.Errorf("Cannot check registry: %v", err)
	}

	applied, err := controller.Apply(ctx, o.Registry)
	if err != nil {
		return err
	}

	printer, err := o.ToPrinter()
	if err != nil {
		return err
	}

	return printer.PrintObj(applied, o.Out)
}
0707010000006E000081A400000000000000000000000168AFB0EA00000460000000000000000000000000000000000000002E00000000ctlptl-0.8.43/pkg/cmd/create_registry_test.gopackage cmd

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/cli-runtime/pkg/genericclioptions"

	"github.com/tilt-dev/ctlptl/pkg/api"
)

func TestCreateRegistry(t *testing.T) {
	streams, _, out, _ := genericclioptions.NewTestIOStreams()
	o := NewCreateRegistryOptions()
	o.IOStreams = streams

	frc := &fakeRegistryController{}
	err := o.run(frc, "my-registry")
	require.NoError(t, err)
	assert.Equal(t, "registry.ctlptl.dev/my-registry created\n", out.String())
	assert.Equal(t, "my-registry", frc.lastRegistry.Name)
}

type fakeRegistryController struct {
	lastRegistry *api.Registry
}

func (cd *fakeRegistryController) Apply(ctx context.Context, registry *api.Registry) (*api.Registry, error) {
	cd.lastRegistry = registry
	return registry, nil
}

func (cd *fakeRegistryController) Get(ctx context.Context, name string) (*api.Registry, error) {
	return nil, apierrors.NewNotFound(schema.GroupResource{Group: "ctlptl.dev", Resource: "registries"}, name)
}
0707010000006F000081A400000000000000000000000168AFB0EA00001A79000000000000000000000000000000000000002000000000ctlptl-0.8.43/pkg/cmd/delete.gopackage cmd

import (
	"context"
	"fmt"
	"os"
	"time"

	"github.com/spf13/cobra"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/cli-runtime/pkg/genericclioptions"

	"github.com/tilt-dev/ctlptl/pkg/api"
	"github.com/tilt-dev/ctlptl/pkg/cluster"
	"github.com/tilt-dev/ctlptl/pkg/registry"
	"github.com/tilt-dev/ctlptl/pkg/visitor"
)

type DeleteOptions struct {
	*genericclioptions.PrintFlags
	*genericclioptions.FileNameFlags
	genericclioptions.IOStreams

	IgnoreNotFound bool
	Filenames      []string

	// We currently only support two modes - "true" and "false".
	// But we expect that there may be more modes in the future
	// (like what happened with kubectl delete --cascade).
	Cascade string

	clusterController clusterController
	registryDeleter   deleter
}

func NewDeleteOptions() *DeleteOptions {
	o := &DeleteOptions{
		PrintFlags: genericclioptions.NewPrintFlags("deleted"),
		IOStreams:  genericclioptions.IOStreams{Out: os.Stdout, ErrOut: os.Stderr, In: os.Stdin},
	}
	o.FileNameFlags = &genericclioptions.FileNameFlags{Filenames: &o.Filenames}
	return o
}

func (o *DeleteOptions) Command() *cobra.Command {
	var cmd = &cobra.Command{
		Use:   "delete -f FILENAME",
		Short: "Delete a currently running cluster",
		Example: "  ctlptl delete -f cluster.yaml\n" +
			"  ctlptl delete cluster minikube",
		Run: o.Run,
	}

	cmd.SetOut(o.Out)
	cmd.SetErr(o.ErrOut)
	o.FileNameFlags.AddFlags(cmd.Flags())

	cmd.Flags().BoolVar(&o.IgnoreNotFound, "ignore-not-found", o.IgnoreNotFound, "If the requested object does not exist the command will return exit code 0.")
	cmd.Flags().StringVar(&o.Cascade, "cascade", "false",
		"If 'true', objects will be deleted recursively. "+
			"For example, deleting a cluster will delete any connected registries. Defaults to 'false'.")

	return cmd
}

func (o *DeleteOptions) Run(cmd *cobra.Command, args []string) {
	err := o.run(args)
	if err != nil {
		_, _ = fmt.Fprintf(o.ErrOut, "%v\n", err)
		os.Exit(1)
	}
}

type deleter interface {
	Delete(ctx context.Context, name string) error
}

type clusterController interface {
	deleter
	Get(ctx context.Context, name string) (*api.Cluster, error)
}

func (o *DeleteOptions) run(args []string) error {
	a, err := newAnalytics()
	if err != nil {
		return err
	}
	a.Incr("cmd.delete", nil)
	defer a.Flush(time.Second)

	err = o.validateCascade()
	if err != nil {
		return err
	}

	resources, err := o.parseExplicitResources(args)
	if err != nil {
		return err
	}

	ctx := context.TODO()
	resources, err = o.cascadeResources(ctx, resources)
	if err != nil {
		return err
	}

	printer, err := o.ToPrinter()
	if err != nil {
		return err
	}

	for _, resource := range resources {
		switch resource := resource.(type) {
		case *api.Cluster:
			controller, err := o.getClusterController()
			if err != nil {
				return err
			}

			cluster.FillDefaults(resource)

			name := resource.Name

			// Normalize the name of the cluster so that
			// 'ctlptl delete cluster kind' works.
			cluster, err := normalizedGet(ctx, controller, name)
			if err == nil {
				name = cluster.Name
			}

			err = controller.Delete(ctx, name)

			if err != nil {
				if o.IgnoreNotFound && errors.IsNotFound(err) {
					continue
				}
				return err
			}
			err = printer.PrintObj(resource, o.Out)
			if err != nil {
				return err
			}
		case *api.Registry:
			if o.registryDeleter == nil {
				o.registryDeleter, err = registry.DefaultController(o.IOStreams)
				if err != nil {
					return err
				}
			}

			registry.FillDefaults(resource)
			err := o.registryDeleter.Delete(ctx, resource.Name)
			if err != nil {
				if o.IgnoreNotFound && errors.IsNotFound(err) {
					continue
				}
				return err
			}
			err = printer.PrintObj(resource, o.Out)
			if err != nil {
				return err
			}
		default:
			return fmt.Errorf("cannot delete: %T", resource)
		}
	}
	return nil
}

func (o *DeleteOptions) parseExplicitResources(args []string) ([]runtime.Object, error) {
	hasFiles := len(o.Filenames) > 0
	hasNames := len(args) >= 2
	if !(hasFiles || hasNames) {
		return nil, fmt.Errorf("Expected resources, specified as files ('ctlptl delete -f') or names ('ctlptl delete cluster foo`)")
	}
	if hasFiles && hasNames {
		return nil, fmt.Errorf("Can only specify one of {files, resource names}")
	}

	if hasFiles {
		visitors, err := visitor.FromStrings(o.Filenames, o.In)
		if err != nil {
			return nil, err
		}

		return visitor.DecodeAll(visitors)
	}

	var resources []runtime.Object
	t := args[0]
	names := args[1:]
	switch t {
	case "cluster", "clusters":
		for _, name := range names {
			resources = append(resources, &api.Cluster{
				TypeMeta: cluster.TypeMeta(),
				Name:     name,
			})
		}
	case "registry", "registries":
		for _, name := range names {
			resources = append(resources, &api.Registry{
				TypeMeta: registry.TypeMeta(),
				Name:     name,
			})
		}
	default:
		return nil, fmt.Errorf("Unrecognized type: %s", t)
	}
	return resources, nil
}

func (o *DeleteOptions) getClusterController() (clusterController, error) {
	if o.clusterController == nil {
		controller, err := cluster.DefaultController(o.IOStreams)
		if err != nil {
			return nil, err
		}
		o.clusterController = controller
	}
	return o.clusterController, nil
}

// Interpret the current cascade mode, adding new resources to the list
// before the resource that depends on them.
func (o *DeleteOptions) cascadeResources(ctx context.Context, resources []runtime.Object) ([]runtime.Object, error) {
	if o.Cascade != "true" {
		return resources, nil
	}

	result := make([]runtime.Object, 0, len(resources))
	registryNames := make(map[string]bool, 0)
	for _, r := range resources {
		switch r := r.(type) {
		case *api.Cluster:
			registryName := r.Registry

			// Check to see if we can find the cluster name in the registry status.
			if registryName == "" {
				controller, err := o.getClusterController()
				if err != nil {
					return nil, err
				}
				cluster, err := normalizedGet(ctx, controller, r.Name)
				if err != nil && !errors.IsNotFound(err) {
					return nil, err
				}
				if cluster != nil {
					registryName = cluster.Registry
				}
			}

			if registryName != "" && !registryNames[registryName] {
				registryNames[registryName] = true
				result = append(result, &api.Registry{
					TypeMeta: registry.TypeMeta(),
					Name:     registryName,
				})
			}
			result = append(result, r)

		case *api.Registry:
			if registryNames[r.Name] {
				continue
			}
			registryNames[r.Name] = true
			result = append(result, r)
		}
	}

	return result, nil
}
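
// An illustrative sketch (not part of the upstream source) of the ordering
// this produces: with --cascade=true, deleting a cluster whose spec or status
// names a registry expands to deleting the registry first, then the cluster.
//
//	in:  [Cluster "kind-kind" (registry: "my-registry")]
//	out: [Registry "my-registry", Cluster "kind-kind"]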

func (o *DeleteOptions) validateCascade() error {
	if o.Cascade == "" || o.Cascade == "true" || o.Cascade == "false" {
		return nil
	}
	return fmt.Errorf("Invalid cascade: %s. Valid values: true, false.", o.Cascade)
}
07070100000070000081A400000000000000000000000168AFB0EA0000144E000000000000000000000000000000000000002500000000ctlptl-0.8.43/pkg/cmd/delete_test.gopackage cmd

import (
	"context"
	"io"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/cli-runtime/pkg/genericclioptions"

	"github.com/tilt-dev/ctlptl/pkg/api"
)

func TestDeleteByName(t *testing.T) {
	streams, _, out, _ := genericclioptions.NewTestIOStreams()
	o := NewDeleteOptions()
	o.IOStreams = streams

	cd := &fakeClusterController{}
	o.clusterController = cd
	err := o.run([]string{"cluster", "kind-kind"})
	require.NoError(t, err)
	assert.Equal(t, "cluster.ctlptl.dev/kind-kind deleted\n", out.String())
	assert.Equal(t, "kind-kind", cd.lastDeleteName)
}

func TestDeleteByFile(t *testing.T) {
	streams, in, out, _ := genericclioptions.NewTestIOStreams()
	o := NewDeleteOptions()
	o.IOStreams = streams

	_, _ = in.Write([]byte(`apiVersion: ctlptl.dev/v1alpha1
kind: Cluster
name: kind-kind
`))

	cd := &fakeClusterController{}
	o.clusterController = cd
	o.Filenames = []string{"-"}
	err := o.run([]string{})
	require.NoError(t, err)
	assert.Equal(t, "cluster.ctlptl.dev/kind-kind deleted\n", out.String())
	assert.Equal(t, "kind-kind", cd.lastDeleteName)
}

func TestDeleteDefault(t *testing.T) {
	streams, in, out, _ := genericclioptions.NewTestIOStreams()
	o := NewDeleteOptions()
	o.IOStreams = streams

	_, _ = in.Write([]byte(`apiVersion: ctlptl.dev/v1alpha1
kind: Cluster
product: kind
`))

	cd := &fakeClusterController{}
	o.clusterController = cd
	o.Filenames = []string{"-"}
	err := o.run([]string{})
	require.NoError(t, err)
	assert.Equal(t, "cluster.ctlptl.dev/kind-kind deleted\n", out.String())
	assert.Equal(t, "kind-kind", cd.lastDeleteName)
}

func TestDeleteNotFound(t *testing.T) {
	streams, _, _, _ := genericclioptions.NewTestIOStreams()
	o := NewDeleteOptions()
	o.IOStreams = streams

	cd := &fakeClusterController{nextError: errors.NewNotFound(
		schema.GroupResource{Group: "ctlptl.dev", Resource: "clusters"}, "garbage")}
	o.clusterController = cd
	err := o.run([]string{"cluster", "garbage"})
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), `clusters.ctlptl.dev "garbage" not found`)
	}
}

func TestDeleteIgnoreNotFound(t *testing.T) {
	streams, _, out, _ := genericclioptions.NewTestIOStreams()
	o := NewDeleteOptions()
	o.IOStreams = streams

	cd := &fakeClusterController{nextError: errors.NewNotFound(
		schema.GroupResource{Group: "ctlptl.dev", Resource: "clusters"}, "garbage")}
	o.clusterController = cd
	o.IgnoreNotFound = true
	err := o.run([]string{"cluster", "garbage"})
	require.NoError(t, err)
	assert.Equal(t, "", out.String())
}

func TestDeleteRegistryByFile(t *testing.T) {
	streams, in, out, _ := genericclioptions.NewTestIOStreams()
	o := NewDeleteOptions()
	o.IOStreams = streams

	_, _ = in.Write([]byte(`apiVersion: ctlptl.dev/v1alpha1
kind: Registry
port: 5002
`))

	rd := &fakeDeleter{}
	o.registryDeleter = rd
	o.Filenames = []string{"-"}
	err := o.run([]string{})
	require.NoError(t, err)
	assert.Equal(t, "registry.ctlptl.dev/ctlptl-registry deleted\n", out.String())
	assert.Equal(t, "ctlptl-registry", rd.lastName)
}

func TestDeleteCascade(t *testing.T) {
	streams, _, out, _ := genericclioptions.NewTestIOStreams()
	o := NewDeleteOptions()
	o.IOStreams = streams

	rd := &fakeDeleter{}
	cd := &fakeClusterController{
		clusters: map[string]*api.Cluster{
			"kind-kind": &api.Cluster{
				Name:     "kind-kind",
				Registry: "my-registry",
			},
		},
	}
	o.clusterController = cd
	o.registryDeleter = rd
	o.Cascade = "true"
	err := o.run([]string{"cluster", "kind-kind"})
	require.NoError(t, err)
	assert.Equal(t,
		"registry.ctlptl.dev/my-registry deleted\n"+
			"cluster.ctlptl.dev/kind-kind deleted\n",
		out.String())
	assert.Equal(t, "my-registry", rd.lastName)
}

func TestDeleteCascadeStdin(t *testing.T) {
	streams, in, out, _ := genericclioptions.NewTestIOStreams()
	o := NewDeleteOptions()
	o.IOStreams = streams

	rd := &fakeDeleter{}
	cd := &fakeClusterController{
		clusters: map[string]*api.Cluster{
			"kind-kind": &api.Cluster{
				Name:     "kind-kind",
				Registry: "my-registry",
			},
		},
	}
	o.clusterController = cd
	o.registryDeleter = rd
	o.Cascade = "true"
	o.Filenames = []string{"-"}
	_, _ = io.WriteString(in, `
apiVersion: ctlptl.dev/v1alpha1
kind: Registry
name: my-registry
port: 10000
---
apiVersion: ctlptl.dev/v1alpha1
kind: Cluster
product: kind
registry: my-registry
`)
	err := o.run(nil)
	require.NoError(t, err)
	assert.Equal(t,
		"registry.ctlptl.dev/my-registry deleted\n"+
			"cluster.ctlptl.dev/kind-kind deleted\n",
		out.String())
	assert.Equal(t, "my-registry", rd.lastName)
}

func TestDeleteCascadeInvalid(t *testing.T) {
	streams, _, _, _ := genericclioptions.NewTestIOStreams()
	o := NewDeleteOptions()
	o.IOStreams = streams

	o.Cascade = "xxx"
	err := o.run([]string{"cluster", "kind-kind"})
	if assert.Error(t, err) {
		require.Contains(t, err.Error(), "Invalid cascade: xxx. Valid values: true, false.")
	}
}

type fakeDeleter struct {
	lastName  string
	nextError error
}

func (cd *fakeDeleter) Delete(ctx context.Context, name string) error {
	if cd.nextError != nil {
		return cd.nextError
	}
	cd.lastName = name
	return nil
}
07070100000071000081A400000000000000000000000168AFB0EA00000D21000000000000000000000000000000000000002800000000ctlptl-0.8.43/pkg/cmd/docker_desktop.gopackage cmd

import (
	"context"
	"fmt"
	"os"
	"time"

	"github.com/spf13/cobra"
	"gopkg.in/yaml.v3"

	"github.com/tilt-dev/ctlptl/pkg/cluster"
)

func NewDockerDesktopCommand() *cobra.Command {
	var cmd = &cobra.Command{
		Use:   "docker-desktop",
		Short: "Debugging tool for the Docker Desktop client",
		Example: "  ctlptl docker-desktop settings\n" +
			"  ctlptl docker-desktop set KEY VALUE",
	}

	cmd.AddCommand(&cobra.Command{
		Use:   "settings",
		Short: "Print the docker-desktop settings",
		Run:   withDockerDesktopClient("docker-desktop-settings", dockerDesktopSettings),
		Args:  cobra.ExactArgs(0),
	})

	cmd.AddCommand(&cobra.Command{
		Use:   "reset-cluster",
		Short: "Reset the docker-desktop Kubernetes cluster",
		Run:   withDockerDesktopClient("docker-desktop-reset-cluster", dockerDesktopResetCluster),
		Args:  cobra.ExactArgs(0),
	})

	cmd.AddCommand(&cobra.Command{
		Use:   "open",
		Short: "Open docker-desktop",
		Run:   withDockerDesktopClient("docker-desktop-open", dockerDesktopOpen),
		Args:  cobra.ExactArgs(0),
	})

	cmd.AddCommand(&cobra.Command{
		Use:   "quit",
		Short: "Shutdown docker-desktop",
		Run:   withDockerDesktopClient("docker-desktop-quit", dockerDesktopQuit),
		Args:  cobra.ExactArgs(0),
	})

	cmd.AddCommand(&cobra.Command{
		Use:   "set KEY VALUE",
		Short: "Set the docker-desktop settings",
		Long: "Set the docker-desktop settings\n\n" +
			"The first argument is the full path to the setting.\n\n" +
			"The second argument is the desired value.\n\n" +
			"Most settings are scalars. vm.fileSharing is a list of paths separated by commas.",
		Example: "  ctlptl docker-desktop set vm.resources.cpus 2\n" +
			"   ctlptl docker-desktop set kubernetes.enabled false\n" +
			"  ctlptl docker-desktop set vm.fileSharing /Users,/Volumes,/private,/tmp",
		Run:  withDockerDesktopClient("docker-desktop-set", dockerDesktopSet),
		Args: cobra.ExactArgs(2),
	})

	return cmd
}

func withDockerDesktopClient(name string, run func(client cluster.DockerDesktopClient, args []string) error) func(_ *cobra.Command, args []string) {
	return func(_ *cobra.Command, args []string) {
		a, err := newAnalytics()
		if err != nil {
			_, _ = fmt.Fprintf(os.Stderr, "analytics: %v\n", err)
			os.Exit(1)
		}
		a.Incr(fmt.Sprintf("cmd.%s", name), nil)
		defer a.Flush(time.Second)

		c, err := cluster.NewDockerDesktopClient()
		if err != nil {
			_, _ = fmt.Fprintf(os.Stderr, "ctlptl docker-desktop: %v\n", err)
			os.Exit(1)
		}

		err = run(c, args)
		if err != nil {
			_, _ = fmt.Fprintf(os.Stderr, "ctlptl docker-desktop: %v\n", err)
			os.Exit(1)
		}
	}
}

func dockerDesktopSettings(c cluster.DockerDesktopClient, args []string) error {
	settings, err := c.SettingsValues(context.Background())
	if err != nil {
		return err
	}

	encoder := yaml.NewEncoder(os.Stdout)
	return encoder.Encode(settings)
}

func dockerDesktopSet(c cluster.DockerDesktopClient, args []string) error {
	return c.SetSettingValue(context.Background(), args[0], args[1])
}

func dockerDesktopResetCluster(c cluster.DockerDesktopClient, args []string) error {
	return c.ResetCluster(context.Background())
}

func dockerDesktopOpen(c cluster.DockerDesktopClient, args []string) error {
	return c.Open(context.Background())
}

func dockerDesktopQuit(c cluster.DockerDesktopClient, args []string) error {
	return c.Quit(context.Background())
}
07070100000072000081A400000000000000000000000168AFB0EA000001C0000000000000000000000000000000000000001E00000000ctlptl-0.8.43/pkg/cmd/docs.gopackage cmd

import (
	"log"

	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

func newDocsCommand(root *cobra.Command) *cobra.Command {
	return &cobra.Command{
		Use:    "docs [path]",
		Short:  "Generate the markdown docs for ctlptl at [path]",
		Hidden: true,
		Args:   cobra.ExactArgs(1),
		Run: func(_ *cobra.Command, args []string) {
			err := doc.GenMarkdownTree(root, args[0])
			if err != nil {
				log.Fatal(err)
			}
		},
	}
}
07070100000073000081A400000000000000000000000168AFB0EA00001FE0000000000000000000000000000000000000001D00000000ctlptl-0.8.43/pkg/cmd/get.gopackage cmd

import (
	"context"
	"fmt"
	"os"
	"sort"
	"time"

	"github.com/spf13/cobra"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/duration"
	"k8s.io/cli-runtime/pkg/genericclioptions"
	"k8s.io/cli-runtime/pkg/printers"

	"github.com/tilt-dev/ctlptl/pkg/api"
	"github.com/tilt-dev/ctlptl/pkg/cluster"
	"github.com/tilt-dev/ctlptl/pkg/registry"
)

type GetOptions struct {
	*genericclioptions.PrintFlags
	genericclioptions.IOStreams
	StartTime      time.Time
	IgnoreNotFound bool
	FieldSelector  string
}

func NewGetOptions() *GetOptions {
	return &GetOptions{
		PrintFlags: genericclioptions.NewPrintFlags(""),
		IOStreams:  genericclioptions.IOStreams{Out: os.Stdout, ErrOut: os.Stderr, In: os.Stdin},
		StartTime:  time.Now(),
	}
}

func (o *GetOptions) Command() *cobra.Command {
	var cmd = &cobra.Command{
		Use:   "get [type] [name]",
		Short: "Read currently running clusters and registries",
		Long: `Read the status of currently running clusters and registries.

Supports the same flags as kubectl for selecting
and printing fields. The kubectl cheat sheet may help:

https://kubernetes.io/docs/reference/kubectl/cheatsheet/#formatting-output
`,
		Example: "  ctlptl get\n" +
			"  ctlptl get cluster microk8s -o yaml\n" +
			"  ctlptl get cluster kind-kind -o template --template '{{.status.localRegistryHosting.host}}'\n",
		Run:  o.Run,
		Args: cobra.MaximumNArgs(2),
	}

	cmd.SetOut(o.Out)
	cmd.SetErr(o.ErrOut)
	o.AddFlags(cmd)

	cmd.Flags().BoolVar(&o.IgnoreNotFound, "ignore-not-found", o.IgnoreNotFound, "If the requested object does not exist the command will return exit code 0.")
	cmd.Flags().StringVar(&o.FieldSelector, "field-selector", o.FieldSelector, "Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.")

	return cmd
}

func (o *GetOptions) Run(cmd *cobra.Command, args []string) {
	a, err := newAnalytics()
	if err != nil {
		_, _ = fmt.Fprintf(o.ErrOut, "analytics: %v\n", err)
		os.Exit(1)
	}
	a.Incr("cmd.get", nil)
	defer a.Flush(time.Second)

	ctx := context.TODO()
	t := "cluster"
	if len(args) >= 1 {
		t = args[0]
	}
	var resource runtime.Object
	switch t {
	case "registry", "registries":
		c, err := registry.DefaultController(o.IOStreams)
		if err != nil {
			_, _ = fmt.Fprintf(o.ErrOut, "Loading controller: %v\n", err)
			os.Exit(1)
		}

		if len(args) >= 2 {
			resource, err = c.Get(ctx, args[1])
			if err != nil {
				if errors.IsNotFound(err) && o.IgnoreNotFound {
					os.Exit(0)
				}
				_, _ = fmt.Fprintf(o.ErrOut, "%v\n", err)
				os.Exit(1)
			}
		} else {
			resource, err = c.List(ctx, registry.ListOptions{FieldSelector: o.FieldSelector})
			if err != nil {
				_, _ = fmt.Fprintf(o.ErrOut, "List registries: %v\n", err)
				os.Exit(1)
			}
		}

	case "cluster", "clusters":
		c, err := cluster.DefaultController(o.IOStreams)
		if err != nil {
			_, _ = fmt.Fprintf(o.ErrOut, "Loading controller: %v\n", err)
			os.Exit(1)
		}

		if len(args) >= 2 {
			resource, err = normalizedGet(ctx, c, args[1])
			if err != nil {
				if errors.IsNotFound(err) && o.IgnoreNotFound {
					os.Exit(0)
				}
				_, _ = fmt.Fprintf(o.ErrOut, "%v\n", err)
				os.Exit(1)
			}
		} else {
			resource, err = c.List(ctx, cluster.ListOptions{FieldSelector: o.FieldSelector})
			if err != nil {
				_, _ = fmt.Fprintf(o.ErrOut, "List clusters: %v\n", err)
				os.Exit(1)
			}
		}

	default:
		_, _ = fmt.Fprintf(o.ErrOut, "Unrecognized type: %s. Possible values: cluster, registry.\n", t)
		os.Exit(1)
	}

	err = o.Print(resource)
	if err != nil {
		_, _ = fmt.Fprintf(o.ErrOut, "Error: %s\n", err)
		os.Exit(1)
	}
}

func (o *GetOptions) ToPrinter() (printers.ResourcePrinter, error) {
	if !o.OutputFlagSpecified() {
		return printers.NewTablePrinter(printers.PrintOptions{}), nil
	}
	return o.PrintFlags.ToPrinter()
}

func (o *GetOptions) Print(obj runtime.Object) error {
	if obj == nil {
		fmt.Println("No resources found")
		return nil
	}

	printer, err := o.ToPrinter()
	if err != nil {
		return err
	}

	if !o.OutputFlagSpecified() {
		err = printer.PrintObj(o.toTable(obj), o.Out)
		if err != nil {
			return err
		}
	} else {
		// Name printer only supports UnstructuredList for mysterious reasons.
		_, isNamePrinter := printer.(*printers.NamePrinter)
		if isNamePrinter && meta.IsListType(obj) {
			items, err := meta.ExtractList(obj)
			if err != nil {
				return err
			}
			for _, item := range items {
				err = printer.PrintObj(item, o.Out)
				if err != nil {
					return err
				}
			}
		} else {
			err = printer.PrintObj(obj, o.Out)
			if err != nil {
				return err
			}
		}
	}
	return nil
}

func (o *GetOptions) OutputFlagSpecified() bool {
	return o.PrintFlags.OutputFlagSpecified != nil && o.PrintFlags.OutputFlagSpecified()
}

func (o *GetOptions) toTable(obj runtime.Object) runtime.Object {
	switch r := obj.(type) {
	case *api.Registry:
		return o.registriesAsTable([]api.Registry{*r})
	case *api.RegistryList:
		return o.registriesAsTable(r.Items)
	case *api.Cluster:
		return o.clustersAsTable([]api.Cluster{*r})
	case *api.ClusterList:
		return o.clustersAsTable(r.Items)
	default:
		return obj
	}
}

func (o *GetOptions) clustersAsTable(clusters []api.Cluster) runtime.Object {
	table := metav1.Table{
		TypeMeta: metav1.TypeMeta{Kind: "Table", APIVersion: "metav1.k8s.io"},
		ColumnDefinitions: []metav1.TableColumnDefinition{
			metav1.TableColumnDefinition{
				Name: "Current",
				Type: "string",
			},
			metav1.TableColumnDefinition{
				Name: "Name",
				Type: "string",
			},
			metav1.TableColumnDefinition{
				Name: "Product",
				Type: "string",
			},
			metav1.TableColumnDefinition{
				Name: "Age",
				Type: "string",
			},
			metav1.TableColumnDefinition{
				Name: "Registry",
				Type: "string",
			},
		},
	}

	for _, cluster := range clusters {
		age := "unknown"
		cTime := cluster.Status.CreationTimestamp.Time
		if !cTime.IsZero() {
			age = duration.ShortHumanDuration(o.StartTime.Sub(cTime))
		}

		rHost := ""
		if cluster.Status.LocalRegistryHosting != nil {
			rHost = cluster.Status.LocalRegistryHosting.Host
		}
		if rHost == "" {
			rHost = "none"
		}

		current := ""
		if cluster.Status.Current {
			current = "*"
		}

		table.Rows = append(table.Rows, metav1.TableRow{
			Cells: []interface{}{
				current,
				cluster.Name,
				cluster.Product,
				age,
				rHost,
			},
		})
	}

	return &table
}

func (o *GetOptions) registriesAsTable(registries []api.Registry) runtime.Object {
	table := metav1.Table{
		TypeMeta: metav1.TypeMeta{Kind: "Table", APIVersion: "metav1.k8s.io"},
		ColumnDefinitions: []metav1.TableColumnDefinition{
			metav1.TableColumnDefinition{
				Name: "Name",
				Type: "string",
			},
			metav1.TableColumnDefinition{
				Name: "Host Address",
				Type: "int",
			},
			metav1.TableColumnDefinition{
				Name: "Container Address",
				Type: "string",
			},
			metav1.TableColumnDefinition{
				Name: "Age",
				Type: "string",
			},
		},
	}

	// sort chronologically newest -> oldest to match `docker ps` behavior
	sort.SliceStable(registries, func(i, j int) bool {
		return registries[i].Status.CreationTimestamp.After(registries[j].Status.CreationTimestamp.Time)
	})

	for _, registry := range registries {
		age := "unknown"
		cTime := registry.Status.CreationTimestamp.Time
		if !cTime.IsZero() {
			age = duration.ShortHumanDuration(o.StartTime.Sub(cTime))
		}

		hostAddress := "none"
		if registry.Status.ListenAddress != "" && registry.Status.HostPort != 0 {
			hostAddress = fmt.Sprintf("%s:%d", registry.Status.ListenAddress, registry.Status.HostPort)
		}

		containerAddress := "none"
		if registry.Status.ContainerPort != 0 && registry.Status.IPAddress != "" {
			containerAddress = fmt.Sprintf("%s:%d", registry.Status.IPAddress, registry.Status.ContainerPort)
		}

		table.Rows = append(table.Rows, metav1.TableRow{
			Cells: []interface{}{
				registry.Name,
				hostAddress,
				containerAddress,
				age,
			},
		})
	}

	return &table
}
07070100000074000081A400000000000000000000000168AFB0EA00000E1E000000000000000000000000000000000000002200000000ctlptl-0.8.43/pkg/cmd/get_test.gopackage cmd

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/tilt-dev/localregistry-go"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/cli-runtime/pkg/genericclioptions"

	"github.com/tilt-dev/ctlptl/pkg/api"
	"github.com/tilt-dev/ctlptl/pkg/cluster"
	"github.com/tilt-dev/ctlptl/pkg/registry"
)

var createTime = time.Unix(1500000000, 0)
var startTime = time.Unix(1600000000, 0)
var clusterType = cluster.TypeMeta()
var clusterList = &api.ClusterList{
	TypeMeta: cluster.ListTypeMeta(),
	Items: []api.Cluster{
		api.Cluster{
			TypeMeta: clusterType,
			Name:     "microk8s",
			Product:  "microk8s",
			Status: api.ClusterStatus{
				CreationTimestamp: metav1.Time{Time: createTime},
				Current:           true,
			},
		},
		api.Cluster{
			TypeMeta: clusterType,
			Name:     "kind-kind",
			Product:  "KIND",
			Status: api.ClusterStatus{
				CreationTimestamp: metav1.Time{Time: createTime},
				LocalRegistryHosting: &localregistry.LocalRegistryHostingV1{
					Host: "localhost:5000",
				},
			},
		},
	},
}

var registryType = registry.TypeMeta()
var registryList = &api.RegistryList{
	TypeMeta: registry.ListTypeMeta(),
	Items: []api.Registry{
		api.Registry{
			TypeMeta:      registryType,
			Name:          "ctlptl-registry",
			ListenAddress: "127.0.0.1",
			Port:          5001,
			Status: api.RegistryStatus{
				CreationTimestamp: metav1.Time{Time: createTime},
				IPAddress:         "172.17.0.2",
				ListenAddress:     "0.0.0.0",
				ContainerPort:     5000,
				HostPort:          5001,
			},
		},
		api.Registry{
			TypeMeta:      registryType,
			Name:          "ctlptl-registry-loopback",
			ListenAddress: "127.0.0.1",
			Port:          5002,
			Status: api.RegistryStatus{
				CreationTimestamp: metav1.Time{Time: createTime},
				IPAddress:         "172.17.0.3",
				ListenAddress:     "127.0.0.1",
				ContainerPort:     5000,
				HostPort:          5002,
			},
		},
	},
}

func TestDefaultPrint(t *testing.T) {
	streams, _, out, _ := genericclioptions.NewTestIOStreams()
	o := NewGetOptions()
	o.IOStreams = streams
	o.StartTime = startTime

	err := o.Print(o.toTable(clusterList))
	require.NoError(t, err)
	assert.Equal(t, out.String(), `CURRENT   NAME        PRODUCT    AGE   REGISTRY
*         microk8s    microk8s   3y    none
          kind-kind   KIND       3y    localhost:5000
`)
}

func TestYAML(t *testing.T) {
	streams, _, out, _ := genericclioptions.NewTestIOStreams()
	o := NewGetOptions()
	o.IOStreams = streams
	o.StartTime = startTime

	err := o.Command().Flags().Set("output", "yaml")
	require.NoError(t, err)

	err = o.Print(clusterList)
	require.NoError(t, err)
	assert.Equal(t, `apiVersion: ctlptl.dev/v1alpha1
items:
- apiVersion: ctlptl.dev/v1alpha1
  kind: Cluster
  name: microk8s
  product: microk8s
  status:
    creationTimestamp: "2017-07-14T02:40:00Z"
    current: true
- apiVersion: ctlptl.dev/v1alpha1
  kind: Cluster
  name: kind-kind
  product: KIND
  status:
    creationTimestamp: "2017-07-14T02:40:00Z"
    localRegistryHosting:
      host: localhost:5000
kind: ClusterList
`, out.String())
}

func TestRegistryPrint(t *testing.T) {
	streams, _, out, _ := genericclioptions.NewTestIOStreams()
	o := NewGetOptions()
	o.IOStreams = streams
	o.StartTime = startTime

	err := o.Print(o.toTable(registryList))
	require.NoError(t, err)
	assert.Equal(t, `NAME                       HOST ADDRESS     CONTAINER ADDRESS   AGE
ctlptl-registry            0.0.0.0:5001     172.17.0.2:5000     3y
ctlptl-registry-loopback   127.0.0.1:5002   172.17.0.3:5000     3y
`, out.String())
}
07070100000075000081A400000000000000000000000168AFB0EA0000048E000000000000000000000000000000000000002300000000ctlptl-0.8.43/pkg/cmd/normalize.gopackage cmd

import (
	"context"

	"github.com/tilt-dev/clusterid"
	"k8s.io/apimachinery/pkg/api/errors"

	"github.com/tilt-dev/ctlptl/pkg/api"
)

type clusterGetter interface {
	Get(ctx context.Context, name string) (*api.Cluster, error)
}

// We create clusters like:
// ctlptl create cluster kind
// For most clusters, the name of the cluster will match the name of the product.
// But for cases where they don't match, we want
// `ctlptl delete cluster kind` to automatically map to `ctlptl delete cluster kind-kind`
func normalizedGet(ctx context.Context, controller clusterGetter, name string) (*api.Cluster, error) {
	cluster, err := controller.Get(ctx, name)
	if err == nil {
		return cluster, nil
	}

	if !errors.IsNotFound(err) {
		return nil, err
	}

	origErr := err
	retryName := ""
	if name == string(clusterid.ProductKIND) {
		retryName = clusterid.ProductKIND.DefaultClusterName()
	} else if name == string(clusterid.ProductK3D) {
		retryName = clusterid.ProductK3D.DefaultClusterName()
	}

	if retryName == "" {
		return nil, origErr
	}

	cluster, err = controller.Get(ctx, retryName)
	if err == nil {
		return cluster, nil
	}
	return nil, origErr
}
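
// A minimal sketch (not part of the upstream source) of the fallback behavior,
// assuming only the default kind cluster exists:
//
//	c, err := normalizedGet(ctx, controller, "kind")
//	// controller.Get(ctx, "kind") returns NotFound, so normalizedGet retries
//	// with clusterid.ProductKIND.DefaultClusterName() ("kind-kind") and
//	// returns that cluster instead.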
07070100000076000081A400000000000000000000000168AFB0EA000002E9000000000000000000000000000000000000001E00000000ctlptl-0.8.43/pkg/cmd/root.gopackage cmd

import (
	"github.com/spf13/cobra"
	"github.com/tilt-dev/wmclient/pkg/analytics"
)

func NewRootCommand() *cobra.Command {
	var rootCmd = &cobra.Command{
		Use:   "ctlptl [command]",
		Short: "Mess around with local Kubernetes clusters without consequences",
		Example: "  ctlptl get clusters\n" +
			"  ctlptl apply -f my-cluster.yaml",
	}

	rootCmd.AddCommand(NewCreateOptions().Command())
	rootCmd.AddCommand(NewGetOptions().Command())
	rootCmd.AddCommand(NewApplyOptions().Command())
	rootCmd.AddCommand(NewDeleteOptions().Command())
	rootCmd.AddCommand(NewDockerDesktopCommand())
	rootCmd.AddCommand(newDocsCommand(rootCmd))
	rootCmd.AddCommand(analytics.NewCommand())
	rootCmd.AddCommand(NewSocatCommand())

	return rootCmd
}
07070100000077000081A400000000000000000000000168AFB0EA00000519000000000000000000000000000000000000001F00000000ctlptl-0.8.43/pkg/cmd/socat.gopackage cmd

import (
	"context"
	"fmt"
	"os"
	"strconv"

	"github.com/spf13/cobra"
	"k8s.io/cli-runtime/pkg/genericclioptions"

	"github.com/tilt-dev/ctlptl/internal/dctr"
	"github.com/tilt-dev/ctlptl/internal/socat"
)

func NewSocatCommand() *cobra.Command {
	var cmd = &cobra.Command{
		Use:   "socat",
		Short: "Use socat to connect components. Experimental.",
	}

	cmd.AddCommand(&cobra.Command{
		Use:     "connect-remote-docker",
		Short:   "Connects a local port to a remote port on a machine running Docker",
		Example: "  ctlptl socat connect-remote-docker [port]\n",
		Run:     connectRemoteDocker,
		Args:    cobra.ExactArgs(1),
	})

	return cmd
}

func connectRemoteDocker(cmd *cobra.Command, args []string) {
	port, err := strconv.Atoi(args[0])
	if err != nil {
		_, _ = fmt.Fprintf(os.Stderr, "connect-remote-docker: %v\n", err)
		os.Exit(1)
	}

	ctx := context.Background()
	streams := genericclioptions.IOStreams{In: os.Stdin, Out: os.Stdout, ErrOut: os.Stderr}
	dockerCLI, err := dctr.NewCLI(streams)
	if err != nil {
		_, _ = fmt.Fprintf(os.Stderr, "connect-remote-docker: %v\n", err)
		os.Exit(1)
	}

	c := socat.NewController(dockerCLI)
	err = c.ConnectRemoteDockerPort(ctx, port)
	if err != nil {
		_, _ = fmt.Fprintf(os.Stderr, "connect-remote-docker: %v\n", err)
		os.Exit(1)
	}
}
07070100000078000041ED00000000000000000000000268AFB0EA00000000000000000000000000000000000000000000001900000000ctlptl-0.8.43/pkg/docker07070100000079000081A400000000000000000000000168AFB0EA0000083F000000000000000000000000000000000000002300000000ctlptl-0.8.43/pkg/docker/docker.gopackage docker

import (
	"strings"
)

const ContainerLabelRole = "dev.tilt.ctlptl.role"

// Checks whether the Docker daemon is running on a local machine.
// Remote docker daemons will likely need a port forwarder to work properly.
func IsLocalHost(dockerHost string) bool {
	return dockerHost == "" ||

		// Check all the "standard" docker localhosts.
		// https://github.com/docker/cli/blob/a32cd16160f1b41c1c4ae7bee4dac929d1484e59/opts/hosts.go#L22
		strings.HasPrefix(dockerHost, "tcp://localhost:") ||
		strings.HasPrefix(dockerHost, "tcp://127.0.0.1:") ||

		// https://github.com/moby/moby/blob/master/client/client_windows.go#L4
		strings.HasPrefix(dockerHost, "npipe:") ||

		// https://github.com/moby/moby/blob/master/client/client_unix.go#L6
		strings.HasPrefix(dockerHost, "unix:")
}

// Checks whether the DOCKER_HOST looks like a local Docker Engine.
func IsLocalDockerEngineHost(dockerHost string) bool {
	if strings.HasPrefix(dockerHost, "unix:") {
		// Many tools (like colima) try to masquerade as Docker Desktop but run
		// on a different socket.
		// see:
		// https://github.com/tilt-dev/ctlptl/issues/196
		// https://docs.docker.com/desktop/faqs/#how-do-i-connect-to-the-remote-docker-engine-api
		return strings.Contains(dockerHost, "/var/run/docker.sock") ||
			// Docker Desktop for Linux - socket is in ~/.docker/desktop/docker.sock
			strings.HasSuffix(dockerHost, "/.docker/desktop/docker.sock") ||
			// Docker Desktop for Mac 4.13+ - socket is in ~/.docker/run/docker.sock
			strings.HasSuffix(dockerHost, "/.docker/run/docker.sock")
	}

	// Docker daemons on other local protocols are treated as local Docker Engines.
	return IsLocalHost(dockerHost)
}

// Checks whether the DOCKER_HOST looks like a local Docker Desktop.
// Docker Desktop wraps a local Docker Engine with additional APIs for VM management.
func IsLocalDockerDesktop(dockerHost string, os string) bool {
	if os == "darwin" || os == "windows" {
		return IsLocalDockerEngineHost(dockerHost)
	}
	return strings.HasPrefix(dockerHost, "unix:") &&
		strings.HasSuffix(dockerHost, "/.docker/desktop/docker.sock")
}
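
// Illustrative expectations (a sketch mirroring the cases in docker_test.go,
// not part of the upstream source):
//
//	IsLocalHost("unix:///Users/USER/.colima/docker.sock")             // true
//	IsLocalDockerEngineHost("unix:///Users/USER/.colima/docker.sock") // false (colima, not a Docker Desktop socket)
//	IsLocalDockerDesktop("unix:///var/run/docker.sock", "darwin")     // true
//	IsLocalDockerDesktop("unix:///var/run/docker.sock", "linux")      // false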
0707010000007A000081A400000000000000000000000168AFB0EA00000889000000000000000000000000000000000000002800000000ctlptl-0.8.43/pkg/docker/docker_test.gopackage docker

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

type dockerHostTestCase struct {
	host        string
	localDaemon bool
	localEngine bool
}

func TestIsLocalDockerHost(t *testing.T) {
	cases := []dockerHostTestCase{
		dockerHostTestCase{"", true, true},
		dockerHostTestCase{"tcp://localhost:2375", true, true},
		dockerHostTestCase{"tcp://127.0.0.1:2375", true, true},
		dockerHostTestCase{"npipe:////./pipe/docker_engine", true, true},
		dockerHostTestCase{"unix:///var/run/docker.sock", true, true},
		dockerHostTestCase{"tcp://cluster:2375", false, false},
		dockerHostTestCase{"http://cluster:2375", false, false},
		dockerHostTestCase{"unix:///Users/USER/.colima/docker.sock", true, false},
		dockerHostTestCase{"unix:///Users/USER/.docker/desktop/docker.sock", true, true},
		dockerHostTestCase{"unix:///Users/USER/.docker/run/docker.sock", true, true},
	}
	for i, c := range cases {
		c := c
		t.Run(fmt.Sprintf("%s-%d", t.Name(), i), func(t *testing.T) {
			assert.Equal(t, c.localDaemon, IsLocalHost(c.host))
			assert.Equal(t, c.localEngine, IsLocalDockerEngineHost(c.host))
		})
	}
}

type dockerDesktopTestCase struct {
	host     string
	os       string
	expected bool
}

func TestIsLocalDockerDesktop(t *testing.T) {
	cases := []dockerDesktopTestCase{
		dockerDesktopTestCase{"", "linux", false},
		dockerDesktopTestCase{"tcp://localhost:2375", "linux", false},
		dockerDesktopTestCase{"tcp://127.0.0.1:2375", "linux", false},
		dockerDesktopTestCase{"npipe:////./pipe/docker_engine", "windows", true},
		dockerDesktopTestCase{"unix:///var/run/docker.sock", "darwin", true},
		dockerDesktopTestCase{"unix:///var/run/docker.sock", "linux", false},
		dockerDesktopTestCase{"tcp://cluster:2375", "linux", false},
		dockerDesktopTestCase{"http://cluster:2375", "linux", false},
		dockerDesktopTestCase{"unix:///Users/USER/.colima/docker.sock", "linux", false},
		dockerDesktopTestCase{"unix:///Users/USER/.docker/desktop/docker.sock", "linux", true},
	}
	for i, c := range cases {
		c := c
		t.Run(fmt.Sprintf("%s-%d", t.Name(), i), func(t *testing.T) {
			assert.Equal(t, c.expected, IsLocalDockerDesktop(c.host, c.os))
		})
	}
}
0707010000007B000041ED00000000000000000000000268AFB0EA00000000000000000000000000000000000000000000001B00000000ctlptl-0.8.43/pkg/encoding0707010000007C000081A400000000000000000000000168AFB0EA000005D6000000000000000000000000000000000000002700000000ctlptl-0.8.43/pkg/encoding/encoding.gopackage encoding

import (
	"bufio"
	"bytes"
	"fmt"
	"io"

	"github.com/pkg/errors"
	"gopkg.in/yaml.v3"
	"k8s.io/apimachinery/pkg/runtime"

	"github.com/tilt-dev/ctlptl/pkg/api"
)

// Parses a stream of YAML.
func ParseStream(r io.Reader) ([]runtime.Object, error) {
	var current bytes.Buffer
	reader := io.TeeReader(bufio.NewReader(r), &current)

	objDecoder := yaml.NewDecoder(&current)
	objDecoder.KnownFields(true)

	typeDecoder := yaml.NewDecoder(reader)
	result := []runtime.Object{}
	for {
		tm := api.TypeMeta{}
		if err := typeDecoder.Decode(&tm); err != nil {
			if err == io.EOF {
				break
			}
			return nil, err
		}

		obj, err := determineObj(tm)
		if err != nil {
			return nil, err
		}

		if err := objDecoder.Decode(obj); err != nil {
			if err == io.EOF {
				break
			}
			return nil, errors.Wrapf(err, "decoding %s", tm)
		}

		result = append(result, obj)
	}
	return result, nil
}
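
// A minimal usage sketch (illustrative only): parse two ctlptl resources from
// a single YAML stream and get back typed objects.
//
//	objs, err := ParseStream(strings.NewReader(
//		"apiVersion: ctlptl.dev/v1alpha1\nkind: Registry\nname: ctlptl-registry\n" +
//			"---\n" +
//			"apiVersion: ctlptl.dev/v1alpha1\nkind: Cluster\nproduct: kind\n"))
//	// On success, objs[0] is *api.Registry and objs[1] is *api.Cluster.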

// Determines the object corresponding to this type meta
func determineObj(tm api.TypeMeta) (runtime.Object, error) {
	// decode specific (apiVersion, kind)
	switch tm.APIVersion {
	// Currently we only support ctlptl.dev/v1alpha1
	case "ctlptl.dev/v1alpha1":
		switch tm.Kind {
		case "Cluster":
			return &api.Cluster{}, nil
		case "Registry":
			return &api.Registry{}, nil
		default:
			return nil, fmt.Errorf("ctlptl config must contain: `kind: Cluster` or `kind: Registry`")
		}
	default:
		return nil, fmt.Errorf("ctlptl config must contain: `apiVersion: ctlptl.dev/v1alpha1`")
	}
}
0707010000007D000081A400000000000000000000000168AFB0EA00000597000000000000000000000000000000000000002C00000000ctlptl-0.8.43/pkg/encoding/encoding_test.gopackage encoding

import (
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/tilt-dev/ctlptl/pkg/api"
)

func TestParse(t *testing.T) {
	yaml := `
apiVersion: ctlptl.dev/v1alpha1
kind: Cluster
name: microk8s
product: microk8s
---
apiVersion: ctlptl.dev/v1alpha1
kind: Cluster
name: kind-kind
product: KIND
`
	data, err := ParseStream(strings.NewReader(yaml))
	assert.NoError(t, err)
	require.Equal(t, 2, len(data))
	assert.Equal(t, "microk8s", data[0].(*api.Cluster).Name)
	assert.Equal(t, "kind-kind", data[1].(*api.Cluster).Name)
}

func TestParseTypo(t *testing.T) {
	yaml := `
apiVersion: ctlptl.dev/v1alpha1
kind: Cluster
nameTypo: microk8s
product: microk8s
`
	_, err := ParseStream(strings.NewReader(yaml))
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), "decoding {Cluster ctlptl.dev/v1alpha1}: yaml: unmarshal errors:\n  line 4: field nameTypo not found in type api.Cluster")
	}
}

func TestParseTypoSecondObject(t *testing.T) {
	yaml := `
apiVersion: ctlptl.dev/v1alpha1
kind: Cluster
name: microk8s
product: microk8s
---
apiVersion: ctlptl.dev/v1alpha1
kind: Cluster
nameTypo: microk8s
product: microk8s
`
	_, err := ParseStream(strings.NewReader(yaml))
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), "decoding {Cluster ctlptl.dev/v1alpha1}: yaml: unmarshal errors:\n  line 9: field nameTypo not found in type api.Cluster")
	}
}
0707010000007E000041ED00000000000000000000000268AFB0EA00000000000000000000000000000000000000000000001B00000000ctlptl-0.8.43/pkg/registry0707010000007F000081A400000000000000000000000168AFB0EA00000206000000000000000000000000000000000000002600000000ctlptl-0.8.43/pkg/registry/options.gopackage registry

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"

	"github.com/tilt-dev/ctlptl/pkg/api"
)

type ListOptions struct {
	FieldSelector string
}

type registryFields api.Registry

func (cf *registryFields) Has(field string) bool {
	return field == "name"
}

func (cf *registryFields) Get(field string) string {
	if field == "name" {
		return (*api.Registry)(cf).Name
	}
	if field == "port" {
		return fmt.Sprintf("%d", (*api.Registry)(cf).Port)
	}
	return ""
}

var _ fields.Fields = &registryFields{}
07070100000080000081A400000000000000000000000168AFB0EA0000332C000000000000000000000000000000000000002700000000ctlptl-0.8.43/pkg/registry/registry.gopackage registry

import (
	"context"
	"fmt"
	"reflect"
	"regexp"
	"sort"
	"strings"
	"time"

	"github.com/distribution/reference"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/network"
	"github.com/docker/go-connections/nat"
	"github.com/phayes/freeport"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/cli-runtime/pkg/genericclioptions"

	"github.com/tilt-dev/ctlptl/internal/dctr"
	"github.com/tilt-dev/ctlptl/internal/socat"
	"github.com/tilt-dev/ctlptl/pkg/api"
	"github.com/tilt-dev/ctlptl/pkg/docker"
)

var (
	typeMeta      = api.TypeMeta{APIVersion: "ctlptl.dev/v1alpha1", Kind: "Registry"}
	listTypeMeta  = api.TypeMeta{APIVersion: "ctlptl.dev/v1alpha1", Kind: "RegistryList"}
	groupResource = schema.GroupResource{Group: "ctlptl.dev", Resource: "registries"}
)

const DefaultRegistryImageRef = "docker.io/library/registry:2" // The registry everyone uses.

// https://github.com/moby/moby/blob/v20.10.3/api/types/types.go#L313
const containerStateRunning = "running"

// ctlptlLabels are labels applied on create to registry containers.
//
// These are not considered for equality purposes, as ctlptl supports interop
// with local cluster tools that support self-managing a registry (e.g. k3d),
// so we don't want to unnecessarily re-create them. However, if ctlptl is used
// to modify (i.e. delete&create) a registry, the new object _will_ include
// these labels.
var ctlptlLabels = map[string]string{
	docker.ContainerLabelRole: "registry",
}

func TypeMeta() api.TypeMeta {
	return typeMeta
}

func ListTypeMeta() api.TypeMeta {
	return listTypeMeta
}

func FillDefaults(registry *api.Registry) {
	// Create a default name if one isn't in the YAML.
	// Unlike clusters, registries always default to the same name.
	if registry.Name == "" {
		registry.Name = "ctlptl-registry"
	}
}

type socatController interface {
	ConnectRemoteDockerPort(ctx context.Context, port int) error
}

type Controller struct {
	iostreams genericclioptions.IOStreams
	dockerCLI dctr.CLI
	socat     socatController
}

func NewController(iostreams genericclioptions.IOStreams, dockerCLI dctr.CLI) *Controller {
	return &Controller{
		iostreams: iostreams,
		dockerCLI: dockerCLI,
		socat:     socat.NewController(dockerCLI),
	}
}

func DefaultController(iostreams genericclioptions.IOStreams) (*Controller, error) {
	dockerCLI, err := dctr.NewCLI(iostreams)
	if err != nil {
		return nil, err
	}

	return &Controller{
		iostreams: iostreams,
		dockerCLI: dockerCLI,
		socat:     socat.NewController(dockerCLI),
	}, nil
}

func (c *Controller) Get(ctx context.Context, name string) (*api.Registry, error) {
	list, err := c.List(ctx, ListOptions{FieldSelector: fmt.Sprintf("name=%s", name)})
	if err != nil {
		return nil, err
	}

	if len(list.Items) == 0 {
		return nil, errors.NewNotFound(groupResource, name)
	}

	item := list.Items[0]
	return &item, nil
}

func (c *Controller) List(ctx context.Context, options ListOptions) (*api.RegistryList, error) {
	selector, err := fields.ParseSelector(options.FieldSelector)
	if err != nil {
		return nil, err
	}

	containers, err := c.registryContainers(ctx)
	if err != nil {
		return nil, err
	}

	result := []api.Registry{}
	for _, container := range containers {
		if len(container.Names) == 0 {
			continue
		}
		name := strings.TrimPrefix(container.Names[0], "/")
		created := time.Unix(container.Created, 0)

		inspect, err := c.dockerCLI.Client().ContainerInspect(ctx, container.ID)
		if err != nil {
			return nil, err
		}
		env := inspect.Config.Env
		netSummary := container.NetworkSettings
		ipAddress := ""
		networks := []string{}
		if netSummary != nil {
			for network := range netSummary.Networks {
				networks = append(networks, network)
			}
			bridge, ok := netSummary.Networks["bridge"]
			if ok && bridge != nil {
				ipAddress = bridge.IPAddress
			}
		}
		sort.Strings(networks)

		var warnings []string
		listenAddress, hostPort, containerPort, err := c.ipAndPortsFrom(container.Ports)
		if err != nil {
			warnings = append(warnings, fmt.Sprintf("Unexpected registry ports: %+v", container.Ports))
		}

		registry := &api.Registry{
			TypeMeta: typeMeta,
			Name:     name,
			Port:     hostPort,
			Status: api.RegistryStatus{
				CreationTimestamp: metav1.Time{Time: created},
				ContainerID:       container.ID,
				IPAddress:         ipAddress,
				HostPort:          hostPort,
				ListenAddress:     listenAddress,
				ContainerPort:     containerPort,
				Networks:          networks,
				State:             container.State,
				Labels:            container.Labels,
				Image:             container.Image,
				Env:               env,
				Warnings:          warnings,
			},
		}

		if !selector.Matches((*registryFields)(registry)) {
			continue
		}
		result = append(result, *registry)
	}
	return &api.RegistryList{
		TypeMeta: listTypeMeta,
		Items:    result,
	}, nil
}

func (c *Controller) ipAndPortsFrom(ports []container.Port) (listenAddress string, hostPort int, containerPort int, err error) {
	for _, port := range ports {
		if port.PrivatePort == 5000 {
			return port.IP, int(port.PublicPort), int(port.PrivatePort), nil
		}
	}
	return "", 0, 0, fmt.Errorf("could not find registry port")
}

// Compare the desired registry against the existing registry, and reconcile
// the two to match.
func (c *Controller) Apply(ctx context.Context, desired *api.Registry) (*api.Registry, error) {
	FillDefaults(desired)
	existing, err := c.Get(ctx, desired.Name)
	if err != nil && !errors.IsNotFound(err) {
		return nil, err
	}

	if existing == nil {
		existing = &api.Registry{}
	}

	needsDelete := false
	if existing.Port != 0 && desired.Port != 0 && existing.Port != desired.Port {
		// If the port has changed, let's delete the registry and recreate it.
		needsDelete = true
	}
	// If the desired image is different
	// from the existing image, we need
	// to delete the registry and recreate it.
	if existing.Status.Image != "" && desired.Image != "" &&
		!imagesRefsEqual(existing.Status.Image, desired.Image) {
		needsDelete = true
	}
	if existing.Status.State != containerStateRunning {
		// If the registry has died, we need to recreate.
		needsDelete = true
	}
	for key, value := range desired.Labels {
		if existing.Status.Labels[key] != value {
			// If the user asked for a label that's not currently on
			// the container, the only way to add it is to re-create the whole container.
			needsDelete = true
		}
	}

	r := regexp.MustCompile("^(?P<key>[^=]+)=(?P<value>.*)")
	desiredEnvs := make(map[string]string)
	for _, value := range desired.Env {
		m := r.FindStringSubmatch(value)
		if m != nil {
			k := m[r.SubexpIndex("key")]
			v := m[r.SubexpIndex("value")]
			if k != "PATH" {
				desiredEnvs[k] = v
			}
		}
	}
	existingEnvs := make(map[string]string)
	for _, value := range existing.Status.Env {
		m := r.FindStringSubmatch(value)
		if m != nil {
			k := m[r.SubexpIndex("key")]
			v := m[r.SubexpIndex("value")]
			if k != "PATH" {
				existingEnvs[k] = v
			}
		}
	}
	if _, ok := desiredEnvs["REGISTRY_STORAGE_DELETE_ENABLED"]; !ok {
		desiredEnvs["REGISTRY_STORAGE_DELETE_ENABLED"] = "true"
		desired.Env = append(desired.Env, "REGISTRY_STORAGE_DELETE_ENABLED=true")
	}
	if eq := reflect.DeepEqual(desiredEnvs, existingEnvs); !eq {
		needsDelete = true
	}

	if needsDelete && existing.Name != "" {
		err = c.Delete(ctx, existing.Name)
		if err != nil {
			return nil, err
		}
		existing = existing.DeepCopy()
		existing.Status.ContainerID = ""
	}

	if existing.Status.ContainerID != "" {
		// If we got to this point, and the container id exists, then the registry is up to date!
		return existing, nil
	}

	_, _ = fmt.Fprintf(c.iostreams.ErrOut, "Creating registry %q...\n", desired.Name)

	err = dctr.RemoveIfNecessary(ctx, c.dockerCLI.Client(), desired.Name)
	if err != nil {
		return nil, err
	}

	exposedPorts, portBindings, hostPort, err := c.portConfigs(existing, desired)
	if err != nil {
		return nil, err
	}

	image := c.imageConfig(existing, desired)

	err = dctr.Run(
		ctx,
		c.dockerCLI,
		desired.Name,
		&container.Config{
			Hostname:     desired.Name,
			Image:        image,
			ExposedPorts: exposedPorts,
			Labels:       c.labelConfigs(existing, desired),
			Env:          desired.Env,
		},
		&container.HostConfig{
			RestartPolicy: container.RestartPolicy{Name: "always"},
			PortBindings:  portBindings,
		},
		&network.NetworkingConfig{})
	if err != nil {
		return nil, err
	}

	err = c.maybeCreateForwarder(ctx, hostPort)
	if err != nil {
		return nil, err
	}

	return c.Get(ctx, desired.Name)
}

// Compute the port configs for the ContainerCreate() call.
func (c *Controller) portConfigs(existing *api.Registry, desired *api.Registry) (map[nat.Port]struct{}, map[nat.Port][]nat.PortBinding, int, error) {
	// Preserve existing address by default
	hostPort := existing.Status.HostPort
	listenAddress := existing.Status.ListenAddress

	// Overwrite with desired behavior if specified.
	if desired.Port != 0 {
		hostPort = desired.Port
	}
	if desired.ListenAddress != "" {
		listenAddress = desired.ListenAddress
	}

	// Fill in defaults.
	if hostPort == 0 {
		freePort, err := freeport.GetFreePort()
		if err != nil {
			return nil, nil, 0, fmt.Errorf("creating registry: %v", err)
		}
		hostPort = freePort
	}

	if listenAddress == "" {
		// explicitly bind to IPv4 to prevent issues with the port forward when connected to a Docker network with IPv6 enabled
		// see https://github.com/docker/for-mac/issues/6015
		listenAddress = "127.0.0.1"
	}

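	// Expose the registry's container port 5000/tcp and bind it to
	// listenAddress:hostPort, roughly equivalent to
	// `docker run -p 127.0.0.1:5005:5000/tcp` when port 5005 is requested.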
	port := nat.Port("5000/tcp")
	portSet := map[nat.Port]struct{}{
		port: struct{}{},
	}
	portMap := map[nat.Port][]nat.PortBinding{
		port: []nat.PortBinding{
			{
				HostIP:   listenAddress,
				HostPort: fmt.Sprintf("%d", hostPort),
			},
		},
	}
	return portSet, portMap, hostPort, nil
}

// Compute the label configs for the ContainerCreate() call.
func (c *Controller) labelConfigs(existing *api.Registry, desired *api.Registry) map[string]string {
	newLabels := make(map[string]string, len(existing.Status.Labels)+len(desired.Labels)+len(ctlptlLabels))

	// Preserve existing labels.
	for k, v := range existing.Status.Labels {
		newLabels[k] = v
	}

	// Overwrite with new labels.
	for k, v := range desired.Labels {
		newLabels[k] = v
	}

	for k, v := range ctlptlLabels {
		newLabels[k] = v
	}

	return newLabels
}

// Compute the image for the ContainerCreate() call.
func (c *Controller) imageConfig(existing *api.Registry, desired *api.Registry) string {
	// Desired image takes precedence.
	if desired.Image != "" {
		return desired.Image
	}

	// Preserve existing image when possible.
	if existing.Status.Image != "" {
		return existing.Status.Image
	}

	return DefaultRegistryImageRef
}

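// maybeCreateForwarder forwards the registry's host port to localhost via
// socat when the Docker daemon is remote (e.g. DOCKER_HOST points elsewhere);
// it is a no-op for a local daemon.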
func (c *Controller) maybeCreateForwarder(ctx context.Context, port int) error {
	if docker.IsLocalHost(c.dockerCLI.Client().DaemonHost()) {
		return nil
	}

	_, _ = fmt.Fprintf(c.iostreams.ErrOut,
		" 🎮 Env DOCKER_HOST set. Assuming remote Docker and forwarding registry to localhost:%d\n", port)
	return c.socat.ConnectRemoteDockerPort(ctx, port)
}

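// registryContainers lists candidate registry containers: those labeled with
// the ctlptl registry role plus those running the default registry image,
// de-duplicated by container ID and sorted for stable output.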
func (c *Controller) registryContainers(ctx context.Context) ([]container.Summary, error) {
	containers := make(map[string]container.Summary)

	roleContainers, err := c.dockerCLI.Client().ContainerList(ctx, container.ListOptions{
		Filters: filters.NewArgs(
			filters.Arg("label", fmt.Sprintf("%s=registry", docker.ContainerLabelRole))),
		All: true,
	})
	if err != nil {
		return nil, err
	}
	for i := range roleContainers {
		containers[roleContainers[i].ID] = roleContainers[i]
	}

	ancestorContainers, err := c.dockerCLI.Client().ContainerList(ctx, container.ListOptions{
		Filters: filters.NewArgs(
			filters.Arg("ancestor", DefaultRegistryImageRef)),
		All: true,
	})
	if err != nil {
		return nil, err
	}
	for i := range ancestorContainers {
		containers[ancestorContainers[i].ID] = ancestorContainers[i]
	}

	result := make([]container.Summary, 0, len(containers))
	for _, c := range containers {
		result = append(result, c)
	}
	sort.Slice(result, func(i, j int) bool {
		return result[i].ID < result[j].ID
	})
	return result, nil
}

// Delete the given registry.
func (c *Controller) Delete(ctx context.Context, name string) error {
	registry, err := c.Get(ctx, name)
	if err != nil {
		return err
	}

	cID := registry.Status.ContainerID
	if cID == "" {
		return fmt.Errorf("container not running registry: %s", name)
	}

	return c.dockerCLI.Client().ContainerRemove(ctx, registry.Status.ContainerID, container.RemoveOptions{
		Force: true,
	})
}

// imagesRefsEqual returns true if the normalized versions of the refs are equal.
//
// If the normalized versions are not equal OR either ref is invalid, false
// is returned.
func imagesRefsEqual(a, b string) bool {
	aRef, err := reference.ParseNormalizedNamed(a)
	if err != nil {
		return false
	}

	bRef, err := reference.ParseNormalizedNamed(b)
	if err != nil {
		return false
	}

	return aRef.String() == bRef.String()
}
07070100000081000081A400000000000000000000000168AFB0EA00004B55000000000000000000000000000000000000002C00000000ctlptl-0.8.43/pkg/registry/registry_test.gopackage registry

import (
	"context"
	"fmt"
	"io"
	"os"
	"testing"
	"time"

	"github.com/distribution/reference"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/image"
	"github.com/docker/docker/api/types/network"
	registrytypes "github.com/docker/docker/api/types/registry"
	"github.com/docker/docker/api/types/system"
	"github.com/docker/docker/registry"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/cli-runtime/pkg/genericclioptions"

	"github.com/tilt-dev/ctlptl/internal/dctr"
	"github.com/tilt-dev/ctlptl/pkg/api"
)

func kindRegistry() container.Summary {
	return container.Summary{
		ID:      "a815c0ec15f1f7430bd402e3fffe65026dd692a1a99861a52b3e30ad6e253a08",
		Names:   []string{"/kind-registry"},
		Image:   DefaultRegistryImageRef,
		ImageID: "sha256:2d4f4b5309b1e41b4f83ae59b44df6d673ef44433c734b14c1c103ebca82c116",
		Command: "/entrypoint.sh /etc/docker/registry/config.yml",
		Created: 1603483645,
		Labels:  map[string]string{"dev.tilt.ctlptl.role": "registry"},
		Ports: []container.Port{
			container.Port{IP: "127.0.0.1", PrivatePort: 5000, PublicPort: 5001, Type: "tcp"},
		},
		SizeRw:     0,
		SizeRootFs: 0,
		State:      "running",
		Status:     "Up 2 hours",
		NetworkSettings: &container.NetworkSettingsSummary{
			Networks: map[string]*network.EndpointSettings{
				"bridge": &network.EndpointSettings{
					IPAddress: "172.0.1.2",
				},
				"kind": &network.EndpointSettings{
					IPAddress: "172.0.1.3",
				},
			},
		},
	}
}

func kindRegistryLoopback() container.Summary {
	return container.Summary{
		ID:      "d62f2587ff7b03858f144d3cf83c789578a6d6403f8b82a459ab4e317917cd42",
		Names:   []string{"/kind-registry-loopback"},
		Image:   DefaultRegistryImageRef,
		ImageID: "sha256:2d4f4b5309b1e41b4f83ae59b44df6d673ef44433c734b14c1c103ebca82c116",
		Command: "/entrypoint.sh /etc/docker/registry/config.yml",
		Created: 1603483646,
		Labels:  map[string]string{"dev.tilt.ctlptl.role": "registry"},
		Ports: []container.Port{
			container.Port{IP: "127.0.0.1", PrivatePort: 5000, PublicPort: 5001, Type: "tcp"},
		},
		SizeRw:     0,
		SizeRootFs: 0,
		State:      "running",
		Status:     "Up 2 hours",
		NetworkSettings: &container.NetworkSettingsSummary{
			Networks: map[string]*network.EndpointSettings{
				"bridge": &network.EndpointSettings{
					IPAddress: "172.0.1.2",
				},
				"kind": &network.EndpointSettings{
					IPAddress: "172.0.1.3",
				},
			},
		},
	}
}

func kindRegistryCustomImage() container.Summary {
	return container.Summary{
		ID:      "c7f123e65474f951c3bc4232c888616c0f9b1052c7ae706a3b6d4701bea6e90d",
		Names:   []string{"/kind-registry-custom-image"},
		Image:   "fake.tilt.dev/my-registry-image:latest",
		ImageID: "sha256:0ac33e5f5afa79e084075e8698a22d574816eea8d7b7d480586835657c3e1c8b",
		Command: "/entrypoint.sh /etc/docker/registry/config.yml",
		Created: 1603483647,
		Labels:  map[string]string{"dev.tilt.ctlptl.role": "registry"},
		Ports: []container.Port{
			container.Port{IP: "127.0.0.1", PrivatePort: 5000, PublicPort: 5001, Type: "tcp"},
		},
		SizeRw:     0,
		SizeRootFs: 0,
		State:      "running",
		Status:     "Up 2 hours",
		NetworkSettings: &container.NetworkSettingsSummary{
			Networks: map[string]*network.EndpointSettings{
				"bridge": &network.EndpointSettings{
					IPAddress: "172.0.1.2",
				},
				"kind": &network.EndpointSettings{
					IPAddress: "172.0.1.3",
				},
			},
		},
	}
}

func registryBadPorts() container.Summary {
	return container.Summary{
		ID:      "a815c0ec15f1f7430bd402e3fffe65026dd692a1a99861a52b3e30ad6e253a08",
		Names:   []string{"/kind-registry"},
		Image:   DefaultRegistryImageRef,
		ImageID: "sha256:2d4f4b5309b1e41b4f83ae59b44df6d673ef44433c734b14c1c103ebca82c116",
		Command: "/entrypoint.sh /etc/docker/registry/config.yml",
		Created: 1603483645,
		Labels:  map[string]string{"dev.tilt.ctlptl.role": "registry"},
		Ports: []container.Port{
			container.Port{IP: "127.0.0.1", PrivatePort: 5001, PublicPort: 5002, Type: "tcp"},
		},
		SizeRw:     0,
		SizeRootFs: 0,
		State:      "running",
		Status:     "Up 2 hours",
		NetworkSettings: &container.NetworkSettingsSummary{
			Networks: map[string]*network.EndpointSettings{
				"bridge": &network.EndpointSettings{
					IPAddress: "172.0.1.2",
				},
				"kind": &network.EndpointSettings{
					IPAddress: "172.0.1.3",
				},
			},
		},
	}
}

func TestListRegistries(t *testing.T) {
	f := newFixture(t)
	defer f.TearDown()

	regWithoutLabels := kindRegistryLoopback()
	regWithoutLabels.Labels = nil

	f.docker.containers = []container.Summary{kindRegistry(), regWithoutLabels, kindRegistryCustomImage()}

	list, err := f.c.List(context.Background(), ListOptions{})
	require.NoError(t, err)

	// registry list response is sorted by container ID:
	// 	kind-registry:a815, kind-registry-custom-image:c7f1, kind-registry-loopback:d62f
	require.Len(t, list.Items, 3)
	assert.Equal(t, api.Registry{
		TypeMeta: typeMeta,
		Name:     "kind-registry",
		Port:     5001,
		Status: api.RegistryStatus{
			CreationTimestamp: metav1.Time{Time: time.Unix(1603483645, 0)},
			HostPort:          5001,
			ContainerPort:     5000,
			IPAddress:         "172.0.1.2",
			ListenAddress:     "127.0.0.1",
			Networks:          []string{"bridge", "kind"},
			ContainerID:       "a815c0ec15f1f7430bd402e3fffe65026dd692a1a99861a52b3e30ad6e253a08",
			State:             "running",
			Labels:            map[string]string{"dev.tilt.ctlptl.role": "registry"},
			Image:             DefaultRegistryImageRef,
			Env:               []string{"REGISTRY_STORAGE_DELETE_ENABLED=true", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"},
		},
	}, list.Items[0])
	assert.Equal(t, api.Registry{
		TypeMeta: typeMeta,
		Name:     "kind-registry-custom-image",
		Port:     5001,
		Status: api.RegistryStatus{
			CreationTimestamp: metav1.Time{Time: time.Unix(1603483647, 0)},
			HostPort:          5001,
			ContainerPort:     5000,
			IPAddress:         "172.0.1.2",
			ListenAddress:     "127.0.0.1",
			Networks:          []string{"bridge", "kind"},
			ContainerID:       "c7f123e65474f951c3bc4232c888616c0f9b1052c7ae706a3b6d4701bea6e90d",
			State:             "running",
			Labels:            map[string]string{"dev.tilt.ctlptl.role": "registry"},
			Image:             "fake.tilt.dev/my-registry-image:latest",
			Env:               []string{"REGISTRY_STORAGE_DELETE_ENABLED=true", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"},
		},
	}, list.Items[1])
	assert.Equal(t, api.Registry{
		TypeMeta: typeMeta,
		Name:     "kind-registry-loopback",
		Port:     5001,
		Status: api.RegistryStatus{
			CreationTimestamp: metav1.Time{Time: time.Unix(1603483646, 0)},
			HostPort:          5001,
			ContainerPort:     5000,
			IPAddress:         "172.0.1.2",
			ListenAddress:     "127.0.0.1",
			Networks:          []string{"bridge", "kind"},
			ContainerID:       "d62f2587ff7b03858f144d3cf83c789578a6d6403f8b82a459ab4e317917cd42",
			State:             "running",
			Image:             DefaultRegistryImageRef,
			Env:               []string{"REGISTRY_STORAGE_DELETE_ENABLED=true", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"},
		},
	}, list.Items[2])
}

func TestListRegistries_badPorts(t *testing.T) {
	f := newFixture(t)
	defer f.TearDown()

	regWithoutLabels := kindRegistryLoopback()
	regWithoutLabels.Labels = nil

	f.docker.containers = []container.Summary{registryBadPorts()}

	list, err := f.c.List(context.Background(), ListOptions{})
	require.NoError(t, err)

	require.Len(t, list.Items, 1)
	assert.Equal(t, api.Registry{
		TypeMeta: typeMeta,
		Name:     "kind-registry",
		Status: api.RegistryStatus{
			CreationTimestamp: metav1.Time{Time: time.Unix(1603483645, 0)},
			IPAddress:         "172.0.1.2",
			Networks:          []string{"bridge", "kind"},
			ContainerID:       "a815c0ec15f1f7430bd402e3fffe65026dd692a1a99861a52b3e30ad6e253a08",
			State:             "running",
			Labels:            map[string]string{"dev.tilt.ctlptl.role": "registry"},
			Image:             DefaultRegistryImageRef,
			Env:               []string{"REGISTRY_STORAGE_DELETE_ENABLED=true", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"},
			Warnings: []string{
				"Unexpected registry ports: [{IP:127.0.0.1 PrivatePort:5001 PublicPort:5002 Type:tcp}]",
			},
		},
	}, list.Items[0])
}

func TestGetRegistry(t *testing.T) {
	f := newFixture(t)
	defer f.TearDown()

	f.docker.containers = []container.Summary{kindRegistry()}

	registry, err := f.c.Get(context.Background(), "kind-registry")
	require.NoError(t, err)
	assert.Equal(t, &api.Registry{
		TypeMeta: typeMeta,
		Name:     "kind-registry",
		Port:     5001,
		Status: api.RegistryStatus{
			CreationTimestamp: metav1.Time{Time: time.Unix(1603483645, 0)},
			HostPort:          5001,
			ContainerPort:     5000,
			IPAddress:         "172.0.1.2",
			ListenAddress:     "127.0.0.1",
			Networks:          []string{"bridge", "kind"},
			ContainerID:       "a815c0ec15f1f7430bd402e3fffe65026dd692a1a99861a52b3e30ad6e253a08",
			State:             "running",
			Labels:            map[string]string{"dev.tilt.ctlptl.role": "registry"},
			Image:             DefaultRegistryImageRef,
			Env:               []string{"REGISTRY_STORAGE_DELETE_ENABLED=true", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"},
		},
	}, registry)
}

func TestApplyDeadRegistry(t *testing.T) {
	f := newFixture(t)
	defer f.TearDown()

	deadRegistry := kindRegistry()
	deadRegistry.State = "dead"
	f.docker.containers = []container.Summary{deadRegistry}

	registry, err := f.c.Apply(context.Background(), &api.Registry{
		TypeMeta: typeMeta,
		Name:     "kind-registry",
		Port:     5001,
	})
	if assert.NoError(t, err) {
		assert.Equal(t, "running", registry.Status.State)
	}
	assert.Equal(t, deadRegistry.ID, f.docker.lastRemovedContainer)
}

func TestApplyLabels(t *testing.T) {
	f := newFixture(t)
	defer f.TearDown()

	// Make sure the previous registry is wiped out
	// because it doesn't have the labels we want.
	f.docker.containers = []container.Summary{kindRegistry()}

	registry, err := f.c.Apply(context.Background(), &api.Registry{
		TypeMeta: typeMeta,
		Name:     "kind-registry",
		Labels:   map[string]string{"managed-by": "ctlptl"},
	})
	if assert.NoError(t, err) {
		assert.Equal(t, "running", registry.Status.State)
	}
	config := f.docker.lastCreateConfig
	if assert.NotNil(t, config) {
		assert.Equal(t, map[string]string{
			"managed-by":           "ctlptl",
			"dev.tilt.ctlptl.role": "registry",
		}, config.Labels)
		assert.Equal(t, "kind-registry", config.Hostname)
		assert.Equal(t, DefaultRegistryImageRef, config.Image)
		assert.Equal(t, []string{"REGISTRY_STORAGE_DELETE_ENABLED=true"}, config.Env)
	}
}

func TestPreservePort(t *testing.T) {
	f := newFixture(t)
	defer f.TearDown()

	existingRegistry := kindRegistry()
	existingRegistry.State = "dead"
	existingRegistry.Ports[0].PublicPort = 5010
	f.docker.containers = []container.Summary{existingRegistry}

	registry, err := f.c.Apply(context.Background(), &api.Registry{
		TypeMeta: typeMeta,
		Name:     "kind-registry",
	})
	if assert.NoError(t, err) {
		assert.Equal(t, "running", registry.Status.State)
	}

	config := f.docker.lastCreateConfig
	if assert.NotNil(t, config) {
		assert.Equal(t, map[string]string{"dev.tilt.ctlptl.role": "registry"}, config.Labels)
		assert.Equal(t, "kind-registry", config.Hostname)
		assert.Equal(t, DefaultRegistryImageRef, config.Image)
	}
}

func TestCustomImage(t *testing.T) {
	f := newFixture(t)
	defer f.TearDown()

	// Make sure the previous registry is wiped out
	// because it doesn't have the image we want.
	f.docker.containers = []container.Summary{kindRegistry()}

	// ensure stable w/o image change
	_, err := f.c.Apply(context.Background(), &api.Registry{
		TypeMeta: typeMeta,
		Name:     "kind-registry",
		Image:    DefaultRegistryImageRef,
	})
	if assert.NoError(t, err) {
		assert.Nil(t, f.docker.lastCreateConfig, "Registry should not have been re-created")
	}

	// change image, should be (re)created
	registry, err := f.c.Apply(context.Background(), &api.Registry{
		TypeMeta: typeMeta,
		Name:     "kind-registry",
		Image:    "fake.tilt.dev/different-registry-image:latest",
	})
	if assert.NoError(t, err) {
		assert.Equal(t, "running", registry.Status.State)
	}
	config := f.docker.lastCreateConfig
	if assert.NotNil(t, config) {
		assert.Equal(t, map[string]string{"dev.tilt.ctlptl.role": "registry"}, config.Labels)
		assert.Equal(t, "kind-registry", config.Hostname)
		assert.Equal(t, "fake.tilt.dev/different-registry-image:latest", config.Image)
	}

	// Apply a config with new labels,
	// ensure image is not changed.
	registry, err = f.c.Apply(context.Background(), &api.Registry{
		TypeMeta: typeMeta,
		Name:     "kind-registry",
		Labels:   map[string]string{"extra-label": "ctlptl"},
	})
	if assert.NoError(t, err) {
		assert.Equal(t, "running", registry.Status.State)
	}
	config = f.docker.lastCreateConfig
	if assert.NotNil(t, config) {
		assert.Equal(t, map[string]string{
			"dev.tilt.ctlptl.role": "registry",
			"extra-label":          "ctlptl",
		}, config.Labels)
		assert.Equal(t, "kind-registry", config.Hostname)
		assert.Equal(t, "fake.tilt.dev/different-registry-image:latest", config.Image)
	}
}

func TestCustomEnv(t *testing.T) {
	f := newFixture(t)
	defer f.TearDown()

	// Make sure the previous registry is wiped out
	// because it doesn't have the env we want.
	f.docker.containers = []container.Summary{kindRegistry()}

	// ensure stable w/o image change
	_, err := f.c.Apply(context.Background(), &api.Registry{
		TypeMeta: typeMeta,
		Name:     "kind-registry",
		Image:    DefaultRegistryImageRef,
	})
	if assert.NoError(t, err) {
		assert.Nil(t, f.docker.lastCreateConfig, "Registry should not have been re-created")
	}

	// change env, should be (re)created
	registry, err := f.c.Apply(context.Background(), &api.Registry{
		TypeMeta: typeMeta,
		Name:     "kind-registry",
		Image:    DefaultRegistryImageRef,
		Env:      []string{"REGISTRY_STORAGE_DELETE_ENABLED=false"},
	})
	if assert.NoError(t, err) {
		assert.Equal(t, "running", registry.Status.State)
	}
	config := f.docker.lastCreateConfig
	if assert.NotNil(t, config) {
		assert.Equal(t, map[string]string{"dev.tilt.ctlptl.role": "registry"}, config.Labels)
		assert.Equal(t, "kind-registry", config.Hostname)
		assert.Equal(t, DefaultRegistryImageRef, config.Image)
		assert.Equal(t, []string{"REGISTRY_STORAGE_DELETE_ENABLED=false"}, config.Env)
	}
}

type fakeCLI struct {
	client *fakeDocker
}

func (c *fakeCLI) Client() dctr.Client {
	return c.client
}

func (c *fakeCLI) AuthInfo(ctx context.Context, repoInfo *registry.RepositoryInfo, cmdName string) (string, registrytypes.RequestAuthConfig, error) {
	return "", nil, nil
}

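// fakeDocker is a minimal in-memory stand-in for the Docker client: it serves
// a canned container list and records the most recent remove/create calls so
// tests can assert on them.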
type fakeDocker struct {
	containers           []container.Summary
	lastRemovedContainer string
	lastCreateConfig     *container.Config
	lastCreateHostConfig *container.HostConfig
}

type objectNotFoundError struct {
	object string
	id     string
}

func (e objectNotFoundError) NotFound() {}

func (e objectNotFoundError) Error() string {
	return fmt.Sprintf("Error: No such %s: %s", e.object, e.id)
}

func (d *fakeDocker) DaemonHost() string {
	return ""
}

func (d *fakeDocker) ContainerInspect(ctx context.Context, containerID string) (container.InspectResponse, error) {
	for _, c := range d.containers {
		if c.ID == containerID {
			return container.InspectResponse{
				ContainerJSONBase: &container.ContainerJSONBase{
					State: &container.State{
						Running: c.State == "running",
					},
				},
				Config: &container.Config{
					Hostname:     "test",
					Domainname:   "",
					User:         "",
					AttachStdin:  false,
					AttachStdout: false,
					AttachStderr: false,
					// ExposedPorts:nat.PortSet{"5000/tcp":struct {}{}},
					Tty:             false,
					OpenStdin:       false,
					StdinOnce:       false,
					Env:             []string{"REGISTRY_STORAGE_DELETE_ENABLED=true", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"},
					Cmd:             []string{"/etc/docker/registry/config.yml"},
					Healthcheck:     (*container.HealthConfig)(nil),
					ArgsEscaped:     false,
					Image:           DefaultRegistryImageRef,
					Volumes:         map[string]struct{}{"/var/lib/registry": struct{}{}},
					WorkingDir:      "",
					Entrypoint:      []string{"/entrypoint.sh"},
					NetworkDisabled: false,
					MacAddress:      "",
					OnBuild:         []string(nil),
					Labels:          map[string]string{"dev.tilt.ctlptl.role": "registry"},
					StopSignal:      "",
					StopTimeout:     (*int)(nil),
					Shell:           []string(nil),
				},
			}, nil
		}
	}

	return container.InspectResponse{}, objectNotFoundError{"container", containerID}
}

func (d *fakeDocker) ContainerList(ctx context.Context, options container.ListOptions) ([]container.Summary, error) {
	var result []container.Summary
	for _, c := range d.containers {
		if options.Filters.Contains("ancestor") {
			img, err := reference.ParseNormalizedNamed(c.Image)
			if err != nil || !options.Filters.Match("ancestor", img.String()) {
				continue
			}
		}
		if options.Filters.Contains("label") && !options.Filters.MatchKVList("label", c.Labels) {
			continue
		}
		result = append(result, c)
	}
	return result, nil
}

func (d *fakeDocker) ContainerRemove(ctx context.Context, id string, options container.RemoveOptions) error {
	d.lastRemovedContainer = id
	return nil
}

func (d *fakeDocker) ImagePull(ctx context.Context, image string,
	options image.PullOptions) (io.ReadCloser, error) {
	return nil, nil
}

func (d *fakeDocker) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig,
	networkingConfig *network.NetworkingConfig, platform *specs.Platform,
	containerName string) (container.CreateResponse, error) {
	d.lastCreateConfig = config
	d.lastCreateHostConfig = hostConfig

	c := kindRegistry()
	c.Image = config.Image
	d.containers = []container.Summary{c}

	return container.CreateResponse{}, nil
}
func (d *fakeDocker) ContainerStart(ctx context.Context, containerID string,
	options container.StartOptions) error {
	return nil
}
func (d *fakeDocker) ServerVersion(ctx context.Context) (types.Version, error) {
	return types.Version{}, nil
}
func (d *fakeDocker) Info(ctx context.Context) (system.Info, error) {
	return system.Info{}, nil
}
func (d *fakeDocker) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error {
	return nil
}
func (d *fakeDocker) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error {
	return nil
}

type fixture struct {
	t      *testing.T
	c      *Controller
	docker *fakeDocker
}

func newFixture(t *testing.T) *fixture {
	_ = os.Setenv("DOCKER_HOST", "")

	d := &fakeDocker{}
	controller := NewController(
		genericclioptions.IOStreams{In: os.Stdin, Out: os.Stdout, ErrOut: os.Stderr},
		&fakeCLI{client: d})
	return &fixture{
		t:      t,
		docker: d,
		c:      controller,
	}
}

func (fixture) TearDown() {}
07070100000082000041ED00000000000000000000000268AFB0EA00000000000000000000000000000000000000000000001A00000000ctlptl-0.8.43/pkg/visitor07070100000083000081A400000000000000000000000168AFB0EA00000295000000000000000000000000000000000000002400000000ctlptl-0.8.43/pkg/visitor/decode.gopackage visitor

import (
	"github.com/pkg/errors"
	"k8s.io/apimachinery/pkg/runtime"

	"github.com/tilt-dev/ctlptl/pkg/encoding"
)

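// DecodeAll opens each visitor in turn and decodes every object from its
// stream, returning the objects in a single flattened slice.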
func DecodeAll(vs []Interface) ([]runtime.Object, error) {
	result := []runtime.Object{}
	for _, v := range vs {
		objs, err := Decode(v)
		if err != nil {
			return nil, err
		}
		result = append(result, objs...)
	}
	return result, nil
}

func Decode(v Interface) ([]runtime.Object, error) {
	r, err := v.Open()
	if err != nil {
		return nil, err
	}
	defer func() {
		_ = r.Close()
	}()

	result, err := encoding.ParseStream(r)
	if err != nil {
		return nil, errors.Wrapf(err, "visiting %s", v.Name())
	}
	return result, nil
}
07070100000084000081A400000000000000000000000168AFB0EA0000026B000000000000000000000000000000000000002500000000ctlptl-0.8.43/pkg/visitor/strings.gopackage visitor

import (
	"io"
	"net/http"
	"net/url"
	"strings"

	"github.com/pkg/errors"
)

func FromStrings(filenames []string, stdin io.Reader) ([]Interface, error) {
	result := []Interface{}
	for _, f := range filenames {

		switch {
		case f == "-":
			result = append(result, Stdin(stdin))

		case strings.HasPrefix(f, "http://") || strings.HasPrefix(f, "https://"):
			_, err := url.Parse(f)
			if err != nil {
				return nil, errors.Wrapf(err, "invalid URL %s", f)
			}
			result = append(result, URL(http.DefaultClient, f))

		default:
			result = append(result, File(f))

		}
	}
	return result, nil
}
07070100000085000081A400000000000000000000000168AFB0EA00000608000000000000000000000000000000000000002500000000ctlptl-0.8.43/pkg/visitor/visitor.gopackage visitor

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

// A simplified version of cli-runtime/pkg/resource Visitor
// for objects that don't query the cluster.
type Interface interface {
	Name() string
	Open() (io.ReadCloser, error)
}
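
// A minimal usage sketch from a client package (file names here are
// hypothetical), combining FromStrings (strings.go) with DecodeAll (decode.go):
//
//	vs, err := visitor.FromStrings([]string{"-", "cluster.yaml"}, os.Stdin)
//	if err != nil {
//		return err
//	}
//	objs, err := visitor.DecodeAll(vs)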

func Stdin(stdin io.Reader) stdinVisitor {
	return stdinVisitor{reader: stdin}
}

type noOpCloseReader struct {
	io.Reader
}

func (noOpCloseReader) Close() error { return nil }

type stdinVisitor struct {
	reader io.Reader
}

func (v stdinVisitor) Name() string {
	return "stdin"
}

func (v stdinVisitor) Open() (io.ReadCloser, error) {
	return noOpCloseReader{Reader: v.reader}, nil
}

var _ Interface = stdinVisitor{}

func File(path string) fileVisitor {
	return fileVisitor{path: path}
}

type fileVisitor struct {
	path string
}

func (v fileVisitor) Name() string {
	return v.path
}

func (v fileVisitor) Open() (io.ReadCloser, error) {
	return os.Open(v.path)
}

var _ Interface = fileVisitor{}

type HTTPClient interface {
	Get(url string) (*http.Response, error)
}

func URL(client HTTPClient, url string) urlVisitor {
	return urlVisitor{client: client, url: url}
}

type urlVisitor struct {
	client HTTPClient
	url    string
}

func (v urlVisitor) Name() string {
	return v.url
}

func (v urlVisitor) Open() (io.ReadCloser, error) {
	resp, err := v.client.Get(v.url)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("fetch(%q) failed with status code %d", v.url, resp.StatusCode)
	}
	return resp.Body, nil
}

var _ Interface = urlVisitor{}
07070100000086000041ED00000000000000000000000268AFB0EA00000000000000000000000000000000000000000000001300000000ctlptl-0.8.43/test07070100000087000041ED00000000000000000000000268AFB0EA00000000000000000000000000000000000000000000002200000000ctlptl-0.8.43/test/docker-desktop07070100000088000081A400000000000000000000000168AFB0EA0000017C000000000000000000000000000000000000002D00000000ctlptl-0.8.43/test/docker-desktop/Dockerfile# syntax=docker/dockerfile:1

FROM golang:1.24-alpine
RUN apk update && apk add bash git curl tar
ENV CGO_ENABLED=0
ENV KO_VERSION=0.14.1
RUN curl -fsSL https://github.com/ko-build/ko/releases/download/v${KO_VERSION}/ko_${KO_VERSION}_Linux_$(uname -m).tar.gz \
    | tar -xzv ko && \
    mv ko /usr/local/bin/ko
WORKDIR /go/github.com/tilt-dev/ctlptl/test/cluster-network
ADD . .
07070100000089000081A400000000000000000000000168AFB0EA00000378000000000000000000000000000000000000002F00000000ctlptl-0.8.43/test/docker-desktop/builder.yamlapiVersion: batch/v1
kind: Job
metadata:
  name: ko-builder
spec:
  template:
    metadata:
      labels:
        app: ko-builder
    spec:
      containers:
      - name: builder
        image: ko-builder
        imagePullPolicy: Never
        securityContext:
          privileged: true
        command:
          - bash
          - "-c"
          - |
            set -e
            go mod init github.com/tilt-dev/test-ctlptl
            go get github.com/tilt-dev/ctlptl/test/simple-server@latest
            ko publish -B --insecure-registry github.com/tilt-dev/ctlptl/test/simple-server
        volumeMounts:
        - mountPath: /var/run/docker.sock
          name: docker-sock
          readOnly: false
      volumes:
      - name: docker-sock
        hostPath:
          path: "/run/guest-services/docker.sock"
          type: Socket
      restartPolicy: Never
  backoffLimit: 0
0707010000008A000081A400000000000000000000000168AFB0EA00000046000000000000000000000000000000000000002F00000000ctlptl-0.8.43/test/docker-desktop/cluster.yamlapiVersion: ctlptl.dev/v1alpha1
kind: Cluster
product: docker-desktop
0707010000008B000081ED00000000000000000000000168AFB0EA00000386000000000000000000000000000000000000002900000000ctlptl-0.8.43/test/docker-desktop/e2e.sh#!/bin/bash
# Tests creating a docker-desktop cluster,
# building a container in that cluster,
# then running that container.

set -exo pipefail

export DOCKER_BUILDKIT="1"

cd $(dirname $(realpath $0))
CLUSTER_NAME="kind-ctlptl-test-cluster"
ctlptl apply -f cluster.yaml

# Build the ko-builder image locally; docker-desktop shares the host's image store, so no registry push is needed.
docker buildx build --load -t ko-builder .
kubectl apply -f builder.yaml

set +e
kubectl wait --for=condition=complete job/ko-builder --timeout=180s
RESULT="$?"
set -e

if [[ "$RESULT" != "0" ]]; then
    echo "ko-builder never became healthy"
    kubectl describe pods -l app=ko-builder
    kubectl logs -l app=ko-builder --all-containers
    exit 1
fi

kubectl apply -f simple-server.yaml
kubectl wait --for=condition=ready pods -l app=simple-server --timeout=180s

ctlptl delete -f cluster.yaml

echo "docker-desktop e2e test passed!"
0707010000008C000081A400000000000000000000000168AFB0EA000001E4000000000000000000000000000000000000003500000000ctlptl-0.8.43/test/docker-desktop/simple-server.yamlapiVersion: apps/v1
kind: Deployment
metadata:
  name: simple-server
  labels:
    app: simple-server
spec:
  selector:
    matchLabels:
      app: simple-server
  template:
    metadata:
      labels:
        app: simple-server
    spec:
      containers:
      - name: simple-server
        image: ko.local/simple-server
        imagePullPolicy: Never
        ports:
        - containerPort: 8080
        livenessProbe:
          httpGet:
            path: /
            port: 8080
0707010000008D000081ED00000000000000000000000168AFB0EA000000BB000000000000000000000000000000000000001A00000000ctlptl-0.8.43/test/e2e.sh#!/bin/bash
# Integration tests that create a full cluster.

set -exo pipefail

cd $(dirname $(dirname $(realpath $0)))
make install
test/k3d/e2e.sh
test/kind/e2e.sh
test/minikube/e2e.sh
0707010000008E000041ED00000000000000000000000268AFB0EA00000000000000000000000000000000000000000000001700000000ctlptl-0.8.43/test/k3d0707010000008F000081A400000000000000000000000168AFB0EA0000017C000000000000000000000000000000000000002200000000ctlptl-0.8.43/test/k3d/Dockerfile# syntax=docker/dockerfile:1

FROM golang:1.24-alpine
RUN apk update && apk add bash git curl tar
ENV CGO_ENABLED=0
ENV KO_VERSION=0.14.1
RUN curl -fsSL https://github.com/ko-build/ko/releases/download/v${KO_VERSION}/ko_${KO_VERSION}_Linux_$(uname -m).tar.gz \
    | tar -xzv ko && \
    mv ko /usr/local/bin/ko
WORKDIR /go/github.com/tilt-dev/ctlptl/test/cluster-network
ADD . .
07070100000090000081A400000000000000000000000168AFB0EA0000029E000000000000000000000000000000000000002400000000ctlptl-0.8.43/test/k3d/builder.yamlapiVersion: batch/v1
kind: Job
metadata:
  name: ko-builder
spec:
  template:
    metadata:
      labels:
        app: ko-builder
    spec:
      containers:
      - name: builder
        image: HOST_FROM_CONTAINER_RUNTIME/ko-builder
        command:
          - bash
          - "-c"
          - |
            set -e
            go mod init github.com/tilt-dev/test-ctlptl
            go get github.com/tilt-dev/ctlptl/test/simple-server@latest
            ko publish -B --insecure-registry github.com/tilt-dev/ctlptl/test/simple-server
        env:
        - name: KO_DOCKER_REPO
          value: HOST_FROM_CLUSTER_NETWORK
      restartPolicy: Never
  backoffLimit: 0
07070100000091000081A400000000000000000000000168AFB0EA00000078000000000000000000000000000000000000002400000000ctlptl-0.8.43/test/k3d/cluster.yamlapiVersion: ctlptl.dev/v1alpha1
kind: Cluster
name: k3d-ctlptl-test-cluster
product: k3d
registry: ctlptl-test-registry
07070100000092000081ED00000000000000000000000168AFB0EA00000775000000000000000000000000000000000000001E00000000ctlptl-0.8.43/test/k3d/e2e.sh#!/bin/bash
# Tests creating a cluster with a registry,
# building a container in that cluster,
# then running that container.

set -exo pipefail

export DOCKER_BUILDKIT="1"

cd $(dirname $(realpath $0))
CLUSTER_NAME="k3d-ctlptl-test-cluster"
ctlptl apply -f registry.yaml
ctlptl apply -f cluster.yaml

# The ko-builder runs in an image tagged with the host as visible from the local machine.
docker buildx build --load -t localhost:5005/ko-builder .
docker push localhost:5005/ko-builder

# The ko-builder builds an image tagged with the host as visible from the cluster network.
HOST_FROM_CONTAINER_RUNTIME=$(ctlptl get cluster "$CLUSTER_NAME" -o template --template '{{.status.localRegistryHosting.hostFromContainerRuntime}}')
HOST_FROM_CLUSTER_NETWORK=$(ctlptl get cluster "$CLUSTER_NAME" -o template --template '{{.status.localRegistryHosting.hostFromClusterNetwork}}')
cat builder.yaml | \
    sed "s/HOST_FROM_CONTAINER_RUNTIME/$HOST_FROM_CONTAINER_RUNTIME/g" | \
    sed "s/HOST_FROM_CLUSTER_NETWORK/$HOST_FROM_CLUSTER_NETWORK/g" | \
    kubectl apply -f -

set +e
kubectl wait --for=condition=complete job/ko-builder --timeout=180s
RESULT="$?"
set -e

if [[ "$RESULT" != "0" ]]; then
    echo "ko-builder never became healthy"
    kubectl describe pods -l app=ko-builder
    kubectl logs -l app=ko-builder --all-containers
    exit 1
fi

# Test registry from both localhost and the connected network.
cat simple-server.yaml | \
    sed "s/HOST_FROM_CONTAINER_RUNTIME/$HOST_FROM_CONTAINER_RUNTIME/g" | \
    kubectl apply -f -
kubectl wait --for=condition=ready pods -l app=simple-server --timeout=60s
kubectl delete deployment simple-server

cat simple-server.yaml | \
    sed "s/HOST_FROM_CONTAINER_RUNTIME/$HOST_FROM_CLUSTER_NETWORK/g" | \
    kubectl apply -f -
kubectl wait --for=condition=ready pods -l app=simple-server --timeout=60s

ctlptl delete -f cluster.yaml

echo "k3d e2e test passed!"
07070100000093000081A400000000000000000000000168AFB0EA00000055000000000000000000000000000000000000002500000000ctlptl-0.8.43/test/k3d/registry.yamlapiVersion: ctlptl.dev/v1alpha1
kind: Registry
name: ctlptl-test-registry
port: 5005
07070100000094000081A400000000000000000000000168AFB0EA000001D8000000000000000000000000000000000000002A00000000ctlptl-0.8.43/test/k3d/simple-server.yamlapiVersion: apps/v1
kind: Deployment
metadata:
  name: simple-server
  labels:
    app: simple-server
spec:
  selector:
    matchLabels:
      app: simple-server
  template:
    metadata:
      labels:
        app: simple-server
    spec:
      containers:
      - name: simple-server
        image: HOST_FROM_CONTAINER_RUNTIME/simple-server
        ports:
        - containerPort: 8080
        livenessProbe:
          httpGet:
            path: /
            port: 8080
07070100000095000041ED00000000000000000000000268AFB0EA00000000000000000000000000000000000000000000001800000000ctlptl-0.8.43/test/kind07070100000096000081A400000000000000000000000168AFB0EA0000017C000000000000000000000000000000000000002300000000ctlptl-0.8.43/test/kind/Dockerfile# syntax=docker/dockerfile:1

FROM golang:1.24-alpine
RUN apk update && apk add bash git curl tar
ENV CGO_ENABLED=0
ENV KO_VERSION=0.14.1
RUN curl -fsSL https://github.com/ko-build/ko/releases/download/v${KO_VERSION}/ko_${KO_VERSION}_Linux_$(uname -m).tar.gz \
    | tar -xzv ko && \
    mv ko /usr/local/bin/ko
WORKDIR /go/github.com/tilt-dev/ctlptl/test/cluster-network
ADD . .
07070100000097000081A400000000000000000000000168AFB0EA0000029E000000000000000000000000000000000000002500000000ctlptl-0.8.43/test/kind/builder.yamlapiVersion: batch/v1
kind: Job
metadata:
  name: ko-builder
spec:
  template:
    metadata:
      labels:
        app: ko-builder
    spec:
      containers:
      - name: builder
        image: HOST_FROM_CONTAINER_RUNTIME/ko-builder
        command:
          - bash
          - "-c"
          - |
            set -e
            go mod init github.com/tilt-dev/test-ctlptl
            go get github.com/tilt-dev/ctlptl/test/simple-server@latest
            ko publish -B --insecure-registry github.com/tilt-dev/ctlptl/test/simple-server
        env:
        - name: KO_DOCKER_REPO
          value: HOST_FROM_CLUSTER_NETWORK
      restartPolicy: Never
  backoffLimit: 0
07070100000098000081A400000000000000000000000168AFB0EA00000095000000000000000000000000000000000000002500000000ctlptl-0.8.43/test/kind/cluster.yamlapiVersion: ctlptl.dev/v1alpha1
kind: Cluster
name: kind-ctlptl-test-cluster
product: kind
registry: ctlptl-test-registry
kubernetesVersion: v1.34.0
07070100000099000081ED00000000000000000000000168AFB0EA00000883000000000000000000000000000000000000001F00000000ctlptl-0.8.43/test/kind/e2e.sh#!/bin/bash
# Tests creating a cluster with a registry,
# building a container in that cluster,
# then running that container.

set -exo pipefail

export DOCKER_BUILDKIT="1"

cd $(dirname $(realpath $0))
CLUSTER_NAME="kind-ctlptl-test-cluster"
ctlptl apply -f registry.yaml
ctlptl apply -f cluster.yaml

# The ko-builder runs in an image tagged with the host as visible from the local machine.
docker buildx build --load -t localhost:5005/ko-builder .
docker push localhost:5005/ko-builder

# The ko-builder builds an image tagged with the host as visible from the cluster network.
HOST_FROM_CONTAINER_RUNTIME=$(ctlptl get cluster "$CLUSTER_NAME" -o template --template '{{.status.localRegistryHosting.host}}')
HOST_FROM_CLUSTER_NETWORK=$(ctlptl get cluster "$CLUSTER_NAME" -o template --template '{{.status.localRegistryHosting.hostFromClusterNetwork}}')
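# (These typically look something like localhost:5005 and ctlptl-test-registry:5000
# respectively, though the exact values come from the cluster's
# local-registry-hosting configuration.)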
cat builder.yaml | \
    sed "s/HOST_FROM_CONTAINER_RUNTIME/$HOST_FROM_CONTAINER_RUNTIME/g" | \
    sed "s/HOST_FROM_CLUSTER_NETWORK/$HOST_FROM_CLUSTER_NETWORK/g" | \
    kubectl apply -f -

set +e
kubectl wait --for=condition=complete job/ko-builder --timeout=180s
RESULT="$?"
set -e

if [[ "$RESULT" != "0" ]]; then
    echo "ko-builder never became healthy"
    kubectl describe pods -l app=ko-builder
    kubectl logs -l app=ko-builder --all-containers
    exit 1
fi

# Test registry from both localhost and the connected network.
cat simple-server.yaml | \
    sed "s/HOST_FROM_CONTAINER_RUNTIME/$HOST_FROM_CONTAINER_RUNTIME/g" | \
    kubectl apply -f -
kubectl wait --for=condition=ready pods -l app=simple-server --timeout=60s
kubectl delete deployment simple-server

cat simple-server.yaml | \
    sed "s/HOST_FROM_CONTAINER_RUNTIME/$HOST_FROM_CLUSTER_NETWORK/g" | \
    kubectl apply -f -
kubectl wait --for=condition=ready pods -l app=simple-server --timeout=60s

# Check that we started the right Kubernetes version.
k8sVersion=$(ctlptl get cluster "$CLUSTER_NAME" -o go-template --template='{{.status.kubernetesVersion}}')

ctlptl delete -f cluster.yaml

if [[ "$k8sVersion" != "v1.34.0" ]]; then
    echo "Expected kubernetes version v1.34.0 but got $k8sVersion"
    exit 1
fi

echo "kind e2e test passed!"
0707010000009A000081A400000000000000000000000168AFB0EA00000055000000000000000000000000000000000000002600000000ctlptl-0.8.43/test/kind/registry.yamlapiVersion: ctlptl.dev/v1alpha1
kind: Registry
name: ctlptl-test-registry
port: 5005
0707010000009B000081A400000000000000000000000168AFB0EA000001D8000000000000000000000000000000000000002B00000000ctlptl-0.8.43/test/kind/simple-server.yamlapiVersion: apps/v1
kind: Deployment
metadata:
  name: simple-server
  labels:
    app: simple-server
spec:
  selector:
    matchLabels:
      app: simple-server
  template:
    metadata:
      labels:
        app: simple-server
    spec:
      containers:
      - name: simple-server
        image: HOST_FROM_CONTAINER_RUNTIME/simple-server
        ports:
        - containerPort: 8080
        livenessProbe:
          httpGet:
            path: /
            port: 8080
0707010000009C000041ED00000000000000000000000268AFB0EA00000000000000000000000000000000000000000000001C00000000ctlptl-0.8.43/test/minikube0707010000009D000081A400000000000000000000000168AFB0EA0000017C000000000000000000000000000000000000002700000000ctlptl-0.8.43/test/minikube/Dockerfile# syntax=docker/dockerfile:1

FROM golang:1.24-alpine
RUN apk update && apk add bash git curl tar
ENV CGO_ENABLED=0
ENV KO_VERSION=0.14.1
RUN curl -fsSL https://github.com/ko-build/ko/releases/download/v${KO_VERSION}/ko_${KO_VERSION}_Linux_$(uname -m).tar.gz \
    | tar -xzv ko && \
    mv ko /usr/local/bin/ko
WORKDIR /go/github.com/tilt-dev/ctlptl/test/cluster-network
ADD . .
0707010000009E000081A400000000000000000000000168AFB0EA0000029E000000000000000000000000000000000000002900000000ctlptl-0.8.43/test/minikube/builder.yamlapiVersion: batch/v1
kind: Job
metadata:
  name: ko-builder
spec:
  template:
    metadata:
      labels:
        app: ko-builder
    spec:
      containers:
      - name: builder
        image: HOST_FROM_CONTAINER_RUNTIME/ko-builder
        command:
          - bash
          - "-c"
          - |
            set -e
            go mod init github.com/tilt-dev/test-ctlptl
            go get github.com/tilt-dev/ctlptl/test/simple-server@latest
            ko publish -B --insecure-registry github.com/tilt-dev/ctlptl/test/simple-server
        env:
        - name: KO_DOCKER_REPO
          value: HOST_FROM_CLUSTER_NETWORK
      restartPolicy: Never
  backoffLimit: 0
0707010000009F000081A400000000000000000000000168AFB0EA0000009D000000000000000000000000000000000000002900000000ctlptl-0.8.43/test/minikube/cluster.yamlapiVersion: ctlptl.dev/v1alpha1
kind: Cluster
name: minikube-ctlptl-test-cluster
product: minikube
registry: ctlptl-test-registry
kubernetesVersion: v1.31.0
070701000000A0000081ED00000000000000000000000168AFB0EA0000088B000000000000000000000000000000000000002300000000ctlptl-0.8.43/test/minikube/e2e.sh#!/bin/bash
# Tests creating a cluster with a registry,
# building a container in that cluster,
# then running that container.

set -exo pipefail

export DOCKER_BUILDKIT="1"

cd $(dirname $(realpath $0))
CLUSTER_NAME="minikube-ctlptl-test-cluster"
ctlptl apply -f registry.yaml
ctlptl apply -f cluster.yaml

# The ko-builder runs in an image tagged with the host as visible from the local machine.
docker buildx build --load -t localhost:5005/ko-builder .
docker push localhost:5005/ko-builder

# The ko-builder builds an image tagged with the host as visible from the cluster network.
HOST_FROM_CONTAINER_RUNTIME=$(ctlptl get cluster "$CLUSTER_NAME" -o template --template '{{.status.localRegistryHosting.host}}')
HOST_FROM_CLUSTER_NETWORK=$(ctlptl get cluster "$CLUSTER_NAME" -o template --template '{{.status.localRegistryHosting.hostFromClusterNetwork}}')
cat builder.yaml | \
    sed "s/HOST_FROM_CONTAINER_RUNTIME/$HOST_FROM_CONTAINER_RUNTIME/g" | \
    sed "s/HOST_FROM_CLUSTER_NETWORK/$HOST_FROM_CLUSTER_NETWORK/g" | \
    kubectl apply -f -

set +e
kubectl wait --for=condition=complete job/ko-builder --timeout=180s
RESULT="$?"
set -e

if [[ "$RESULT" != "0" ]]; then
    echo "ko-builder never became healthy"
    kubectl describe pods -l app=ko-builder
    kubectl logs -l app=ko-builder --all-containers
    exit 1
fi

# Test registry from both localhost and the connected network.
cat simple-server.yaml | \
    sed "s/HOST_FROM_CONTAINER_RUNTIME/$HOST_FROM_CONTAINER_RUNTIME/g" | \
    kubectl apply -f -
kubectl wait --for=condition=ready pods -l app=simple-server --timeout=60s
kubectl delete deployment simple-server

cat simple-server.yaml | \
    sed "s/HOST_FROM_CONTAINER_RUNTIME/$HOST_FROM_CLUSTER_NETWORK/g" | \
    kubectl apply -f -
kubectl wait --for=condition=ready pods -l app=simple-server --timeout=60s

# Check that we started the right Kubernetes version.
k8sVersion=$(ctlptl get cluster "$CLUSTER_NAME" -o go-template --template='{{.status.kubernetesVersion}}')

ctlptl delete -f cluster.yaml

if [[ "$k8sVersion" != "v1.31.0" ]]; then
    echo "Expected kubernetes version v1.31.0 but got $k8sVersion"
    exit 1
fi

echo "minikube e2e test passed!"
070701000000A1000081A400000000000000000000000168AFB0EA00000055000000000000000000000000000000000000002A00000000ctlptl-0.8.43/test/minikube/registry.yamlapiVersion: ctlptl.dev/v1alpha1
kind: Registry
name: ctlptl-test-registry
port: 5005
070701000000A2000081A400000000000000000000000168AFB0EA000001D8000000000000000000000000000000000000002F00000000ctlptl-0.8.43/test/minikube/simple-server.yamlapiVersion: apps/v1
kind: Deployment
metadata:
  name: simple-server
  labels:
    app: simple-server
spec:
  selector:
    matchLabels:
      app: simple-server
  template:
    metadata:
      labels:
        app: simple-server
    spec:
      containers:
      - name: simple-server
        image: HOST_FROM_CONTAINER_RUNTIME/simple-server
        ports:
        - containerPort: 8080
        livenessProbe:
          httpGet:
            path: /
            port: 8080
070701000000A3000041ED00000000000000000000000268AFB0EA00000000000000000000000000000000000000000000002100000000ctlptl-0.8.43/test/simple-server070701000000A4000081A400000000000000000000000168AFB0EA00000136000000000000000000000000000000000000002900000000ctlptl-0.8.43/test/simple-server/main.gopackage main

import (
	"log"
	"net/http"
)

func main() {
	log.Println("simple-server running on port 8080")
	err := http.ListenAndServe(":8080", http.HandlerFunc(handler))
	if err != nil {
		log.Fatal(err)
	}
}

func handler(w http.ResponseWriter, r *http.Request) {
	_, _ = w.Write([]byte("hello world"))
}
07070100000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000B00000000TRAILER!!!983 blocks