File kubectl-gather-0.10.1.obscpio of Package kubectl-gather
07070100000000000081A400000000000000000000000168A7671000000285000000000000000000000000000000000000002500000000kubectl-gather-0.10.1/.golangci.yaml# SPDX-FileCopyrightText: The kubectl-gather authors
# SPDX-License-Identifier: Apache-2.0
---
version: "2"
linters:
enable:
- errcheck
- goconst
- nolintlint
# TODO: enable after fixing issues
#- gocritic
#- gosec
#- misspell
#- stylecheck
#- unconvert
exclusions:
generated: lax
presets:
- comments
- common-false-positives
- legacy
- std-error-handling
paths:
- third_party$
- builtin$
- examples$
formatters:
enable:
- gofmt
- goimports
exclusions:
generated: lax
paths:
- third_party$
- builtin$
- examples$
07070100000001000081A400000000000000000000000168A7671000001339000000000000000000000000000000000000002600000000kubectl-gather-0.10.1/CONTRIBUTING.md# Contributing
## Setting up development environment
### Fedora
Install required packages:
```console
sudo dnf install git golang make podman
```
Check the required Go version in go.mod. If your distro's Go version is too old, see
[Managing Go installations](https://go.dev/doc/manage-install) for info on
installing the required version.
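For example, to compare the installed Go version with the version required by go.mod:
```console
go version
grep '^go ' go.mod
```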
Install additional tools:
- *kubectl*: https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/
- *kind*: https://kind.sigs.k8s.io/docs/user/quick-start/
- *oc*: https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable/
### macOS
Install the required packages:
```console
brew install git go make podman kubectl kind
```
To build container images or run tests using kind, you need to start the podman machine:
```console
podman machine init
podman machine start
```
Install additional tools:
- *oc*: https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable/
### Get the source
Fork the project on GitHub and clone the source:
```console
git clone https://github.com/{my-github-username}/kubectl-gather.git
```
## Build
```console
make
```
## Installing
Install symlinks to `kubectl-gather` and `kubectl_complete-gather` in
the PATH so *kubectl* and *oc* can find them.
```console
ln -s $PWD/kubectl-gather ~/bin/
ln -s $PWD/kubectl_complete-gather ~/bin/
ln -s $PWD/kubectl_complete-gather ~/bin/oc_complete-gather
```
## Creating test clusters
Create test clusters:
```console
kind create cluster -n c1
kind create cluster -n c2
```
> [!NOTE]
> kind adds a "kind-" prefix to the cluster names.
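You can verify the cluster names and contexts:
```console
kind get clusters
kubectl config get-contexts
```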
## Testing local gather
Gather data from the kind clusters:
```console
% kubectl gather --contexts kind-c1,kind-c2 -d gather.local
2024-09-04T02:05:12.146+0300 INFO gather Using kubeconfig "/Users/nsoffer/.kube/config"
2024-09-04T02:05:12.148+0300 INFO gather Gathering from all namespaces
2024-09-04T02:05:12.148+0300 INFO gather Using all addons
2024-09-04T02:05:12.148+0300 INFO gather Gathering from cluster "kind-c1"
2024-09-04T02:05:12.148+0300 INFO gather Gathering from cluster "kind-c2"
2024-09-04T02:05:12.288+0300 INFO gather Gathered 324 resources from cluster "kind-c1" in 0.140 seconds
2024-09-04T02:05:12.288+0300 INFO gather Gathered 339 resources from cluster "kind-c2" in 0.140 seconds
2024-09-04T02:05:12.288+0300 INFO gather Gathered 663 resources from 2 clusters in 0.140 seconds
```
Inspecting gathered data:
```console
% tree -L2 gather.local
gather.local
├── gather.log
├── kind-c1
│ ├── cluster
│ └── namespaces
└── kind-c2
├── cluster
└── namespaces
```
## Build and push a container image
Build and push a multi-arch container image to your private quay.io
repo:
```console
make container REPO=my-quay-user
make container-push
```
> [!IMPORTANT]
> - Make your repo public to use it for gathering.
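To check that the repository is accessible without credentials, you can try pulling the image anonymously; the tag below is illustrative, use the tag printed by `make container`:
```console
podman pull quay.io/my-quay-user/gather:0.10.1
```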
## Testing remote gather
Gather data remotely using your new image:
```console
% kubectl gather --contexts kind-c1,kind-c2 --remote -d gather.remote
2024-09-04T02:45:26.051+0300 INFO gather Using kubeconfig "/Users/nsoffer/.kube/config"
2024-09-04T02:45:26.051+0300 INFO gather Gathering from all namespaces
2024-09-04T02:45:26.051+0300 INFO gather Using all addons
2024-09-04T02:45:26.051+0300 INFO gather Gathering on remote cluster "kind-c2"
2024-09-04T02:45:26.052+0300 INFO gather Gathering on remote cluster "kind-c1"
2024-09-04T02:45:36.435+0300 INFO gather Gathered on remote cluster "kind-c2" in 10.383 seconds
2024-09-04T02:45:36.437+0300 INFO gather Gathered on remote cluster "kind-c1" in 10.385 seconds
2024-09-04T02:45:36.437+0300 INFO gather Gathered 2 clusters in 10.385 seconds
```
Inspecting gathered data:
```console
% tree -L3 gather.remote
gather.remote
├── gather.log
├── kind-c1
│ ├── event-filter.html
│ ├── must-gather.log
│ ├── must-gather.logs
│ ├── quay-io-nirsof-gather-sha256-aa5b3469396e5fc9217a4ffb2cc88465c4dedb311aef072bc1556b2a34f1339c
│ │ ├── cluster
│ │ ├── gather.log
│ │ ├── gather.logs
│ │ ├── namespaces
│ │ └── version
│ └── timestamp
└── kind-c2
├── event-filter.html
├── must-gather.log
├── must-gather.logs
├── quay-io-nirsof-gather-sha256-aa5b3469396e5fc9217a4ffb2cc88465c4dedb311aef072bc1556b2a34f1339c
│ ├── cluster
│ ├── gather.log
│ ├── gather.logs
│ ├── namespaces
│ └── version
└── timestamp
```
## Sending pull requests
Pull requests can be submitted to https://github.com/nirs/kubectl-gather/pulls.
Tips:
- Keep pull requests small
- Each commit should have one logical change
- Before sending a pull request, rebase on the upstream main branch (see the example below).
- Test your changes with local and remote clusters
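For example, a typical way to rebase your branch on the upstream main branch (the `upstream` remote name is only a convention):
```console
git remote add upstream https://github.com/nirs/kubectl-gather.git
git fetch upstream
git rebase upstream/main
```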
07070100000002000081A400000000000000000000000168A7671000000500000000000000000000000000000000000000002400000000kubectl-gather-0.10.1/Containerfile# SPDX-FileCopyrightText: The kubectl-gather authors
# SPDX-License-Identifier: Apache-2.0
ARG go_version
ARG ldflags
FROM docker.io/library/golang:${go_version} as builder
WORKDIR /build
COPY go.mod go.sum ./
# Cache dependencies before copying sources so source changes do not
# invalidate cached dependencies.
RUN go mod download
COPY cmd cmd
COPY pkg pkg
COPY main.go main.go
# Build env variables:
# - CGO_ENABLED=0: Disable CGO to avoid dependencies on libc. The binary can
# be built on the latest Fedora and run on old RHEL.
# - GOTOOLCHAIN=auto: The go command downloads newer toolchain as needed.
# https://go.dev/doc/toolchain#download
RUN GOTOOLCHAIN=auto CGO_ENABLED=0 go build -ldflags="${ldflags}"
FROM docker.io/library/alpine:latest
# Required for must-gather: rsync, bash
# Required for kubectl-gather: tar, kubectl
RUN apk add --no-cache \
bash \
rsync \
tar \
&& apk add --no-cache --repository=http://dl-cdn.alpinelinux.org/alpine/edge/community \
kubectl \
&& mkdir -p licenses
COPY --from=builder /build/kubectl-gather /usr/bin/kubectl-gather
COPY gather /usr/bin/gather
COPY LICENSE licenses/Apache-2.0.txt
# Use exec form to allow passing arguments from the docker command.
ENTRYPOINT ["/usr/bin/gather"]
07070100000003000081A400000000000000000000000168A7671000002C5D000000000000000000000000000000000000001E00000000kubectl-gather-0.10.1/LICENSE Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
07070100000004000081A400000000000000000000000168A76710000005A3000000000000000000000000000000000000001F00000000kubectl-gather-0.10.1/Makefile# SPDX-FileCopyrightText: The kubectl-gather authors
# SPDX-License-Identifier: Apache-2.0
# 0.5.1 when building from tag (release)
# 0.5.1-1-gcf79160 when building without tag (development)
version := $(shell git describe --tags | sed -e 's/^v//')
REGISTRY ?= quay.io
REPO ?= nirsof
IMAGE ?= gather
TAG ?= $(version)
package := github.com/nirs/kubectl-gather/pkg/gather
image := $(REGISTRY)/$(REPO)/$(IMAGE):$(TAG)
go_version := $(shell go list -f "{{.GoVersion}}" -m)
# % go build -ldflags="-help"
# -s disable symbol table
# -w disable DWARF generation
# -X definition
# add string value definition of the form importpath.name=value
ldflags := -s -w \
-X '$(package).Version=$(version)' \
-X '$(package).Image=$(image)'
.PHONY: all kubectl-gather
all: kubectl-gather
lint:
golangci-lint run ./...
cd e2e && golangci-lint run ./...
container:
podman build \
--platform=linux/amd64,linux/arm64 \
--manifest $(image) \
--build-arg ldflags="$(ldflags)" \
--build-arg go_version="$(go_version)" \
.
container-push: container
podman manifest push --all $(image)
# Build env variables:
# - CGO_ENABLED=0: Disable CGO to avoid dependencies on libc. The binary can
# be built on the latest Fedora and run on old RHEL.
# - GOTOOLCHAIN=auto: The go command downloads newer toolchain as needed.
# https://go.dev/doc/toolchain#download
kubectl-gather:
GOTOOLCHAIN=auto CGO_ENABLED=0 go build -ldflags="$(ldflags)"
07070100000005000081A400000000000000000000000168A767100000639E000000000000000000000000000000000000002000000000kubectl-gather-0.10.1/README.md# kubectl-gather
[](https://pkg.go.dev/github.com/nirs/kubectl-gather)
This is a kubectl plugin for gathering data about your cluster that may
help to debug issues.
Kubernetes is big and complicated, and when something breaks it is hard
to tell which info is needed for debugging. Even if you know which
resources or logs are needed, it is hard to get the data manually. When
working with multiple related clusters gathering the right data from the
right cluster is even harder.
The `kubectl gather` tool makes it easy to gather data quickly from
multiple clusters with a single command. It gathers *all* resources from
*all* clusters. It also gathers related data such as pod logs and, for
specific cases, external logs stored on the nodes. The data is stored in
a local directory, one file per resource, making it easy to navigate and
inspect using standard tools. If you know what you want to gather, it
is much faster and consumes a fraction of the storage to gather only
specific namespaces from all clusters.
## Installing
Download the executable for your operating system and architecture and
install in the PATH.
To install the latest version on Linux and macOS, run:
```
tag="$(curl -fsSL https://api.github.com/repos/nirs/kubectl-gather/releases/latest | jq -r .tag_name)"
os="$(uname | tr '[:upper:]' '[:lower:]')"
machine="$(uname -m)"
if [ "$machine" = "aarch64" ]; then machine="arm64"; fi
if [ "$machine" = "x86_64" ]; then machine="amd64"; fi
curl -L -o kubectl-gather https://github.com/nirs/kubectl-gather/releases/download/$tag/kubectl-gather-$tag-$os-$machine
sudo install kubectl-gather /usr/local/bin
rm kubectl-gather
```
## Shell completion
To enable shell completion install the
[kubectl_complete-gather](kubectl_complete-gather) script in the PATH.
```
curl -L -O https://raw.githubusercontent.com/nirs/kubectl-gather/main/kubectl_complete-gather
sudo install kubectl_complete-gather /usr/local/bin
rm kubectl_complete-gather
```
> [!NOTE]
> To enable shell completion when running as an `oc` plugin, the name of
> the completion script must be `oc_complete-gather`.
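For example, assuming the completion script is already installed in `/usr/local/bin`, you can add the `oc` variant as a symlink:
```
sudo ln -s /usr/local/bin/kubectl_complete-gather /usr/local/bin/oc_complete-gather
```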
## Gathering everything from the current cluster
The simplest way is to gather everything from the current cluster named
"hub":
```
$ kubectl gather -d gather.one
2024-05-27T23:03:58.838+0300 INFO gather Using kubeconfig "/home/nsoffer/.kube/config"
2024-05-27T23:03:58.840+0300 INFO gather Using current context "hub"
2024-05-27T23:03:58.841+0300 INFO gather Gathering from all namespaces
2024-05-27T23:03:58.841+0300 INFO gather Gathering from cluster "hub"
2024-05-27T23:04:00.219+0300 INFO gather Gathered 1439 resources from cluster "hub" in 1.379 seconds
2024-05-27T23:04:00.220+0300 INFO gather Gathered 1439 resources from 1 clusters in 1.379 seconds
```
This gathers 15 MiB of data into the directory "gather.one":
```
$ du -sh gather.one/
15M gather.one/
```
The "cluster" directory contains the cluster scope resources, and the
"namespaces" directory contains the namespaced resources:
```
$ tree -L 2 gather.one/
gather.one/
├── gather.log
└── hub
├── cluster
└── namespaces
```
Here is example content from the "pods" directory in the "ramen-system"
namespace:
```
$ tree gather.one/hub/namespaces/ramen-system/pods/
gather.one/hub/namespaces/ramen-system/pods/
├── ramen-hub-operator-84d7dc89bd-7qkwm
│ ├── kube-rbac-proxy
│ │ └── current.log
│ └── manager
│ └── current.log
└── ramen-hub-operator-84d7dc89bd-7qkwm.yaml
```
We can use standard tools to inspect the data. In this example we grep
all current and previous logs in all namespaces:
```
$ grep WARN gather.one/hub/namespaces/*/pods/*/*/*.log
gather.one/hub/namespaces/kube-system/pods/coredns-7db6d8ff4d-9cj6c/coredns/current.log:[WARNING] plugin/kubernetes: starting server with unsynced Kubernetes API
gather.one/hub/namespaces/kube-system/pods/kube-controller-manager-hub/kube-controller-manager/current.log:E0527 19:52:08.593071 1 core.go:105] "Failed to start service controller" err="WARNING: no cloud provider provided, services of type LoadBalancer will fail" logger="service-lb-controller"
```
## Gathering data from multiple clusters
In this example we have 3 clusters configured for disaster recovery:
```
$ kubectl config get-contexts
CURRENT NAME CLUSTER AUTHINFO NAMESPACE
dr1 dr1 dr1 default
dr2 dr2 dr2 default
* hub hub hub default
```
To gather data from all clusters run:
```
$ kubectl gather --contexts hub,dr1,dr2 -d gather.all
2024-05-27T23:16:16.459+0300 INFO gather Using kubeconfig "/home/nsoffer/.kube/config"
2024-05-27T23:16:16.460+0300 INFO gather Gathering from all namespaces
2024-05-27T23:16:16.460+0300 INFO gather Gathering from cluster "hub"
2024-05-27T23:16:16.461+0300 INFO gather Gathering from cluster "dr1"
2024-05-27T23:16:16.461+0300 INFO gather Gathering from cluster "dr2"
2024-05-27T23:16:18.624+0300 INFO gather Gathered 1441 resources from cluster "hub" in 2.163 seconds
2024-05-27T23:16:20.316+0300 INFO gather Gathered 1934 resources from cluster "dr2" in 3.855 seconds
2024-05-27T23:16:20.705+0300 INFO gather Gathered 1979 resources from cluster "dr1" in 4.244 seconds
2024-05-27T23:16:20.705+0300 INFO gather Gathered 5354 resources from 3 clusters in 4.245 seconds
```
This gathers 78 MiB of data into the directory "gather.all":
```
$ du -sh gather.all/
78M gather.all/
```
The data compresses well and can be attached to a bug tracker:
```
$ tar czf gather.all.tar.gz gather.all
$ du -sh gather.all.tar.gz
6.4M gather.all.tar.gz
```
The gather directory now includes all clusters:
```
$ tree -L 2 gather.all/
gather.all/
├── dr1
│ ├── addons
│ ├── cluster
│ └── namespaces
├── dr2
│ ├── addons
│ ├── cluster
│ └── namespaces
├── gather.log
└── hub
├── cluster
└── namespaces
```
Clusters "dr1" and "dr2" have a "rook-ceph" storage system, so the
"rook" addon collected more data in the "addons" directory. The
"commands" directory contains output from various ceph commands, and the
"logs" directory contains external logs stored on the nodes. Since this
is a single node minikube cluster, we have only one node, "dr1".
```
$ tree gather.all/dr1/addons/rook/
gather.all/dr1/addons/rook/
├── commands
│ ├── ceph-osd-blocklist-ls
│ └── ceph-status
└── logs
└── dr1
├── 59ccb238-dd08-4225-af2f-d9aef1ad4d29-client.rbd-mirror-peer.log
├── ceph-client.ceph-exporter.log
├── ceph-client.rbd-mirror.a.log
├── ceph-mds.myfs-a.log
├── ceph-mds.myfs-b.log
├── ceph-mgr.a.log
├── ceph-mon.a.log
├── ceph-osd.0.log
└── ceph-volume.log
```
## Gathering data for specific namespaces
When debugging a problem, it is useful to gather data for specific
namespaces. This is very quick and produces a small amount of data.
Let's gather data from the "deployment-rbd" namespace. The namespace
exists on the "hub" and "dr1" clusters. Depending on the application
state, the namespace can also be on cluster "dr2".
To gather the "deployment-rbd" namespace from all clusters use:
```
$ kubectl gather --contexts hub,dr1,dr2 -n deployment-rbd -d gather.before
2024-05-27T23:33:45.883+0300 INFO gather Using kubeconfig "/home/nsoffer/.kube/config"
2024-05-27T23:33:45.888+0300 INFO gather Gathering from namespaces [deployment-rbd]
2024-05-27T23:33:45.888+0300 INFO gather Gathering from cluster "hub"
2024-05-27T23:33:45.888+0300 INFO gather Gathering from cluster "dr1"
2024-05-27T23:33:45.888+0300 INFO gather Gathering from cluster "dr2"
2024-05-27T23:33:45.905+0300 INFO gather Gathered 0 resources from cluster "dr2" in 0.017 seconds
2024-05-27T23:33:45.987+0300 INFO gather Gathered 18 resources from cluster "hub" in 0.099 seconds
2024-05-27T23:33:46.024+0300 INFO gather Gathered 24 resources from cluster "dr1" in 0.136 seconds
2024-05-27T23:33:46.024+0300 INFO gather Gathered 42 resources from 3 clusters in 0.137 seconds
```
This gathered a tiny amount of data very quickly:
```
$ du -sh gather.before/
244K gather.before/
```
This gathered everything under the specified namespace in all clusters
that have this namespace:
```
$ tree -L 4 gather.before/
gather.before/
├── dr1
│ └── namespaces
│ └── deployment-rbd
│ ├── apps
│ ├── apps.open-cluster-management.io
│ ├── configmaps
│ ├── events.k8s.io
│ ├── persistentvolumeclaims
│ ├── pods
│ ├── ramendr.openshift.io
│ ├── replication.storage.openshift.io
│ └── serviceaccounts
├── gather.log
└── hub
└── namespaces
└── deployment-rbd
├── apps.open-cluster-management.io
├── cluster.open-cluster-management.io
├── configmaps
├── events.k8s.io
├── ramendr.openshift.io
└── serviceaccounts
```
After failing over the application to cluster "dr2", we can gather the
data again to compare the application state before and after the
failover:
```
$ kubectl gather --contexts hub,dr1,dr2 -n deployment-rbd -d gather.after
2024-05-27T23:45:20.292+0300 INFO gather Using kubeconfig "/home/nsoffer/.kube/config"
2024-05-27T23:45:20.297+0300 INFO gather Gathering from namespaces [deployment-rbd]
2024-05-27T23:45:20.297+0300 INFO gather Gathering from cluster "hub"
2024-05-27T23:45:20.297+0300 INFO gather Gathering from cluster "dr1"
2024-05-27T23:45:20.297+0300 INFO gather Gathering from cluster "dr2"
2024-05-27T23:45:20.418+0300 INFO gather Gathered 23 resources from cluster "hub" in 0.121 seconds
2024-05-27T23:45:20.421+0300 INFO gather Gathered 20 resources from cluster "dr1" in 0.123 seconds
2024-05-27T23:45:20.435+0300 INFO gather Gathered 19 resources from cluster "dr2" in 0.137 seconds
2024-05-27T23:45:20.435+0300 INFO gather Gathered 62 resources from 3 clusters in 0.138 seconds
```
We can see that the application is running on cluster "dr2":
```
$ tree -L 4 gather.after/
gather.after/
├── dr1
│ └── namespaces
│ └── deployment-rbd
│ ├── configmaps
│ ├── events.k8s.io
│ └── serviceaccounts
├── dr2
│ └── namespaces
│ └── deployment-rbd
│ ├── apps
│ ├── apps.open-cluster-management.io
│ ├── configmaps
│ ├── events.k8s.io
│ ├── persistentvolumeclaims
│ ├── pods
│ ├── ramendr.openshift.io
│ ├── replication.storage.openshift.io
│ └── serviceaccounts
├── gather.log
└── hub
└── namespaces
└── deployment-rbd
├── apps.open-cluster-management.io
├── cluster.open-cluster-management.io
├── configmaps
├── events.k8s.io
├── ramendr.openshift.io
└── serviceaccounts
```
Now we can compare the application resources before and after the failover:
```
$ diff -u gather.before/hub/namespaces/deployment-rbd/ramendr.openshift.io/drplacementcontrols/deployment-rbd-drpc.yaml \
gather.after/hub/namespaces/deployment-rbd/ramendr.openshift.io/drplacementcontrols/deployment-rbd-drpc.yaml
--- gather.before/hub/namespaces/deployment-rbd/ramendr.openshift.io/drplacementcontrols/deployment-rbd-drpc.yaml 2024-05-27 23:33:45.979547024 +0300
+++ gather.after/hub/namespaces/deployment-rbd/ramendr.openshift.io/drplacementcontrols/deployment-rbd-drpc.yaml 2024-05-27 23:45:20.405342350 +0300
@@ -3,13 +3,13 @@
metadata:
annotations:
drplacementcontrol.ramendr.openshift.io/app-namespace: deployment-rbd
- drplacementcontrol.ramendr.openshift.io/last-app-deployment-cluster: dr1
+ drplacementcontrol.ramendr.openshift.io/last-app-deployment-cluster: dr2
kubectl.kubernetes.io/last-applied-configuration: |
...
```
## Gathering cluster scoped resources
When gathering specific namespaces, addons gather related cluster-scoped
resources. Use "--cluster=true" to gather all cluster resources instead of
just those related to your namespaced resources.
To gather specific namespaces along with cluster scoped resources:
```
$ kubectl-gather --contexts dr1,dr2 --namespaces=e2e-appset-deploy-cephfs --cluster --directory gather.mixed
2025-08-01T14:25:15.029+0530 INFO gather Using kubeconfig "/Users/pari/.kube/config"
2025-08-01T14:25:15.031+0530 INFO gather Gathering from namespaces ["e2e-appset-deploy-cephfs"]
2025-08-01T14:25:15.031+0530 INFO gather Gathering cluster scoped resources
2025-08-01T14:25:15.031+0530 INFO gather Using all addons
2025-08-01T14:25:15.031+0530 INFO gather Gathering from cluster "dr1"
2025-08-01T14:25:15.031+0530 INFO gather Gathering from cluster "dr2"
2025-08-01T14:25:15.965+0530 INFO gather Gathered 458 resources from cluster "dr2" in 0.934 seconds
2025-08-01T14:25:15.966+0530 INFO gather Gathered 475 resources from cluster "dr1" in 0.934 seconds
2025-08-01T14:25:15.966+0530 INFO gather Gathered 933 resources from 2 clusters in 0.934 seconds
```
This gathers 12 MiB of data into the directory "gather.mixed":
```
$ du -sh gather.mixed/
12M gather.mixed
```
The gather directory includes both namespaced and cluster scoped resources:
```
$ tree -L 3 gather.mixed/
gather.mixed/
├── dr1
│ ├── cluster
│ │ ├── apiextensions.k8s.io
│ │ ├── apiregistration.k8s.io
│ │ ├── certificates.k8s.io
│ │ ├── cluster.open-cluster-management.io
│ │ ├── flowcontrol.apiserver.k8s.io
│ │ ├── namespaces
│ │ ├── networking.k8s.io
│ │ ├── nodes
│ │ ├── operator.open-cluster-management.io
│ │ ├── operators.coreos.com
│ │ ├── persistentvolumes
│ │ ├── ramendr.openshift.io
│ │ ├── rbac.authorization.k8s.io
│ │ ├── replication.storage.openshift.io
│ │ ├── scheduling.k8s.io
│ │ ├── snapshot.storage.k8s.io
│ │ ├── storage.k8s.io
│ │ ├── submariner.io
│ │ └── work.open-cluster-management.io
│ └── namespaces
│ └── e2e-appset-deploy-cephfs
├── dr2
│ ├── cluster
│ │ ├── apiextensions.k8s.io
│ │ ├── apiregistration.k8s.io
│ │ ├── certificates.k8s.io
│ │ ├── cluster.open-cluster-management.io
│ │ ├── flowcontrol.apiserver.k8s.io
│ │ ├── namespaces
│ │ ├── networking.k8s.io
│ │ ├── nodes
│ │ ├── operator.open-cluster-management.io
│ │ ├── operators.coreos.com
│ │ ├── persistentvolumes
│ │ ├── ramendr.openshift.io
│ │ ├── rbac.authorization.k8s.io
│ │ ├── replication.storage.openshift.io
│ │ ├── scheduling.k8s.io
│ │ ├── snapshot.storage.k8s.io
│ │ ├── storage.k8s.io
│ │ ├── submariner.io
│ │ └── work.open-cluster-management.io
│ └── namespaces
│ └── e2e-appset-deploy-cephfs
└── gather.log
```
## Gathering remote clusters
When gathering remote clusters it can be faster to gather the data on
the remote clusters and download the data to the local directory.
> [!IMPORTANT]
> Gathering remotely requires the "oc" command.
In this example we gather data from OpenShift Data Foundation clusters
configured for disaster recovery. Gathering everything takes more than 6
minutes:
```
$ kubectl gather --contexts kevin-rdr-hub,kevin-rdr-c1,kevin-rdr-c2 --remote --directory gather.remote
2024-05-28T20:57:32.684+0300 INFO gather Using kubeconfig "/home/nsoffer/.kube/config"
2024-05-28T20:57:32.686+0300 INFO gather Gathering from all namespaces
2024-05-28T20:57:32.686+0300 INFO gather Gathering on remote cluster "kevin-rdr-c2"
2024-05-28T20:57:32.686+0300 INFO gather Gathering on remote cluster "kevin-rdr-c1"
2024-05-28T20:57:32.686+0300 INFO gather Gathering on remote cluster "kevin-rdr-hub"
2024-05-28T20:59:49.362+0300 INFO gather Gathered on remote cluster "kevin-rdr-hub" in 136.676 seconds
2024-05-28T21:02:45.090+0300 INFO gather Gathered on remote cluster "kevin-rdr-c2" in 312.404 seconds
2024-05-28T21:03:51.841+0300 INFO gather Gathered on remote cluster "kevin-rdr-c1" in 379.155 seconds
2024-05-28T21:03:51.841+0300 INFO gather Gathered 3 clusters in 379.155 seconds
```
This gathered 11 GiB of data:
```
$ du -sh gather.remote/
11G gather.remote/
```
Most of the data is Ceph logs stored on the nodes:
```
$ du -sm gather.remote/*/*/*/* | sort -rn | head
2288 gather.remote/kevin-rdr-c1/quay-io-nirsof-gather-sha256-8999a022a9f243df3255f8bb41977fd6936c311cb20a015cbc632a873530da9e/addons/rook
2190 gather.remote/kevin-rdr-c2/quay-io-nirsof-gather-sha256-8999a022a9f243df3255f8bb41977fd6936c311cb20a015cbc632a873530da9e/addons/rook
583 gather.remote/kevin-rdr-c2/quay-io-nirsof-gather-sha256-8999a022a9f243df3255f8bb41977fd6936c311cb20a015cbc632a873530da9e/namespaces/openshift-storage
501 gather.remote/kevin-rdr-c1/quay-io-nirsof-gather-sha256-8999a022a9f243df3255f8bb41977fd6936c311cb20a015cbc632a873530da9e/namespaces/openshift-storage
282 gather.remote/kevin-rdr-hub/quay-io-nirsof-gather-sha256-8999a022a9f243df3255f8bb41977fd6936c311cb20a015cbc632a873530da9e/namespaces/openshift-openstack-infra
241 gather.remote/kevin-rdr-c1/quay-io-nirsof-gather-sha256-8999a022a9f243df3255f8bb41977fd6936c311cb20a015cbc632a873530da9e/namespaces/openshift-openstack-infra
232 gather.remote/kevin-rdr-c2/quay-io-nirsof-gather-sha256-8999a022a9f243df3255f8bb41977fd6936c311cb20a015cbc632a873530da9e/namespaces/openshift-ovn-kubernetes
189 gather.remote/kevin-rdr-c1/quay-io-nirsof-gather-sha256-8999a022a9f243df3255f8bb41977fd6936c311cb20a015cbc632a873530da9e/namespaces/openshift-monitoring
185 gather.remote/kevin-rdr-hub/quay-io-nirsof-gather-sha256-8999a022a9f243df3255f8bb41977fd6936c311cb20a015cbc632a873530da9e/namespaces/openshift-ovn-kubernetes
174 gather.remote/kevin-rdr-c2/quay-io-nirsof-gather-sha256-8999a022a9f243df3255f8bb41977fd6936c311cb20a015cbc632a873530da9e/namespaces/openshift-openstack-infra
```
For remote gathering the directory structure is a little deeper. If
you have used `must-gather` this probably looks familiar:
```
$ tree -L 3 gather.remote/
gather.remote/
├── gather.log
├── kevin-rdr-c1
│ ├── event-filter.html
│ ├── must-gather.log
│ ├── quay-io-nirsof-gather-sha256-8999a022a9f243df3255f8bb41977fd6936c311cb20a015cbc632a873530da9e
│ │ ├── addons
│ │ ├── cluster
│ │ ├── gather.log
│ │ ├── namespaces
│ │ └── version
│ └── timestamp
├── kevin-rdr-c2
│ ├── event-filter.html
│ ├── must-gather.log
│ ├── quay-io-nirsof-gather-sha256-8999a022a9f243df3255f8bb41977fd6936c311cb20a015cbc632a873530da9e
│ │ ├── addons
│ │ ├── cluster
│ │ ├── gather.log
│ │ ├── namespaces
│ │ └── version
│ └── timestamp
└── kevin-rdr-hub
├── event-filter.html
├── must-gather.log
├── quay-io-nirsof-gather-sha256-8999a022a9f243df3255f8bb41977fd6936c311cb20a015cbc632a873530da9e
│ ├── cluster
│ ├── gather.log
│ ├── namespaces
│ └── version
└── timestamp
```
Gathering only specific namespaces from these clusters is much quicker.
In this example we gather data related to a single DR-protected VM:
```
$ kubectl gather --contexts kevin-rdr-hub,kevin-rdr-c1,kevin-rdr-c2 --namespaces openshift-dr-ops,ui-vms3 --remote -d gather.remote.app
2024-05-28T21:14:15.883+0300 INFO gather Using kubeconfig "/home/nsoffer/.kube/config"
2024-05-28T21:14:15.884+0300 INFO gather Gathering from namespaces [openshift-dr-ops ui-vms3]
2024-05-28T21:14:15.884+0300 INFO gather Gathering on remote cluster "kevin-rdr-c2"
2024-05-28T21:14:15.884+0300 INFO gather Gathering on remote cluster "kevin-rdr-c1"
2024-05-28T21:14:15.884+0300 INFO gather Gathering on remote cluster "kevin-rdr-hub"
2024-05-28T21:14:33.247+0300 INFO gather Gathered on remote cluster "kevin-rdr-c2" in 17.363 seconds
2024-05-28T21:14:33.491+0300 INFO gather Gathered on remote cluster "kevin-rdr-c1" in 17.607 seconds
2024-05-28T21:14:33.577+0300 INFO gather Gathered on remote cluster "kevin-rdr-hub" in 17.692 seconds
2024-05-28T21:14:33.577+0300 INFO gather Gathered 3 clusters in 17.693 seconds
```
This gathers only 2.7 MiB:
```
$ du -sh gather.remote.app/
2.7M gather.remote.app/
```
## Enabling specific addons
By default we gather additional data such as pod container logs, rook
command output, and external logs stored on the nodes. To control gathering of
additional data, use the `--addons` flag. If the flag is not set,
all addons are enabled.
Gathering only resources:
```
$ kubectl gather --contexts dr1,dr2 --addons= -d gather.resources
2024-06-01T02:13:08.117+0300 INFO gather Using kubeconfig "/home/nsoffer/.kube/config"
2024-06-01T02:13:08.118+0300 INFO gather Gathering from all namespaces
2024-06-01T02:13:08.119+0300 INFO gather Using addons []
2024-06-01T02:13:08.119+0300 INFO gather Gathering from cluster "dr1"
2024-06-01T02:13:08.119+0300 INFO gather Gathering from cluster "dr2"
2024-06-01T02:13:08.942+0300 INFO gather Gathered 557 resources from cluster "dr1" in 0.823 seconds
2024-06-01T02:13:08.946+0300 INFO gather Gathered 557 resources from cluster "dr2" in 0.828 seconds
2024-06-01T02:13:08.946+0300 INFO gather Gathered 1114 resources from 2 clusters in 0.828 seconds
```
Gathering resources and pod container logs:
```
$ kubectl gather --contexts dr1,dr2 --addons logs -d gather.logs
2024-06-01T02:12:07.775+0300 INFO gather Using kubeconfig "/home/nsoffer/.kube/config"
2024-06-01T02:12:07.776+0300 INFO gather Gathering from all namespaces
2024-06-01T02:12:07.777+0300 INFO gather Using addons ["logs"]
2024-06-01T02:12:07.777+0300 INFO gather Gathering from cluster "dr1"
2024-06-01T02:12:07.777+0300 INFO gather Gathering from cluster "dr2"
2024-06-01T02:12:11.580+0300 INFO gather Gathered 553 resources from cluster "dr2" in 3.803 seconds
2024-06-01T02:12:11.799+0300 INFO gather Gathered 553 resources from cluster "dr1" in 4.022 seconds
2024-06-01T02:12:11.799+0300 INFO gather Gathered 1106 resources from 2 clusters in 4.022 seconds
```
Gathering everything:
```
$ kubectl gather --contexts dr1,dr2 -d gather.all
2024-06-01T02:11:46.490+0300 INFO gather Using kubeconfig "/home/nsoffer/.kube/config"
2024-06-01T02:11:46.492+0300 INFO gather Gathering from all namespaces
2024-06-01T02:11:46.492+0300 INFO gather Using all addons
2024-06-01T02:11:46.492+0300 INFO gather Gathering from cluster "dr1"
2024-06-01T02:11:46.492+0300 INFO gather Gathering from cluster "dr2"
2024-06-01T02:11:50.680+0300 INFO gather Gathered 549 resources from cluster "dr1" in 4.189 seconds
2024-06-01T02:11:50.788+0300 INFO gather Gathered 549 resources from cluster "dr2" in 4.296 seconds
2024-06-01T02:11:50.788+0300 INFO gather Gathered 1098 resources from 2 clusters in 4.297 seconds
```
Comparing the gathered data:
```
$ du -sh gather.*
108M gather.all
35M gather.logs
8.8M gather.resources
```
## Integrating with other programs
When running *kubectl gather* from another program you may want to
use JSON logs to extract certain fields from the gather logs.
Example: extracting the "msg" field from gather JSON logs:
```
% kubectl gather --kubeconfig e2e/clusters.yaml \
--contexts kind-c1,kind-c2 \
--log-format json 2>&1 | jq -r .msg
Using kubeconfig "e2e/clusters.yaml"
Gathering from all namespaces
Using all addons
Storing data in "gather.20250114190449"
Gathering from cluster "kind-c1"
Gathering from cluster "kind-c2"
Gathered 321 resources from cluster "kind-c2" in 0.137 seconds
Gathered 338 resources from cluster "kind-c1" in 0.140 seconds
Gathered 659 resources from 2 clusters in 0.140 seconds
```
To also extract debug level logs, you can use the `gather.log` file in
the gather directory.
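For example, to show only the debug level messages from the log file (the directory name below is illustrative):
```
grep DEBUG gather.20250114190449/gather.log
```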
## Similar projects
- [must-gather](https://github.com/openshift/must-gather) - similar tool
for collecting data from an OpenShift cluster.
- [SoS](https://github.com/sosreport/sos) - similar tool for collecting
data from a host.
## License
kubectl-gather is licensed under the [Apache 2.0 license](/LICENSE).
07070100000006000041ED00000000000000000000000268A7671000000000000000000000000000000000000000000000001A00000000kubectl-gather-0.10.1/cmd07070100000007000081A400000000000000000000000168A76710000006E7000000000000000000000000000000000000002400000000kubectl-gather-0.10.1/cmd/config.go// SPDX-FileCopyrightText: The kubectl-gather authors
// SPDX-License-Identifier: Apache-2.0
package cmd
import (
"fmt"
"os"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/clientcmd/api"
)
type clusterConfig struct {
Config *rest.Config
Context string
}
func loadClusterConfigs(contexts []string, kubeconfig string) ([]*clusterConfig, error) {
if len(contexts) == 0 {
restConfig, err := rest.InClusterConfig()
if err != rest.ErrNotInCluster {
if err != nil {
return nil, err
}
log.Infof("Using in cluster config")
return []*clusterConfig{{Config: restConfig}}, nil
}
log.Debugf("Not running in cluster")
}
config, err := loadKubeconfig(kubeconfig)
if err != nil {
return nil, err
}
if len(contexts) == 0 {
if config.CurrentContext == "" {
return nil, fmt.Errorf("no context specified and current context not set")
}
log.Infof("Using current context %q", config.CurrentContext)
contexts = []string{config.CurrentContext}
}
var configs []*clusterConfig
for _, context := range contexts {
restConfig, err := clientcmd.NewNonInteractiveClientConfig(
*config, context, nil, nil).ClientConfig()
if err != nil {
return nil, err
}
configs = append(configs, &clusterConfig{Config: restConfig, Context: context})
}
return configs, nil
}
func loadKubeconfig(kubeconfig string) (*api.Config, error) {
if kubeconfig == "" {
kubeconfig = defaultKubeconfig()
}
log.Infof("Using kubeconfig %q", kubeconfig)
config, err := clientcmd.LoadFromFile(kubeconfig)
if err != nil {
return nil, err
}
return config, nil
}
func defaultKubeconfig() string {
env := os.Getenv("KUBECONFIG")
if env != "" {
return env
}
return clientcmd.RecommendedHomeFile
}
07070100000008000081A400000000000000000000000168A76710000007A0000000000000000000000000000000000000002300000000kubectl-gather-0.10.1/cmd/local.go// SPDX-FileCopyrightText: The kubectl-gather authors
// SPDX-License-Identifier: Apache-2.0
package cmd
import (
"path/filepath"
"sync"
"time"
"github.com/nirs/kubectl-gather/pkg/gather"
)
type result struct {
Count int
Err error
}
func localGather(clusterConfigs []*clusterConfig) {
start := time.Now()
wg := sync.WaitGroup{}
results := make(chan result, len(clusterConfigs))
for i := range clusterConfigs {
clusterConfig := clusterConfigs[i]
if clusterConfig.Context != "" {
log.Infof("Gathering from cluster %q", clusterConfig.Context)
} else {
log.Info("Gathering on cluster")
}
start := time.Now()
directory := filepath.Join(directory, clusterConfig.Context)
options := gather.Options{
Kubeconfig: kubeconfig,
Context: clusterConfig.Context,
Namespaces: namespaces,
Addons: addons,
Cluster: cluster,
Log: log.Named(clusterConfig.Context),
}
wg.Add(1)
go func() {
defer wg.Done()
g, err := gather.New(clusterConfig.Config, directory, options)
if err != nil {
results <- result{Err: err}
return
}
err = g.Gather()
results <- result{Count: g.Count(), Err: err}
if err != nil {
return
}
elapsed := time.Since(start).Seconds()
if clusterConfig.Context != "" {
log.Infof("Gathered %d resources from cluster %q in %.3f seconds",
g.Count(), clusterConfig.Context, elapsed)
} else {
log.Infof("Gathered %d resources on cluster in %.3f seconds",
g.Count(), elapsed)
}
}()
}
wg.Wait()
close(results)
count := 0
for r := range results {
if r.Err != nil {
log.Fatal(r.Err)
}
count += r.Count
}
if len(namespaces) != 0 && count == 0 {
// Likely a user error like a wrong namespace.
log.Warnf("No resource gathered from namespaces %v", namespaces)
}
log.Infof("Gathered %d resources from %d clusters in %.3f seconds",
count, len(clusterConfigs), time.Since(start).Seconds())
}
07070100000009000081A400000000000000000000000168A7671000000A45000000000000000000000000000000000000002400000000kubectl-gather-0.10.1/cmd/remote.go// SPDX-FileCopyrightText: The kubectl-gather authors
// SPDX-License-Identifier: Apache-2.0
package cmd
import (
"bytes"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
"time"
"github.com/nirs/kubectl-gather/pkg/gather"
)
func remoteGather(clusterConfigs []*clusterConfig) {
start := time.Now()
wg := sync.WaitGroup{}
errors := make(chan error, len(clusterConfigs))
for i := range clusterConfigs {
clusterConfig := clusterConfigs[i]
directory := filepath.Join(directory, clusterConfig.Context)
wg.Add(1)
go func() {
defer wg.Done()
if err := runMustGather(clusterConfig.Context, directory); err != nil {
errors <- err
}
}()
}
wg.Wait()
close(errors)
for err := range errors {
log.Fatal(err)
}
log.Infof("Gathered %d clusters in %.3f seconds",
len(clusterConfigs), time.Since(start).Seconds())
}
func runMustGather(context string, directory string) error {
log.Infof("Gathering on remote cluster %q", context)
start := time.Now()
logfile, err := createMustGatherLog(directory)
if err != nil {
return err
}
defer logfile.Close()
var stderr bytes.Buffer
cmd := mustGatherCommand(context, directory)
cmd.Stdout = logfile
cmd.Stderr = &stderr
log.Debugf("Running command: %s", cmd)
if err := cmd.Run(); err != nil {
return fmt.Errorf("oc adm must-gather error: %s: %s", err, stderr.String())
}
elapsed := time.Since(start).Seconds()
log.Infof("Gathered on remote cluster %q in %.3f seconds",
context, elapsed)
return nil
}
func createMustGatherLog(directory string) (*os.File, error) {
if err := os.MkdirAll(directory, 0750); err != nil {
return nil, err
}
return os.Create(filepath.Join(directory, "must-gather.log"))
}
func mustGatherCommand(context string, directory string) *exec.Cmd {
args := []string{
"adm",
"must-gather",
"--image=" + gather.Image,
"--context=" + context,
"--dest-dir=" + directory,
}
if kubeconfig != "" {
args = append(args, "--kubeconfig="+kubeconfig)
}
var remoteArgs []string
if namespaces != nil {
remoteArgs = append(remoteArgs, "--namespaces="+strings.Join(namespaces, ","))
}
// --namespaces not set, --cluster not set -> cluster=true
// --namespaces set, --cluster not set -> cluster=false
if cluster {
remoteArgs = append(remoteArgs, "--cluster=true")
} else {
remoteArgs = append(remoteArgs, "--cluster=false")
}
if addons != nil {
remoteArgs = append(remoteArgs, "--addons="+strings.Join(addons, ","))
}
if len(remoteArgs) > 0 {
args = append(args, "--", "/usr/bin/gather")
args = append(args, remoteArgs...)
}
return exec.Command("oc", args...)
}
0707010000000A000081A400000000000000000000000168A7671000001C08000000000000000000000000000000000000002200000000kubectl-gather-0.10.1/cmd/root.go// SPDX-FileCopyrightText: The kubectl-gather authors
// SPDX-License-Identifier: Apache-2.0
package cmd
import (
"fmt"
stdlog "log"
"os"
"path/filepath"
"slices"
"strings"
"time"
"github.com/spf13/cobra"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"github.com/nirs/kubectl-gather/pkg/gather"
)
var directory string
var kubeconfig string
var contexts []string
var namespaces []string
var addons []string
var cluster bool
var remote bool
var verbose bool
var logFormat string
var log *zap.SugaredLogger
var example = ` # Gather data from all namespaces in current context in my-kubeconfig and
# store it in gather.{timestamp}.
kubectl gather --kubeconfig my-kubeconfig
# Gather data from all namespaces in clusters "dr1", "dr2" and "hub" and store
# it in "gather.local/", using default kubeconfig (~/.kube/config).
kubectl gather --contexts dr1,dr2,hub --directory gather.local
# Gather data from namespaces "my-ns" and "other-ns" in clusters "dr1", "dr2",
# and "hub", and store it in "gather.ns/".
kubectl gather --contexts dr1,dr2,hub --namespaces my-ns,other-ns --directory gather.ns
# Gather data on the remote clusters "dr1", "dr2" and "hub" and download it to
# "gather.remote/". Requires the "oc" command.
kubectl gather --contexts dr1,dr2,hub --remote --directory gather.remote
# Enable only the "logs" addon, gathering all resources and pod logs. Use
# --addons= to disable all addons.
kubectl gather --contexts dr1,dr2,hub --addons logs --directory gather.resources+logs
# Gather both cluster and namespace resources from "my-ns" and "other-ns" in clusters
# "dr1", "dr2", and "hub".
kubectl gather --contexts dr1,dr2,hub --namespaces my-ns,other-ns --cluster --directory gather.mixed
# Gather only cluster resources from clusters "dr1", "dr2" and "hub".
kubectl gather --contexts dr1,dr2,hub --namespace="" --cluster --directory gather.cluster`
var rootCmd = &cobra.Command{
Use: "kubectl-gather",
Short: "Gather data from clusters",
Version: gather.Version,
Example: example,
Annotations: map[string]string{
cobra.CommandDisplayNameAnnotation: "kubectl gather",
},
Run: runGather,
}
func Execute() {
err := rootCmd.Execute()
if err != nil {
os.Exit(1)
}
}
func init() {
rootCmd.Flags().StringVarP(&directory, "directory", "d", "",
"directory for storing gathered data (default \"gather.{timestamp}\")")
// Don't set default kubeconfig, so kubeconfig is empty unless the user
// specified the option. This is required to allow running remote commands
// using in-cluster config.
rootCmd.Flags().StringVar(&kubeconfig, "kubeconfig", "",
"the kubeconfig file to use")
rootCmd.Flags().StringSliceVar(&contexts, "contexts", nil,
"comma separated list of contexts to gather data from")
rootCmd.Flags().StringSliceVarP(&namespaces, "namespaces", "n", nil,
"if specified, comma separated list of namespaces to gather data from")
rootCmd.Flags().StringSliceVar(&addons, "addons", nil,
fmt.Sprintf("if specified, comma separated list of addons to enable (available addons: %s)",
availableAddons()))
rootCmd.Flags().BoolVar(&cluster, "cluster", false,
"if true, gather cluster scoped resources, if namespaces and cluster flags are not "+
"specified, gather all resources")
rootCmd.Flags().BoolVarP(&remote, "remote", "r", false,
"run on the remote clusters (requires the \"oc\" command)")
rootCmd.Flags().BoolVarP(&verbose, "verbose", "v", false,
"be more verbose")
rootCmd.Flags().StringVar(&logFormat, "log-format", "text", "Set the logging format [text, json]")
// Use plain, machine friendly version string.
rootCmd.SetVersionTemplate("{{.Version}}\n")
}
func runGather(cmd *cobra.Command, args []string) {
if directory == "" {
directory = defaultGatherDirectory()
}
log = createLogger(directory, verbose, logFormat)
defer func() {
_ = log.Sync()
}()
if err := validateOptions(cmd); err != nil {
log.Fatal(err)
}
clusterConfigs, err := loadClusterConfigs(contexts, kubeconfig)
if err != nil {
log.Fatal(err)
}
if namespaces == nil {
log.Infof("Gathering from all namespaces")
} else if len(namespaces) > 0 {
log.Infof("Gathering from namespaces %q", namespaces)
}
if cluster {
log.Info("Gathering cluster scoped resources")
}
if addons != nil {
log.Infof("Using addons %q", addons)
} else {
log.Infof("Using all addons")
}
if !cmd.Flags().Changed("directory") {
log.Infof("Storing data in %q", directory)
}
if remote {
remoteGather(clusterConfigs)
} else {
localGather(clusterConfigs)
}
}
func validateOptions(cmd *cobra.Command) error {
// --namespaces=""
// --namespaces="" --cluster=false
if namespaces != nil && len(namespaces) == 0 && !cluster {
return fmt.Errorf("nothing to gather: specify --namespaces or --cluster")
}
// --namespaces and --cluster flags are not set
if !cmd.Flags().Changed("namespaces") && !cmd.Flags().Changed("cluster") {
cluster = true
}
return nil
}
func createLogger(directory string, verbose bool, format string) *zap.SugaredLogger {
consoleConfig := zap.NewProductionEncoderConfig()
logfileConfig := zap.NewProductionEncoderConfig()
// Use formatted timestamps instead of seconds since epoch to make it easier
// to relate to other logs.
consoleConfig.EncodeTime = zapcore.ISO8601TimeEncoder
logfileConfig.EncodeTime = zapcore.ISO8601TimeEncoder
// Use UPPERCASE log levels to make it easier to read.
consoleConfig.EncodeLevel = zapcore.CapitalLevelEncoder
logfileConfig.EncodeLevel = zapcore.CapitalLevelEncoder
var consoleEncoder zapcore.Encoder
var logfileEncoder zapcore.Encoder
switch format {
case "text":
// Caller and stacktraces are useless noise in the console text logs,
// but may be helpful in json format when the logs are consumed by
// another program.
consoleConfig.CallerKey = zapcore.OmitKey
consoleConfig.StacktraceKey = zapcore.OmitKey
consoleEncoder = zapcore.NewConsoleEncoder(consoleConfig)
// In the log file caller and stacktraces are nice to have.
logfileEncoder = zapcore.NewConsoleEncoder(logfileConfig)
case "json":
// When using json logs we want all possible info, so a program can
// consume what it needs.
consoleEncoder = zapcore.NewJSONEncoder(consoleConfig)
logfileEncoder = zapcore.NewJSONEncoder(logfileConfig)
default:
stdlog.Fatalf("Invalid log-format: %q", format)
}
if err := os.MkdirAll(directory, 0750); err != nil {
stdlog.Fatalf("Cannot create directory: %s", err)
}
logfile, err := os.Create(filepath.Join(directory, "gather.log"))
if err != nil {
stdlog.Fatalf("Cannot create log file: %s", err)
}
consoleLevel := zapcore.InfoLevel
if verbose {
consoleLevel = zapcore.DebugLevel
}
core := zapcore.NewTee(
zapcore.NewCore(logfileEncoder, zapcore.Lock(logfile), zapcore.DebugLevel),
zapcore.NewCore(consoleEncoder, zapcore.Lock(os.Stderr), consoleLevel),
)
return zap.New(core).Named("gather").Sugar()
}
func defaultGatherDirectory() string {
return time.Now().Format("gather.20060102150405")
}
func availableAddons() string {
names := gather.AvailableAddons()
slices.Sort(names)
return strings.Join(names, ", ")
}
0707010000000B000041ED00000000000000000000000268A7671000000000000000000000000000000000000000000000001B00000000kubectl-gather-0.10.1/docs0707010000000C000081A400000000000000000000000168A7671000000726000000000000000000000000000000000000002800000000kubectl-gather-0.10.1/docs/internals.md# kubectl-gather internals
## Pipeline
kubectl-gather fetches and processes data in a three-step pipeline:
```
prepare --> gather --> inspect
```
### Prepare step
This step runs in the goroutine calling Gather(). This step includes:
1. Looking up available namespaces - if no namespaces are given, we have a list
with one item, the special empty namespace, gathering resources from all
namespaces.
1. Looking up API resources.
1. Queuing gatherResources(resource, namespace) for every resource and namespace in the gatherWorkqueue.
1. Closing the gatherWorkqueue.
This step ends when all work has been queued in the resources work queue. Since the queue is unbuffered, this ends when the last gatherResources() call has been queued.
### Gather step
This step runs in the gatherWorkqueue goroutines.
The workers pick up work functions from the queue and run them.
Gathering resources involves the following steps:
1. List all resources with the specified type and namespace.
1. Dump every resource to the output directory
1. If an addon is registered for the resource, call addon.Inspect(). Inspecting
a resource may fetch new resources from the cluster, or queue more work in
the inspectWorkqueue.
1. When all workers are done, close the inspectWorkqueue
Workers cannot queue more work in the gatherWorkqueue. All work must be queued in the prepare step.
The workers exit when there is no more work to do.
### Inspect step
This step runs in the inspectWorkqueue goroutines.
The workers running in this step pick up work functions from the queue and run them.
Work done in the inspect queue depends on the addon. Examples are:
- Copying logs from containers
- Running commands in an agent pod
- Copying logs from nodes
Workers cannot queue more work in the inspectWorkqueue. All work must be queued in the gather step.
The workers exit when there is no more work to do.
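### Pipeline sketch
The sketch below shows how the two work queues hand work between the steps. It is a minimal, hypothetical example: the channel-based queues, worker counts, and names such as `workFunc` and `runWorkers` are illustrative assumptions, not the actual implementation.
```go
package main

import (
	"fmt"
	"sync"
)

type workFunc func()

// runWorkers starts count workers that run functions from the queue until
// the queue is closed, and returns a WaitGroup for waiting for the workers.
func runWorkers(queue <-chan workFunc, count int) *sync.WaitGroup {
	var wg sync.WaitGroup
	for i := 0; i < count; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for work := range queue {
				work()
			}
		}()
	}
	return &wg
}

func main() {
	gatherWorkqueue := make(chan workFunc)
	inspectWorkqueue := make(chan workFunc)

	gatherers := runWorkers(gatherWorkqueue, 6)
	inspectors := runWorkers(inspectWorkqueue, 6)

	// Prepare step: if no namespaces were given, use a list with one item,
	// the special empty namespace, then queue gather work for every
	// resource and namespace.
	namespaces := []string{""}
	resources := []string{"pods", "deployments"}
	for _, resource := range resources {
		for _, namespace := range namespaces {
			gatherWorkqueue <- func() {
				// Gather step: list and dump the resources; an addon may
				// queue more work in the inspectWorkqueue.
				inspectWorkqueue <- func() {
					// Inspect step: copy logs, run commands, and so on.
					fmt.Printf("inspected %q in namespace %q\n", resource, namespace)
				}
			}
		}
	}
	// All gather work must be queued in the prepare step.
	close(gatherWorkqueue)

	// When all gather workers are done, close the inspectWorkqueue.
	gatherers.Wait()
	close(inspectWorkqueue)
	inspectors.Wait()
}
```
Closing the inspect queue only after all gather workers have finished is what allows addons to queue inspect work safely during the gather step.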
0707010000000D000041ED00000000000000000000000268A7671000000000000000000000000000000000000000000000001A00000000kubectl-gather-0.10.1/e2e0707010000000E000081A400000000000000000000000168A767100000061D000000000000000000000000000000000000002300000000kubectl-gather-0.10.1/e2e/Makefiledeploy undeploy: export KUBECONFIG = out/kubeconfig.yaml
e2e:
go build -o $@ ./cmd
test: clusters deploy
go test . -v -count=1
clusters: e2e
./e2e create
deploy:
# Deploy common deployment on both clusters.
kubectl apply -k testdata/common --context kind-c1
kubectl apply -k testdata/common --context kind-c2
# Deploy c1 deployment on c1.
kubectl apply -k testdata/c1 --context kind-c1
# Deploy c2 deployment on c2.
kubectl apply -k testdata/c2 --context kind-c2
# Wait for all deployments.
kubectl rollout status deploy common-busybox -n test-common --context kind-c1
kubectl rollout status deploy common-busybox -n test-common --context kind-c2
kubectl rollout status deploy c1-busybox -n test-c1 --context kind-c1
kubectl rollout status deploy c2-busybox -n test-c2 --context kind-c2
undeploy:
# Delete common deployment on both clusters.
kubectl delete -k testdata/common --context kind-c1 --ignore-not-found --wait=false
kubectl delete -k testdata/common --context kind-c2 --ignore-not-found --wait=false
# Delete c1 deployment on c1.
kubectl delete -k testdata/c1 --context kind-c1 --ignore-not-found --wait=false
# Delete c2 deployment on c2.
kubectl delete -k testdata/c2 --context kind-c2 --ignore-not-found --wait=false
# Wait for all deletions.
kubectl wait ns test-common --for delete --context kind-c1
kubectl wait ns test-common --for delete --context kind-c2
kubectl wait ns test-c1 --for delete --context kind-c1
kubectl wait ns test-c2 --for delete --context kind-c2
clean: e2e
./e2e delete
rm -rf out
rm -f e2e
0707010000000F000081A400000000000000000000000168A7671000000187000000000000000000000000000000000000002400000000kubectl-gather-0.10.1/e2e/README.md# End to end tests
The e2e module provides the *e2e* tool for creating test clusters,
helpers for testing, and the tests themselves.
## Requirements
macOS:
```
brew install kind podman
```
## Running the tests
```
make test
```
This creates the test clusters if needed and runs the tests.
## Cleaning up
```
make clean
```
This deletes the test clusters, kubeconfigs, and data gathered during the
tests.
07070100000010000041ED00000000000000000000000268A7671000000000000000000000000000000000000000000000002300000000kubectl-gather-0.10.1/e2e/clusters07070100000011000081A400000000000000000000000168A7671000000BE7000000000000000000000000000000000000002F00000000kubectl-gather-0.10.1/e2e/clusters/clusters.gopackage clusters
import (
"fmt"
"log"
"os"
"os/exec"
"path/filepath"
"slices"
"strings"
"sync"
"github.com/nirs/kubectl-gather/e2e/commands"
)
const (
C1 = "kind-c1"
C2 = "kind-c2"
outdir = "out"
kubeconfig = "kubeconfig.yaml"
)
var Names = []string{C1, C2}
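// Kubeconfig returns the path of the merged kubeconfig for the test clusters.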
func Kubeconfig() string {
return filepath.Join(outdir, kubeconfig)
}
func Create() error {
log.Print("Creating clusters")
if err := os.MkdirAll(outdir, 0o700); err != nil {
return err
}
if err := execute(createCluster, Names); err != nil {
return err
}
if err := createKubeconfig(); err != nil {
return err
}
log.Print("Clusters created")
return nil
}
func Delete() error {
log.Print("Deleting clusters")
if err := execute(deleteCluster, Names); err != nil {
return err
}
_ = os.Remove(Kubeconfig())
log.Print("Clusters deleted")
return nil
}
func execute(fn func(name string) error, names []string) error {
errors := make(chan error, len(names))
wg := sync.WaitGroup{}
for _, name := range names {
wg.Add(1)
go func() {
defer wg.Done()
err := fn(name)
if err != nil {
errors <- err
}
}()
}
wg.Wait()
close(errors)
for e := range errors {
return e
}
return nil
}
func createCluster(name string) error {
log.Printf("Creating cluster %q", name)
exists, err := clusterExists(name)
if err != nil {
return err
}
if exists {
log.Printf("Using existing cluster: %q", name)
return nil
}
cmd := exec.Command(
"kind", "create", "cluster",
"--name", kindName(name),
"--kubeconfig", clusterKubeconfig(name),
"--wait", "60s",
)
return commands.Run(cmd)
}
func deleteCluster(name string) error {
log.Printf("Deleting cluster %q", name)
config := clusterKubeconfig(name)
cmd := exec.Command(
"kind", "delete", "cluster",
"--name", kindName(name),
"--kubeconfig", config,
)
if err := commands.Run(cmd); err != nil {
return err
}
_ = os.Remove(config)
return nil
}
func createKubeconfig() error {
log.Printf("Creating kubconfigs %q", Kubeconfig())
var configs []string
for _, name := range Names {
configs = append(configs, clusterKubeconfig(name))
}
cmd := exec.Command("kubectl", "config", "view", "--flatten")
cmd.Env = append(os.Environ(), "KUBECONFIG="+strings.Join(configs, ":"))
log.Printf("Running %v", cmd)
data, err := cmd.Output()
if err != nil {
return fmt.Errorf("failed to merge configs: %s: %s", err, commands.Stderr(err))
}
return os.WriteFile(Kubeconfig(), data, 0640)
}
func clusterExists(name string) (bool, error) {
cmd := exec.Command("kind", "get", "clusters")
log.Printf("Running %v", cmd)
out, err := cmd.Output()
if err != nil {
return false, fmt.Errorf("failed to get clusters: %s: %s", err, commands.Stderr(err))
}
trimmed := strings.TrimSpace(string(out))
existing := strings.Split(trimmed, "\n")
return slices.Contains(existing, kindName(name)), nil
}
func kindName(name string) string {
return strings.TrimPrefix(name, "kind-")
}
func clusterKubeconfig(name string) string {
return filepath.Join(outdir, name+".yaml")
}
07070100000012000041ED00000000000000000000000268A7671000000000000000000000000000000000000000000000001E00000000kubectl-gather-0.10.1/e2e/cmd07070100000013000081A400000000000000000000000168A767100000030B000000000000000000000000000000000000002600000000kubectl-gather-0.10.1/e2e/cmd/main.gopackage main
import (
"log"
"os"
"github.com/nirs/kubectl-gather/e2e/clusters"
"github.com/spf13/cobra"
)
var rootCmd = &cobra.Command{
Use: "e2e",
Short: "Manage the e2e testing environment",
}
var createCmd = &cobra.Command{
Use: "create",
Short: "Create the e2e environment",
Run: func(cmd *cobra.Command, args []string) {
if err := clusters.Create(); err != nil {
log.Fatal(err)
}
},
}
var deleteCmd = &cobra.Command{
Use: "delete",
Short: "Delete the e2e environment",
Run: func(cmd *cobra.Command, args []string) {
if err := clusters.Delete(); err != nil {
log.Fatal(err)
}
},
}
func init() {
rootCmd.AddCommand(deleteCmd)
rootCmd.AddCommand(createCmd)
}
func main() {
err := rootCmd.Execute()
if err != nil {
os.Exit(1)
}
}
07070100000014000041ED00000000000000000000000268A7671000000000000000000000000000000000000000000000002300000000kubectl-gather-0.10.1/e2e/commands07070100000015000081A400000000000000000000000168A7671000000284000000000000000000000000000000000000002F00000000kubectl-gather-0.10.1/e2e/commands/commands.gopackage commands
import (
"bufio"
"io"
"log"
"os/exec"
)
// Run a command logging lines from stderr.
func Run(cmd *exec.Cmd) error {
log.Printf("Running %v", cmd)
pipe, err := cmd.StderrPipe()
if err != nil {
return err
}
if err := cmd.Start(); err != nil {
return err
}
reader := bufio.NewReader(pipe)
for {
line, _, err := reader.ReadLine()
if err != nil {
if err != io.EOF {
log.Printf("Failed to read from command stderr: %s", err)
}
break
}
log.Print(string(line))
}
return cmd.Wait()
}
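// Stderr returns the standard error captured by an exec.ExitError, or nil for
// other errors.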
func Stderr(err error) []byte {
if ee, ok := err.(*exec.ExitError); ok {
return ee.Stderr
}
return nil
}
07070100000016000081A400000000000000000000000168A7671000000037000000000000000000000000000000000000002100000000kubectl-gather-0.10.1/e2e/e2e.gopackage e2e
const kubectlGather = "../kubectl-gather"
07070100000017000081A400000000000000000000000168A76710000032F6000000000000000000000000000000000000002900000000kubectl-gather-0.10.1/e2e/gather_test.gopackage e2e
import (
"os/exec"
"path/filepath"
"strings"
"testing"
"github.com/nirs/kubectl-gather/e2e/clusters"
"github.com/nirs/kubectl-gather/e2e/commands"
"github.com/nirs/kubectl-gather/e2e/validate"
)
var (
commonClusterResources = []string{
"cluster/namespaces/test-common.yaml",
}
commonNamespacedResources = []string{
"namespaces/test-common/persistentvolumeclaims/common-pvc1.yaml",
"namespaces/test-common/pods/common-busybox-*.yaml",
"namespaces/test-common/apps/deployments/common-busybox.yaml",
"namespaces/test-common/apps/replicasets/common-busybox-*.yaml",
"namespaces/test-common/configmaps/kube-root-ca.crt.yaml",
"namespaces/test-common/serviceaccounts/default.yaml",
}
commonLogResources = []string{
"namespaces/test-common/pods/common-busybox-*/busybox/current.log",
}
commonPVCResources = []string{
"cluster/persistentvolumes/common-pv1.yaml",
}
c1ClusterNodes = []string{
"cluster/nodes/c1-control-plane.yaml",
}
c1ClusterResources = []string{
"cluster/namespaces/test-c1.yaml",
}
c1NamespaceResources = []string{
"namespaces/test-c1/persistentvolumeclaims/c1-pvc1.yaml",
"namespaces/test-c1/pods/c1-busybox-*.yaml",
"namespaces/test-c1/apps/deployments/c1-busybox.yaml",
"namespaces/test-c1/apps/replicasets/c1-busybox-*.yaml",
"namespaces/test-c1/configmaps/kube-root-ca.crt.yaml",
"namespaces/test-c1/serviceaccounts/default.yaml",
}
c1LogResources = []string{
"namespaces/test-c1/pods/c1-busybox-*/busybox/current.log",
}
c1PVCResources = []string{
"cluster/persistentvolumes/c1-pv1.yaml",
}
c2ClusterNodes = []string{
"cluster/nodes/c2-control-plane.yaml",
}
c2ClusterResources = []string{
"cluster/namespaces/test-c2.yaml",
}
c2NamespaceResources = []string{
"namespaces/test-c2/persistentvolumeclaims/c2-pvc1.yaml",
"namespaces/test-c2/pods/c2-busybox-*.yaml",
"namespaces/test-c2/apps/deployments/c2-busybox.yaml",
"namespaces/test-c2/apps/replicasets/c2-busybox-*.yaml",
"namespaces/test-c2/configmaps/kube-root-ca.crt.yaml",
"namespaces/test-c2/serviceaccounts/default.yaml",
}
c2LogResources = []string{
"namespaces/test-c2/pods/c2-busybox-*/busybox/current.log",
}
c2PVCResources = []string{
"cluster/persistentvolumes/c2-pv1.yaml",
}
defaultPVCResources = []string{
"cluster/storage.k8s.io/storageclasses/standard.yaml",
}
)
func TestGather(t *testing.T) {
outputDir := "out/test-gather"
cmd := exec.Command(
kubectlGather,
"--contexts", strings.Join(clusters.Names, ","),
"--kubeconfig", clusters.Kubeconfig(),
"--directory", outputDir,
)
if err := commands.Run(cmd); err != nil {
t.Errorf("kubectl-gather failed: %s", err)
}
validateGatherAll(t, outputDir)
}
func TestGatherClusterTrue(t *testing.T) {
outputDir := "out/test-gather-cluster-true"
cmd := exec.Command(
kubectlGather,
"--contexts", strings.Join(clusters.Names, ","),
"--kubeconfig", clusters.Kubeconfig(),
"--cluster=true",
"--directory", outputDir,
)
if err := commands.Run(cmd); err != nil {
t.Errorf("kubectl-gather failed: %s", err)
}
validateGatherAll(t, outputDir)
}
func TestGatherClusterFalse(t *testing.T) {
outputDir := "out/test-gather-cluster-false"
cmd := exec.Command(
kubectlGather,
"--contexts", strings.Join(clusters.Names, ","),
"--kubeconfig", clusters.Kubeconfig(),
"--cluster=false",
"--directory", outputDir,
)
if err := commands.Run(cmd); err != nil {
t.Errorf("kubectl-gather failed: %s", err)
}
validate.Exists(t, outputDir, clusters.Names,
defaultPVCResources,
commonPVCResources,
commonNamespacedResources,
commonLogResources,
)
validate.Exists(t, outputDir, []string{clusters.C1},
c1PVCResources,
c1NamespaceResources,
c1LogResources,
)
validate.Exists(t, outputDir, []string{clusters.C2},
c2PVCResources,
c2NamespaceResources,
c2LogResources,
)
validate.Missing(t, outputDir, clusters.Names,
commonClusterResources,
)
validate.Missing(t, outputDir, []string{clusters.C1},
c1ClusterNodes,
c1ClusterResources,
)
validate.Missing(t, outputDir, []string{clusters.C2},
c2ClusterNodes,
c2ClusterResources,
)
}
func TestGatherEmptyNamespaces(t *testing.T) {
outputDir := "out/test-gather-empty-namespaces"
cmd := exec.Command(
kubectlGather,
"--contexts", strings.Join(clusters.Names, ","),
"--kubeconfig", clusters.Kubeconfig(),
"--namespaces=", "",
"--directory", outputDir,
)
if err := commands.Run(cmd); err == nil {
t.Errorf("kubectl-gather should fail, but it succeeded")
}
validateNoClusterDir(t, outputDir)
}
func TestGatherEmptyNamespacesClusterFalse(t *testing.T) {
outputDir := "out/test-gather-empty-namespaces-cluster-false"
cmd := exec.Command(
kubectlGather,
"--contexts", strings.Join(clusters.Names, ","),
"--kubeconfig", clusters.Kubeconfig(),
"--namespaces=", "",
"--cluster=false",
"--directory", outputDir,
)
if err := commands.Run(cmd); err == nil {
t.Errorf("kubectl-gather should fail, but it succeeded")
}
validateNoClusterDir(t, outputDir)
}
func TestGatherEmptyNamespacesClusterTrue(t *testing.T) {
outputDir := "out/test-gather-empty-namespaces-cluster-true"
cmd := exec.Command(
kubectlGather,
"--contexts", strings.Join(clusters.Names, ","),
"--kubeconfig", clusters.Kubeconfig(),
"--namespaces=", "",
"--cluster=true",
"--directory", outputDir,
)
if err := commands.Run(cmd); err != nil {
t.Errorf("kubectl-gather failed: %s", err)
}
validate.Exists(t, outputDir, clusters.Names,
defaultPVCResources,
commonClusterResources,
commonPVCResources,
)
validate.Exists(t, outputDir, []string{clusters.C1},
c1ClusterNodes,
c1ClusterResources,
c1PVCResources,
)
validate.Exists(t, outputDir, []string{clusters.C2},
c2ClusterNodes,
c2ClusterResources,
c2PVCResources,
)
validate.Missing(t, outputDir, clusters.Names,
commonNamespacedResources,
commonLogResources,
)
validate.Missing(t, outputDir, []string{clusters.C1},
c1NamespaceResources,
c1LogResources,
)
validate.Missing(t, outputDir, []string{clusters.C2},
c2NamespaceResources,
c2LogResources,
)
}
func TestGatherSpecificNamespaces(t *testing.T) {
outputDir := "out/test-gather-specific-namespaces"
cmd := exec.Command(
kubectlGather,
"--contexts", strings.Join(clusters.Names, ","),
"--kubeconfig", clusters.Kubeconfig(),
"--namespaces", "test-common,test-c1",
"--directory", outputDir,
)
if err := commands.Run(cmd); err != nil {
t.Errorf("kubectl-gather failed: %s", err)
}
validateSpecificNamespaces(t, outputDir)
}
func TestGatherSpecificNamespacesClusterFalse(t *testing.T) {
outputDir := "out/test-gather-specific-namespaces-cluster-false"
cmd := exec.Command(
kubectlGather,
"--contexts", strings.Join(clusters.Names, ","),
"--kubeconfig", clusters.Kubeconfig(),
"--namespaces", "test-common,test-c1",
"--cluster=false",
"--directory", outputDir,
)
if err := commands.Run(cmd); err != nil {
t.Errorf("kubectl-gather failed: %s", err)
}
validateSpecificNamespaces(t, outputDir)
}
func TestGatherSpecificNamespacesClusterTrue(t *testing.T) {
outputDir := "out/test-gather-specific-namespaces-cluster-true"
cmd := exec.Command(
kubectlGather,
"--contexts", strings.Join(clusters.Names, ","),
"--kubeconfig", clusters.Kubeconfig(),
"--namespaces", "test-common,test-c1",
"--cluster=true",
"--directory", outputDir,
)
if err := commands.Run(cmd); err != nil {
t.Errorf("kubectl-gather failed: %s", err)
}
validate.Exists(t, outputDir, clusters.Names,
defaultPVCResources,
commonClusterResources,
commonPVCResources,
commonNamespacedResources,
commonLogResources,
)
validate.Exists(t, outputDir, []string{clusters.C1},
c1ClusterNodes,
c1ClusterResources,
c1PVCResources,
c1NamespaceResources,
c1LogResources,
)
validate.Exists(t, outputDir, []string{clusters.C2},
c2ClusterNodes,
c2ClusterResources,
c2PVCResources,
)
validate.Missing(t, outputDir, []string{clusters.C2},
c2NamespaceResources,
c2LogResources,
)
}
func TestGatherAddonsLogs(t *testing.T) {
outputDir := "out/test-gather-addons-logs"
cmd := exec.Command(
kubectlGather,
"--contexts", strings.Join(clusters.Names, ","),
"--kubeconfig", clusters.Kubeconfig(),
"--namespaces", "test-common,test-c1,test-c2",
"--addons", "logs",
"--directory", outputDir,
)
if err := commands.Run(cmd); err != nil {
t.Errorf("kubectl-gather failed: %s", err)
}
validate.Exists(t, outputDir, clusters.Names,
commonLogResources,
commonClusterResources,
commonNamespacedResources,
)
validate.Exists(t, outputDir, []string{clusters.C1},
c1LogResources,
c1ClusterResources,
c1NamespaceResources,
)
validate.Exists(t, outputDir, []string{clusters.C2},
c2LogResources,
c2ClusterResources,
c2NamespaceResources,
)
validate.Missing(t, outputDir, clusters.Names,
defaultPVCResources,
commonPVCResources,
)
validate.Missing(t, outputDir, []string{clusters.C1},
c1PVCResources,
c2PVCResources,
)
}
func TestGatherAddonsPVCs(t *testing.T) {
outputDir := "out/test-gather-addons-pvcs"
cmd := exec.Command(
kubectlGather,
"--contexts", strings.Join(clusters.Names, ","),
"--kubeconfig", clusters.Kubeconfig(),
"--namespaces", "test-common,test-c1,test-c2",
"--addons", "pvcs",
"--directory", outputDir,
)
if err := commands.Run(cmd); err != nil {
t.Errorf("kubectl-gather failed: %s", err)
}
validate.Exists(t, outputDir, clusters.Names,
defaultPVCResources,
commonPVCResources,
commonClusterResources,
commonNamespacedResources,
)
validate.Exists(t, outputDir, []string{clusters.C1},
c1PVCResources,
c1ClusterResources,
c1NamespaceResources,
)
validate.Exists(t, outputDir, []string{clusters.C2},
c2PVCResources,
c2ClusterResources,
c2NamespaceResources,
)
validate.Missing(t, outputDir, clusters.Names,
commonLogResources,
c1LogResources,
c2LogResources,
)
}
func TestGatherAddonsEmpty(t *testing.T) {
outputDir := "out/test-gather-addons-empty"
cmd := exec.Command(
kubectlGather,
"--contexts", strings.Join(clusters.Names, ","),
"--kubeconfig", clusters.Kubeconfig(),
"--namespaces", "test-common,test-c1,test-c2",
"--addons=",
"--directory", outputDir,
)
if err := commands.Run(cmd); err != nil {
t.Errorf("kubectl-gather failed: %s", err)
}
validate.Exists(t, outputDir, clusters.Names,
commonClusterResources,
commonNamespacedResources,
)
validate.Exists(t, outputDir, []string{clusters.C1},
c1ClusterResources,
c1NamespaceResources,
)
validate.Exists(t, outputDir, []string{clusters.C2},
c2ClusterResources,
c2NamespaceResources,
)
validate.Missing(t, outputDir, clusters.Names,
defaultPVCResources,
commonLogResources,
commonPVCResources,
)
validate.Missing(t, outputDir, []string{clusters.C1},
c1LogResources,
c1PVCResources,
)
validate.Missing(t, outputDir, []string{clusters.C2},
c2LogResources,
c2PVCResources,
)
}
func TestJSONLogs(t *testing.T) {
outputDir := "out/test-json-logs"
logPath := filepath.Join(outputDir, "gather.log")
cmd := exec.Command(
kubectlGather,
"--contexts", strings.Join(clusters.Names, ","),
"--kubeconfig", clusters.Kubeconfig(),
"--directory", outputDir,
"--log-format", "json",
)
if err := commands.Run(cmd); err != nil {
t.Errorf("kubectl-gather failed: %s", err)
}
validate.JSONLog(t, logPath)
}
// Test helpers
func validateGatherAll(t *testing.T, outputDir string) {
validate.Exists(t, outputDir, clusters.Names,
defaultPVCResources,
commonClusterResources,
commonPVCResources,
commonNamespacedResources,
commonLogResources,
)
validate.Exists(t, outputDir, []string{clusters.C1},
c1ClusterNodes,
c1ClusterResources,
c1PVCResources,
c1NamespaceResources,
c1LogResources,
)
validate.Exists(t, outputDir, []string{clusters.C2},
c2ClusterNodes,
c2ClusterResources,
c2PVCResources,
c2NamespaceResources,
c2LogResources,
)
validate.Missing(t, outputDir, []string{clusters.C1},
c2ClusterNodes,
c2ClusterResources,
c2PVCResources,
c2NamespaceResources,
c2LogResources,
)
validate.Missing(t, outputDir, []string{clusters.C2},
c1ClusterNodes,
c1ClusterResources,
c1PVCResources,
c1NamespaceResources,
c1LogResources,
)
}
func validateSpecificNamespaces(t *testing.T, outputDir string) {
validate.Exists(t, outputDir, clusters.Names,
defaultPVCResources,
commonClusterResources,
commonPVCResources,
commonNamespacedResources,
)
validate.Exists(t, outputDir, []string{clusters.C1},
c1ClusterResources,
c1PVCResources,
c1NamespaceResources,
)
validate.Missing(t, outputDir, []string{clusters.C2},
c2ClusterResources,
c2PVCResources,
c2NamespaceResources,
)
}
func validateNoClusterDir(t *testing.T, outputDir string) {
for _, cluster := range clusters.Names {
clusterDir := filepath.Join(outputDir, cluster)
if validate.PathExists(t, clusterDir) {
t.Errorf("cluster directory %q should not be created", clusterDir)
}
}
}
07070100000018000081A400000000000000000000000168A76710000009FC000000000000000000000000000000000000002100000000kubectl-gather-0.10.1/e2e/go.modmodule github.com/nirs/kubectl-gather/e2e
go 1.24.0
toolchain go1.24.5
require (
github.com/nirs/kubectl-gather v0.8.0
github.com/spf13/cobra v1.9.1
)
require (
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.12.1 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.21.0 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/moby/term v0.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/spf13/pflag v1.0.6 // indirect
github.com/x448/float16 v0.8.4 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/net v0.38.0 // indirect
golang.org/x/oauth2 v0.27.0 // indirect
golang.org/x/sys v0.31.0 // indirect
golang.org/x/term v0.30.0 // indirect
golang.org/x/text v0.23.0 // indirect
golang.org/x/time v0.6.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/api v0.31.1
k8s.io/apimachinery v0.31.1 // indirect
k8s.io/cli-runtime v0.31.1 // indirect
k8s.io/client-go v0.31.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20240816214639-573285566f34 // indirect
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
sigs.k8s.io/yaml v1.4.0
)
// Test current branch.
replace github.com/nirs/kubectl-gather => ../
07070100000019000081A400000000000000000000000168A7671000003D12000000000000000000000000000000000000002100000000kubectl-gather-0.10.1/e2e/go.sumgithub.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU=
github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k=
github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw=
github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI=
github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU=
k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI=
k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U=
k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
k8s.io/cli-runtime v0.31.1 h1:/ZmKhmZ6hNqDM+yf9s3Y4KEYakNXUn5sod2LWGGwCuk=
k8s.io/cli-runtime v0.31.1/go.mod h1:pKv1cDIaq7ehWGuXQ+A//1OIF+7DI+xudXtExMCbe9U=
k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0=
k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20240816214639-573285566f34 h1:/amS69DLm09mtbFtN3+LyygSFohnYGMseF8iv+2zulg=
k8s.io/kube-openapi v0.0.0-20240816214639-573285566f34/go.mod h1:G0W3eI9gG219NHRq3h5uQaRBl4pj4ZpwzRP5ti8y770=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
0707010000001A000081A400000000000000000000000168A7671000000B11000000000000000000000000000000000000002900000000kubectl-gather-0.10.1/e2e/output_test.gopackage e2e
import (
"os/exec"
"path/filepath"
"testing"
apps "k8s.io/api/apps/v1"
core "k8s.io/api/core/v1"
"sigs.k8s.io/yaml"
"github.com/nirs/kubectl-gather/e2e/clusters"
"github.com/nirs/kubectl-gather/e2e/commands"
"github.com/nirs/kubectl-gather/pkg/gather"
)
func TestOutput(t *testing.T) {
outputDir := "out/test-output"
cmd := exec.Command(
kubectlGather,
"--contexts", clusters.C1,
"--kubeconfig", clusters.Kubeconfig(),
"--directory", outputDir,
)
if err := commands.Run(cmd); err != nil {
t.Errorf("kubectl-gather failed: %s", err)
}
reader := gather.NewOutputReader(filepath.Join(outputDir, clusters.C1))
t.Run("deployment", func(t *testing.T) {
name := "common-busybox"
data, err := reader.ReadResource("test-common", "apps/deployments", name)
if err != nil {
t.Fatal(err)
}
deployment := apps.Deployment{}
if err := yaml.Unmarshal(data, &deployment); err != nil {
t.Fatal(err)
}
if deployment.Name != name {
t.Errorf("expected deployment name %q, got %s", name, deployment.Name)
}
t.Logf("Read deployment %q", deployment.Name)
})
t.Run("pods", func(t *testing.T) {
pods, err := reader.ListResources("test-common", "pods")
if err != nil {
t.Fatal(err)
}
if len(pods) == 0 {
t.Fatalf("no pod found")
}
t.Logf("Listed pods %q", pods)
for _, name := range pods {
data, err := reader.ReadResource("test-common", "pods", name)
if err != nil {
t.Fatal(err)
}
pod := core.Pod{}
if err := yaml.Unmarshal(data, &pod); err != nil {
t.Fatal(err)
}
if pod.Name != name {
t.Errorf("expected pod name %q, got %s", name, pod.Name)
}
t.Logf("Read pod %q", pod.Name)
}
})
t.Run("cluster scope", func(t *testing.T) {
namespaces, err := reader.ListResources("", "namespaces")
if err != nil {
t.Fatal(err)
}
if len(namespaces) == 0 {
t.Fatalf("no namespaces found")
}
t.Logf("Listed namespaces %q", namespaces)
for _, name := range namespaces {
data, err := reader.ReadResource("", "namespaces", name)
if err != nil {
t.Fatal(err)
}
namespace := core.Namespace{}
if err := yaml.Unmarshal(data, &namespace); err != nil {
t.Fatal(err)
}
if namespace.Name != name {
t.Errorf("expected namespace name %q, got %s", name, namespace.Name)
}
t.Logf("Read namespace %q", namespace.Name)
}
})
t.Run("missing namespaced", func(t *testing.T) {
found, err := reader.ListResources("test-common", "missing")
if err != nil {
t.Fatal(err)
}
if len(found) != 0 {
t.Errorf("expected empty slice, got %v", found)
}
})
t.Run("missing cluster scope", func(t *testing.T) {
found, err := reader.ListResources("", "missing")
if err != nil {
t.Fatal(err)
}
if len(found) != 0 {
t.Errorf("expected empty slice, got %v", found)
}
})
}
0707010000001B000041ED00000000000000000000000268A7671000000000000000000000000000000000000000000000002300000000kubectl-gather-0.10.1/e2e/testdata0707010000001C000041ED00000000000000000000000268A7671000000000000000000000000000000000000000000000002800000000kubectl-gather-0.10.1/e2e/testdata/base0707010000001D000081A400000000000000000000000168A76710000002CC000000000000000000000000000000000000003400000000kubectl-gather-0.10.1/e2e/testdata/base/deploy.yaml---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: busybox
name: busybox
spec:
replicas: 1
selector:
matchLabels:
app: busybox
template:
metadata:
labels:
app: busybox
spec:
containers:
- image: quay.io/nirsof/busybox:stable
name: busybox
command:
- sh
- -c
- |
trap exit TERM
while true; do
echo $(date) | tee -a /mnt/test/log
sync
sleep 10 &
wait
done
volumeMounts:
- name: pvc1
mountPath: /mnt/test
volumes:
- name: pvc1
persistentVolumeClaim:
claimName: pvc1
0707010000001E000081A400000000000000000000000168A7671000000044000000000000000000000000000000000000003B00000000kubectl-gather-0.10.1/e2e/testdata/base/kustomization.yaml---
resources:
- pv.yaml
- ns.yaml
- pvc.yaml
- deploy.yaml
0707010000001F000081A400000000000000000000000168A767100000003A000000000000000000000000000000000000003000000000kubectl-gather-0.10.1/e2e/testdata/base/ns.yaml---
apiVersion: v1
kind: Namespace
metadata:
name: test
07070100000020000081A400000000000000000000000168A76710000000C4000000000000000000000000000000000000003000000000kubectl-gather-0.10.1/e2e/testdata/base/pv.yaml---
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv1
spec:
storageClassName: standard
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
hostPath:
path: MOUNT_PATH
07070100000021000081A400000000000000000000000168A76710000000BA000000000000000000000000000000000000003100000000kubectl-gather-0.10.1/e2e/testdata/base/pvc.yaml---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pvc1
spec:
storageClassName: standard
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
07070100000022000041ED00000000000000000000000268A7671000000000000000000000000000000000000000000000002600000000kubectl-gather-0.10.1/e2e/testdata/c107070100000023000081A400000000000000000000000168A7671000000113000000000000000000000000000000000000003900000000kubectl-gather-0.10.1/e2e/testdata/c1/kustomization.yaml---
resources:
- ../base
namePrefix: c1-
namespace: test-c1
patches:
# Patch the mount point to match the name prefix.
- target:
kind: PersistentVolume
name: pv1
patch: |-
- op: replace
path: /spec/hostPath/path
value: /mnt/c1-pv1
07070100000024000041ED00000000000000000000000268A7671000000000000000000000000000000000000000000000002600000000kubectl-gather-0.10.1/e2e/testdata/c207070100000025000081A400000000000000000000000168A7671000000113000000000000000000000000000000000000003900000000kubectl-gather-0.10.1/e2e/testdata/c2/kustomization.yaml---
resources:
- ../base
namePrefix: c2-
namespace: test-c2
patches:
# Patch the mount point to match the name prefix.
- target:
kind: PersistentVolume
name: pv1
patch: |-
- op: replace
path: /spec/hostPath/path
value: /mnt/c2-pv1
07070100000026000041ED00000000000000000000000268A7671000000000000000000000000000000000000000000000002A00000000kubectl-gather-0.10.1/e2e/testdata/common07070100000027000081A400000000000000000000000168A767100000011F000000000000000000000000000000000000003D00000000kubectl-gather-0.10.1/e2e/testdata/common/kustomization.yaml---
resources:
- ../base
namePrefix: common-
namespace: test-common
patches:
# Patch the mount point to match the name prefix.
- target:
kind: PersistentVolume
name: pv1
patch: |-
- op: replace
path: /spec/hostPath/path
value: /mnt/common-pv1
07070100000028000041ED00000000000000000000000268A7671000000000000000000000000000000000000000000000002300000000kubectl-gather-0.10.1/e2e/validate07070100000029000081A400000000000000000000000168A76710000007C8000000000000000000000000000000000000002F00000000kubectl-gather-0.10.1/e2e/validate/validate.gopackage validate
import (
"encoding/json"
"os"
"path/filepath"
"slices"
"testing"
)
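// Exists fails the test if any resource pattern does not match at least one
// file under each cluster directory.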
func Exists(t *testing.T, outputDir string, clusterNames []string, resources ...[]string) {
if !PathExists(t, outputDir) {
t.Fatalf("output directory %q does not exist", outputDir)
}
for _, cluster := range clusterNames {
clusterDir := filepath.Join(outputDir, cluster)
if !PathExists(t, clusterDir) {
t.Fatalf("cluster directory %q does not exist", clusterDir)
}
for _, pattern := range slices.Concat(resources...) {
resource := filepath.Join(clusterDir, pattern)
matches, err := filepath.Glob(resource)
if err != nil {
t.Fatal(err)
}
if len(matches) == 0 {
t.Errorf("resource %q does not exist", resource)
}
}
}
}
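// Missing fails the test if any resource pattern matches a file under any of
// the cluster directories.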
func Missing(t *testing.T, outputDir string, clusterNames []string, resources ...[]string) {
if !PathExists(t, outputDir) {
t.Fatalf("output directory %q does not exist", outputDir)
}
for _, cluster := range clusterNames {
clusterDir := filepath.Join(outputDir, cluster)
for _, pattern := range slices.Concat(resources...) {
resource := filepath.Join(clusterDir, pattern)
matches, err := filepath.Glob(resource)
if err != nil {
t.Fatal(err)
}
if len(matches) > 0 {
t.Errorf("resource %q should not exist: %q", resource, matches)
}
}
}
}
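// JSONLog fails the test if the log file does not exist or contains invalid
// JSON.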
func JSONLog(t *testing.T, logPath string) {
if !PathExists(t, logPath) {
t.Fatalf("log %q does not exist", logPath)
}
file, err := os.Open(logPath)
if err != nil {
t.Fatal(err)
}
defer file.Close()
decoder := json.NewDecoder(file)
lineNum := 0
for decoder.More() {
lineNum++
var jsonData map[string]interface{}
if err := decoder.Decode(&jsonData); err != nil {
t.Fatalf("line %d is not valid JSON: %v", lineNum, err)
}
}
}
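// PathExists returns true if the path exists and false if it does not,
// failing the test on any other error.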
func PathExists(t *testing.T, path string) bool {
if _, err := os.Stat(path); err != nil {
if !os.IsNotExist(err) {
t.Fatalf("error checking path %q: %v", path, err)
}
return false
}
return true
}
0707010000002A000081ED00000000000000000000000168A7671000000132000000000000000000000000000000000000001D00000000kubectl-gather-0.10.1/gather#!/bin/bash
# SPDX-FileCopyrightText: The kubectl-gather authors
# SPDX-License-Identifier: Apache-2.0
# oc adm must-gather adapter for running kubectl-gather.
base="/must-gather"
mkdir -p "$base"
printf "gather\n$(kubectl-gather --version)\n" > "$base/version"
kubectl-gather --directory "$base" "$@"
0707010000002B000081A400000000000000000000000168A767100000096B000000000000000000000000000000000000001D00000000kubectl-gather-0.10.1/go.modmodule github.com/nirs/kubectl-gather
go 1.24.0
toolchain go1.24.5
require (
github.com/spf13/cobra v1.9.1
go.uber.org/zap v1.27.0
k8s.io/api v0.31.1
k8s.io/apimachinery v0.31.1
k8s.io/cli-runtime v0.31.1
k8s.io/client-go v0.31.1
)
require (
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.12.1 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.21.0 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/imdario/mergo v0.3.16 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/moby/term v0.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/spf13/pflag v1.0.6 // indirect
github.com/x448/float16 v0.8.4 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/net v0.38.0 // indirect
golang.org/x/oauth2 v0.27.0 // indirect
golang.org/x/sys v0.31.0 // indirect
golang.org/x/term v0.30.0 // indirect
golang.org/x/text v0.23.0 // indirect
golang.org/x/time v0.6.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20240816214639-573285566f34 // indirect
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)
0707010000002C000081A400000000000000000000000168A7671000003CFC000000000000000000000000000000000000001D00000000kubectl-gather-0.10.1/go.sumgithub.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU=
github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k=
github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw=
github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI=
github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU=
k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI=
k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U=
k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
k8s.io/cli-runtime v0.31.1 h1:/ZmKhmZ6hNqDM+yf9s3Y4KEYakNXUn5sod2LWGGwCuk=
k8s.io/cli-runtime v0.31.1/go.mod h1:pKv1cDIaq7ehWGuXQ+A//1OIF+7DI+xudXtExMCbe9U=
k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0=
k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20240816214639-573285566f34 h1:/amS69DLm09mtbFtN3+LyygSFohnYGMseF8iv+2zulg=
k8s.io/kube-openapi v0.0.0-20240816214639-573285566f34/go.mod h1:G0W3eI9gG219NHRq3h5uQaRBl4pj4ZpwzRP5ti8y770=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
0707010000002D000081ED00000000000000000000000168A7671000000089000000000000000000000000000000000000002E00000000kubectl-gather-0.10.1/kubectl_complete-gather#!/bin/sh
# SPDX-FileCopyrightText: The kubectl-gather authors
# SPDX-License-Identifier: Apache-2.0
exec kubectl gather __complete "$@"
0707010000002E000081A400000000000000000000000168A76710000000B8000000000000000000000000000000000000001E00000000kubectl-gather-0.10.1/main.go// SPDX-FileCopyrightText: The kubectl-gather authors
// SPDX-License-Identifier: Apache-2.0
package main
import "github.com/nirs/kubectl-gather/cmd"
func main() {
cmd.Execute()
}
0707010000002F000041ED00000000000000000000000268A7671000000000000000000000000000000000000000000000001A00000000kubectl-gather-0.10.1/pkg07070100000030000041ED00000000000000000000000268A7671000000000000000000000000000000000000000000000002100000000kubectl-gather-0.10.1/pkg/gather07070100000031000081A400000000000000000000000168A7671000000710000000000000000000000000000000000000002B00000000kubectl-gather-0.10.1/pkg/gather/addons.go// SPDX-FileCopyrightText: The kubectl-gather authors
// SPDX-License-Identifier: Apache-2.0
package gather
import (
"net/http"
"slices"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/rest"
)
type AddonBackend interface {
// Config returns the rest config for this cluster that can be used to
// create a new client.
Config() *rest.Config
// HTTPClient returns the http client connected to the cluster. It can be
// used to create a new client sharing the same http client.
HTTPClient() *http.Client
// Output returns the output for this gathering.
Output() *OutputDirectory
// Options returns gathering options for this cluster.
Options() *Options
// Queue queues a work function on the work queue.
Queue(WorkFunc)
// GatherResource gathers the specified resource asynchronously.
GatherResource(schema.GroupVersionResource, types.NamespacedName)
}
type addonFunc func(AddonBackend) (Addon, error)
type addonInfo struct {
Resource string
AddonFunc addonFunc
}
var addonRegistry = map[string]addonInfo{}
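// registerAddon adds an addon constructor to the registry under the given
// name. Addons register themselves from their init functions.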
func registerAddon(name string, ai addonInfo) {
addonRegistry[name] = ai
}
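// createAddons creates all enabled addons, mapping each addon by the resource
// it inspects.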
func createAddons(backend AddonBackend) (map[string]Addon, error) {
registry := map[string]Addon{}
for name, addonInfo := range addonRegistry {
if addonEnabled(name, backend.Options()) {
addon, err := addonInfo.AddonFunc(backend)
if err != nil {
return nil, err
}
registry[addonInfo.Resource] = addon
}
}
return registry, nil
}
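// addonEnabled reports whether the named addon should be created; a nil
// Addons list enables all addons.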
func addonEnabled(name string, opts *Options) bool {
return opts.Addons == nil || slices.Contains(opts.Addons, name)
}
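// AvailableAddons returns the names of all registered addons.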
func AvailableAddons() []string {
addonNames := make([]string, 0, len(addonRegistry))
for name := range addonRegistry {
addonNames = append(addonNames, name)
}
return addonNames
}
07070100000032000081A400000000000000000000000168A7671000000E07000000000000000000000000000000000000002A00000000kubectl-gather-0.10.1/pkg/gather/agent.go// SPDX-FileCopyrightText: The kubectl-gather authors
// SPDX-License-Identifier: Apache-2.0
package gather
import (
"context"
"fmt"
"time"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
toolswatch "k8s.io/client-go/tools/watch"
)
const (
agentPodTimeoutSeconds = 60
)
type AgentPod struct {
Client *kubernetes.Clientset
Log *zap.SugaredLogger
Pod *corev1.Pod
}
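// NewAgentPod returns a definition for a privileged agent pod named
// "gather-agent-<name>" in the default namespace. The pod is not created in
// the cluster until Create is called.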
func NewAgentPod(name string, client *kubernetes.Clientset, log *zap.SugaredLogger) *AgentPod {
privileged := true
root := int64(0)
pod := corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "gather-agent-" + name,
// TODO: Use a temporary random gather namespace so we don't leave
// leftovers in real namespaces, and so that any leftovers we do leave
// are easy to clean up.
Namespace: corev1.NamespaceDefault,
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "agent",
// TODO: make configurable
Image: "quay.io/nirsof/busybox:stable-musl",
// The agent should stop automatically even if we fail to
// delete it, so we don't waste resources on the target
// cluster. We trap SIGTERM so it terminates immediately
// when deleted.
Command: []string{"sh", "-c", "trap exit TERM; sleep 900"},
SecurityContext: &corev1.SecurityContext{
Privileged: &privileged,
RunAsUser: &root,
},
},
},
},
}
return &AgentPod{Pod: &pod, Client: client, Log: log}
}
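// Create creates the agent pod in the cluster and stores the pod returned by
// the server.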
func (a *AgentPod) Create() error {
a.Log.Debugf("Starting agent pod %q", a)
pod, err := a.Client.CoreV1().Pods(a.Pod.Namespace).
Create(context.TODO(), a.Pod, metav1.CreateOptions{})
if err != nil {
return err
}
a.Pod = pod
return nil
}
type agentWatcher struct {
agent *AgentPod
ctx context.Context
}
func (w *agentWatcher) Watch(opts metav1.ListOptions) (watch.Interface, error) {
w.agent.Log.Debugf("Watching agent pod %q", w.agent)
opts.FieldSelector = fields.OneTermEqualSelector(metav1.ObjectNameField, w.agent.Pod.Name).String()
return w.agent.Client.CoreV1().Pods(w.agent.Pod.Namespace).Watch(w.ctx, opts)
}
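// WaitUntilRunning watches the agent pod until it is running. It returns an
// error if the pod fails, terminates, is deleted, or does not start within
// the timeout.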
func (a *AgentPod) WaitUntilRunning() error {
ctx, cancel := context.WithTimeout(context.Background(), agentPodTimeoutSeconds*time.Second)
defer cancel()
w := agentWatcher{agent: a, ctx: ctx}
watcher, err := toolswatch.NewRetryWatcher(a.Pod.ResourceVersion, &w)
if err != nil {
return err
}
defer watcher.Stop()
for event := range watcher.ResultChan() {
switch event.Type {
case watch.Modified, watch.Added:
pod := event.Object.(*corev1.Pod)
switch pod.Status.Phase {
case corev1.PodRunning:
return nil
case corev1.PodFailed:
return fmt.Errorf("agent pod %q failed", a)
case corev1.PodSucceeded:
return fmt.Errorf("agent pod %q terminated", a)
}
case watch.Error:
err := apierrors.FromObject(event.Object)
return fmt.Errorf("agent pod %q watch error: %s", a, err)
case watch.Deleted:
return fmt.Errorf("agent pod %q was deleted", a)
}
}
return fmt.Errorf("timeout waiting for agent pod %q running phase", a)
}
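// Delete deletes the agent pod, logging a warning if the deletion fails.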
func (a *AgentPod) Delete() {
a.Log.Debugf("Deleting agent pod %q", a)
err := a.Client.CoreV1().Pods(a.Pod.Namespace).
Delete(context.TODO(), a.Pod.Name, metav1.DeleteOptions{})
if err != nil {
a.Log.Warnf("Cannot delete agent pod %q: %s", a, err)
}
}
func (a AgentPod) String() string {
return a.Pod.Namespace + "/" + a.Pod.Name
}
07070100000033000081A400000000000000000000000168A7671000000311000000000000000000000000000000000000002C00000000kubectl-gather-0.10.1/pkg/gather/backend.go// SPDX-FileCopyrightText: The kubectl-gather authors
// SPDX-License-Identifier: Apache-2.0
package gather
import (
"net/http"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/rest"
)
type gatherBackend struct {
g *Gatherer
wq *WorkQueue
}
func (b *gatherBackend) Config() *rest.Config {
return b.g.config
}
func (b *gatherBackend) HTTPClient() *http.Client {
return b.g.httpClient
}
func (b *gatherBackend) Options() *Options {
return b.g.opts
}
func (b *gatherBackend) Output() *OutputDirectory {
return &b.g.output
}
func (b *gatherBackend) Queue(work WorkFunc) {
b.wq.Queue(work)
}
func (b *gatherBackend) GatherResource(gvr schema.GroupVersionResource, name types.NamespacedName) {
b.g.gatherResource(gvr, name)
}
07070100000034000081A400000000000000000000000168A7671000000661000000000000000000000000000000000000002D00000000kubectl-gather-0.10.1/pkg/gather/commands.go// SPDX-FileCopyrightText: The kubectl-gather authors
// SPDX-License-Identifier: Apache-2.0
package gather
import (
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"time"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
)
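// RemoteCommand runs commands in a pod via "kubectl exec" and stores their
// output as files in a local directory.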
type RemoteCommand struct {
pod *corev1.Pod
opts *Options
log *zap.SugaredLogger
directory string
}
var specialCharacters *regexp.Regexp
func NewRemoteCommand(pod *corev1.Pod, opts *Options, log *zap.SugaredLogger, directory string) *RemoteCommand {
return &RemoteCommand{pod: pod, opts: opts, log: log, directory: directory}
}
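// Gather runs the command in the pod's first container and writes its
// standard output to a file named after the command.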
func (c *RemoteCommand) Gather(command ...string) error {
start := time.Now()
args := []string{
"exec",
c.pod.Name,
"--container=" + c.pod.Spec.Containers[0].Name,
"--namespace=" + c.pod.Namespace,
}
if c.opts.Kubeconfig != "" {
args = append(args, "--kubeconfig="+c.opts.Kubeconfig)
}
if c.opts.Context != "" {
args = append(args, "--context="+c.opts.Context)
}
args = append(args, "--")
args = append(args, command...)
filename := c.Filename(command...)
writer, err := os.Create(filepath.Join(c.directory, filename))
if err != nil {
return err
}
defer writer.Close()
cmd := exec.Command("kubectl", args...)
cmd.Stdout = writer
c.log.Debugf("Running command: %s", cmd)
err = cmd.Run()
c.log.Debugf("Gathered %q in %.3f seconds", filename, time.Since(start).Seconds())
return err
}
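// Filename converts a command into a safe file name by replacing special
// characters with "-".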
func (c *RemoteCommand) Filename(command ...string) string {
name := strings.Join(command, " ")
return specialCharacters.ReplaceAllString(name, "-")
}
func init() {
specialCharacters = regexp.MustCompile(`[^\w\.\/]+`)
}
07070100000035000081A400000000000000000000000168A7671000000D85000000000000000000000000000000000000002A00000000kubectl-gather-0.10.1/pkg/gather/files.go// SPDX-FileCopyrightText: The kubectl-gather authors
// SPDX-License-Identifier: Apache-2.0
package gather
import (
"bytes"
"fmt"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
)
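// RemoteDirectory copies directories out of a pod by piping a remote tar to a
// local tar.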
type RemoteDirectory struct {
pod *corev1.Pod
opts *Options
log *zap.SugaredLogger
}
var tarFileChangedError *regexp.Regexp
func NewRemoteDirectory(pod *corev1.Pod, opts *Options, log *zap.SugaredLogger) *RemoteDirectory {
return &RemoteDirectory{pod: pod, opts: opts, log: log}
}
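// Gather copies the remote src directory from the pod into the local dst
// directory.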
func (d *RemoteDirectory) Gather(src string, dst string) error {
// We run remote tar and pipe the output to local tar:
// kubectl exec ... -- tar cf - src | tar xf - -C dst
var remoteError bytes.Buffer
remoteTar := d.remoteTarCommand(src)
remoteTar.Stderr = &remoteError
pipe, err := remoteTar.StdoutPipe()
if err != nil {
return err
}
var localError bytes.Buffer
localTar := d.localTarCommand(dst, d.pathComponents(src))
localTar.Stderr = &localError
localTar.Stdin = pipe
d.log.Debugf("Starting remote tar: %s", remoteTar)
err = remoteTar.Start()
if err != nil {
return err
}
d.log.Debugf("Starting local tar: %s", localTar)
err = localTar.Start()
if err != nil {
d.silentTerminate(remoteTar)
return err
}
// Order is important: we must wait for the local tar first - if the remote
// tar fails, the local tar exits. However, if the local tar fails, the
// remote tar blocks forever.
localErr := localTar.Wait()
remoteErr := remoteTar.Wait()
if remoteErr != nil {
stderr := remoteError.String()
if !d.isFileChangedError(remoteErr, stderr) {
return fmt.Errorf("remote tar error: %s: %q", remoteErr, stderr)
}
}
if localErr != nil {
return fmt.Errorf("local tar error: %s: %q", localErr, localError.String())
}
return nil
}
func (d *RemoteDirectory) isFileChangedError(err error, stderr string) bool {
// tar fails with exit code 1 only when a file changed while it was copied.
// This is an expected condition for log files, so we must ignore it. However,
// kubectl also fails with exit code 1, for example if the pod is not found,
// so we cannot rely on the exit code alone.
exitErr, ok := err.(*exec.ExitError)
return ok && exitErr.ExitCode() == 1 && tarFileChangedError.MatchString(stderr)
}
func (d *RemoteDirectory) remoteTarCommand(src string) *exec.Cmd {
args := []string{
"exec",
d.pod.Name,
"--namespace=" + d.pod.Namespace,
"--container=" + d.pod.Spec.Containers[0].Name,
}
if d.opts.Kubeconfig != "" {
args = append(args, "--kubeconfig="+d.opts.Kubeconfig)
}
if d.opts.Context != "" {
args = append(args, "--context="+d.opts.Context)
}
args = append(args, "--", "tar", "cf", "-", src)
return exec.Command("kubectl", args...)
}
func (d *RemoteDirectory) localTarCommand(dst string, strip int) *exec.Cmd {
args := []string{
"xf",
"-",
"--directory=" + dst,
"--strip-components=" + strconv.Itoa(strip),
}
return exec.Command("tar", args...)
}
func (d *RemoteDirectory) silentTerminate(cmd *exec.Cmd) {
if err := cmd.Process.Kill(); err != nil {
d.log.Warnf("Cannot kill command %v: %s", cmd, err)
return
}
// Command was terminated by signal 9.
_ = cmd.Wait()
}
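// pathComponents returns the number of components in the path, used to strip
// the leading directories when extracting locally.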
func (d *RemoteDirectory) pathComponents(s string) int {
sep := string(os.PathSeparator)
trimmed := strings.Trim(s, sep)
return strings.Count(trimmed, sep) + 1
}
func init() {
tarFileChangedError = regexp.MustCompile(`(?im)^tar: .+ file changed as we read it$`)
}
07070100000036000081A400000000000000000000000168A76710000031C0000000000000000000000000000000000000002B00000000kubectl-gather-0.10.1/pkg/gather/gather.go// SPDX-FileCopyrightText: The kubectl-gather authors
// SPDX-License-Identifier: Apache-2.0
package gather
import (
"bufio"
"context"
"fmt"
"io"
"net/http"
"slices"
"sync"
"time"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/cli-runtime/pkg/printers"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/rest"
)
// Based on stats from an OCP cluster, this value keeps the payload size under
// 4 MiB in most cases. Higher values decrease the number of requests but
// increase CPU time and memory usage. TODO: Needs more testing to find the
// optimal value.
const listResourcesLimit = 100
// Number of workers serving a work queue.
const workQueueSize = 6
// Replaced during build with actual values.
var Version = "latest"
var Image = "quay.io/nirsof/gather:latest"
type Options struct {
Kubeconfig string
Context string
Namespaces []string
Addons []string
Cluster bool
Log *zap.SugaredLogger
}
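// Addon gathers additional data related to a specific resource type.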
type Addon interface {
// Inspect a resource and gather related data.
Inspect(*unstructured.Unstructured) error
}
type Gatherer struct {
config *rest.Config
httpClient *http.Client
client *dynamic.DynamicClient
addons map[string]Addon
output OutputDirectory
opts *Options
gatherQueue *WorkQueue
inspectQueue *WorkQueue
log *zap.SugaredLogger
mutex sync.Mutex
resources map[string]struct{}
}
type resourceInfo struct {
schema.GroupVersionResource
Namespaced bool
}
// Name returns the full name of the resource, used as the directory name in the
// gather directory. Resources with an empty group are gathered in the cluster
// or namespace directory. Resources with a non-empty group are gathered in a
// group directory.
func (r *resourceInfo) Name() string {
if r.Group == "" {
return r.Resource
}
return r.Group + "/" + r.Resource
}
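// New creates a Gatherer that gathers resources from the cluster described by
// config and writes them under directory.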
func New(config *rest.Config, directory string, opts Options) (*Gatherer, error) {
// We want to list all API resources (~80) quickly, gather logs from all pods,
// and run various commands on the nodes. This change makes gathering 60
// times faster than the defaults (9.6 seconds -> 0.15 seconds).
config.QPS = 50
config.Burst = 100
// Disable the useless deprecation warnings.
// TODO: Make this configurable to allow warnings during development.
config.WarningHandler = rest.NoWarnings{}
httpClient, err := rest.HTTPClientFor(config)
if err != nil {
return nil, err
}
client, err := dynamic.NewForConfigAndClient(config, httpClient)
if err != nil {
return nil, err
}
g := &Gatherer{
config: config,
httpClient: httpClient,
client: client,
output: OutputDirectory{base: directory},
opts: &opts,
gatherQueue: NewWorkQueue(workQueueSize),
inspectQueue: NewWorkQueue(workQueueSize),
log: opts.Log,
resources: make(map[string]struct{}),
}
backend := &gatherBackend{
g: g,
wq: g.inspectQueue,
}
addons, err := createAddons(backend)
if err != nil {
return nil, err
}
g.addons = addons
return g, nil
}
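// Gather runs the prepare, gather, and inspect steps, waiting for all queued
// work to complete.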
func (g *Gatherer) Gather() error {
start := time.Now()
g.gatherQueue.Start()
g.inspectQueue.Start()
defer func() {
// Safe close even if some work was queued in prepare before it failed.
g.gatherQueue.Close()
_ = g.gatherQueue.Wait()
g.inspectQueue.Close()
_ = g.inspectQueue.Wait()
}()
// Start the prepare step, looking up namespaces and API resources and
// queuing work on the gather workqueue.
if err := g.prepare(); err != nil {
return err
}
g.log.Debugf("Prepare step finished in %.2f seconds", time.Since(start).Seconds())
// No more work can be queued on the gather queue so we can close it.
g.gatherQueue.Close()
gatherErr := g.gatherQueue.Wait()
g.log.Debugf("Gather step finished in %.2f seconds", time.Since(start).Seconds())
// No more work can be queued on the inspect queue so we can close it.
g.inspectQueue.Close()
inspectErr := g.inspectQueue.Wait()
g.log.Debugf("Inspect step finished in %.2f seconds", time.Since(start).Seconds())
// All work completed. Report fatal errors to caller.
if gatherErr != nil || inspectErr != nil {
return fmt.Errorf("failed to gather (gather: %w, inspect: %w)", gatherErr, inspectErr)
}
return nil
}
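// Count returns the number of unique resources gathered.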
func (g *Gatherer) Count() int {
return len(g.resources)
}
func (g *Gatherer) prepare() error {
var namespaces []string
if len(g.opts.Namespaces) > 0 {
var err error
namespaces, err = g.gatherNamespaces()
if err != nil {
// We cannot gather anything.
return err
}
if len(namespaces) == 0 {
// Nothing to gather - an expected condition when gathering a namespace
// from multiple clusters when the namespace exists only on some of them.
g.log.Debug("No namespace to gather")
if !g.opts.Cluster {
return nil
}
}
} else if g.opts.Namespaces == nil {
namespaces = []string{metav1.NamespaceAll}
}
resources, err := g.listAPIResources()
if err != nil {
// We cannot gather anything.
return fmt.Errorf("cannot list api resources: %s", err)
}
for i := range resources {
r := &resources[i]
if r.Namespaced {
for j := range namespaces {
namespace := namespaces[j]
g.gatherQueue.Queue(func() error {
g.gatherResources(r, namespace)
return nil
})
}
} else if g.opts.Cluster {
g.gatherQueue.Queue(func() error {
g.gatherResources(r, "")
return nil
})
}
}
return nil
}
func (g *Gatherer) listAPIResources() ([]resourceInfo, error) {
start := time.Now()
client, err := discovery.NewDiscoveryClientForConfigAndClient(g.config, g.httpClient)
if err != nil {
return nil, err
}
items, err := client.ServerPreferredResources()
if err != nil {
return nil, err
}
resources := []resourceInfo{}
for _, list := range items {
// Some resources have an empty Group, and the resource list has only
// GroupVersion. It seems that the only way to get the Group is to parse
// it (which is what kubectl api-resources does).
gv, err := schema.ParseGroupVersion(list.GroupVersion)
if err != nil {
continue
}
for i := range list.APIResources {
res := &list.APIResources[i]
if !g.shouldGather(gv, res) {
continue
}
resources = append(resources, resourceInfo{
GroupVersionResource: gv.WithResource(res.Name),
Namespaced: res.Namespaced,
})
}
}
g.log.Debugf("Listed %d api resources in %.3f seconds", len(resources), time.Since(start).Seconds())
return resources, nil
}
// gatherNamespaces gathers the requested namespaces and returns a list of
// the namespaces available on this cluster.
func (g *Gatherer) gatherNamespaces() ([]string, error) {
gvr := corev1.SchemeGroupVersion.WithResource("namespaces")
var found []string
for _, namespace := range g.opts.Namespaces {
ns, err := g.client.Resource(gvr).
Get(context.TODO(), namespace, metav1.GetOptions{})
if err != nil {
if !errors.IsNotFound(err) {
return nil, fmt.Errorf("cannot get namespace %q: %s", namespace, err)
}
// Expected condition when gathering multiple clusters.
g.log.Debugf("Skipping missing namespace %q", namespace)
continue
}
r := resourceInfo{GroupVersionResource: gvr}
key := g.keyFromResource(&r, ns)
if g.addResource(key) {
if err := g.dumpResource(&r, ns); err != nil {
g.log.Warnf("Cannot dump %q: %s", key, err)
}
}
found = append(found, namespace)
}
return found, nil
}
func (g *Gatherer) shouldGather(gv schema.GroupVersion, res *metav1.APIResource) bool {
// We cannot gather resources we cannot list.
if !slices.Contains(res.Verbs, "list") {
return false
}
if len(g.opts.Namespaces) != 0 {
// OLM bug? - packagemanifests are returned for *every namespace* when listing by namespace.
// https://github.com/operator-framework/operator-lifecycle-manager/issues/2932
if res.Name == "packagemanifests" && gv.Group == "packages.operators.coreos.com" {
return false
}
}
// Skip "events", replaced by "events.events.k8s.io". Otherwise we
// get all events twice, as "events" and as "events.events.k8s.io",
// since both resources contain the same content.
if res.Name == "events" && gv.Group == "" {
return false
}
// Avoid warning: "v1 ComponentStatus is deprecated in v1.19+"
if res.Name == "componentstatuses" && gv.Group == "" {
return false
}
return true
}
func (g *Gatherer) gatherResources(r *resourceInfo, namespace string) {
start := time.Now()
opts := metav1.ListOptions{Limit: listResourcesLimit}
count := 0
for {
list, err := g.listResources(r, namespace, opts)
if err != nil {
// Fall back to full list only if this was an attempt to get the next
// page and the resource expired.
if opts.Continue == "" || !errors.IsResourceExpired(err) {
g.log.Warnf("Cannot list %q: %s", r.Name(), err)
break
}
g.log.Debugf("Falling back to full list for %q: %s", r.Name(), err)
opts.Limit = 0
opts.Continue = ""
list, err = g.listResources(r, namespace, opts)
if err != nil {
g.log.Warnf("Cannot list %q: %s", r.Name(), err)
break
}
}
addon := g.addons[r.Name()]
for i := range list.Items {
item := &list.Items[i]
key := g.keyFromResource(r, item)
if !g.addResource(key) {
continue
}
count += 1
if err := g.dumpResource(r, item); err != nil {
g.log.Warnf("Cannot dump %q: %s", key, err)
}
if addon != nil {
if err := addon.Inspect(item); err != nil {
g.log.Warnf("Cannot inspect %q: %s", key, err)
}
}
}
opts.Continue = list.GetContinue()
if opts.Continue == "" {
break
}
}
g.log.Debugf("Gathered %d %q in %.3f seconds", count, r.Name(), time.Since(start).Seconds())
}
func (g *Gatherer) listResources(r *resourceInfo, namespace string, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) {
start := time.Now()
ctx := context.TODO()
var list *unstructured.UnstructuredList
var err error
if r.Namespaced {
list, err = g.client.Resource(r.GroupVersionResource).
Namespace(namespace).
List(ctx, opts)
} else {
list, err = g.client.Resource(r.GroupVersionResource).
List(ctx, opts)
}
if err != nil {
return nil, err
}
g.log.Debugf("Listed %d %q in %.3f seconds", len(list.Items), r.Name(), time.Since(start).Seconds())
return list, nil
}
func (g *Gatherer) gatherResource(gvr schema.GroupVersionResource, name types.NamespacedName) {
start := time.Now()
r := resourceInfo{GroupVersionResource: gvr, Namespaced: name.Namespace != ""}
key := g.keyFromName(&r, name)
if !g.addResource(key) {
return
}
item, err := g.getResource(&r, name)
if err != nil {
g.log.Warnf("Cannot get %q: %s", key, err)
return
}
if err := g.dumpResource(&r, item); err != nil {
g.log.Warnf("Cannot dump %q: %s", key, err)
return
}
g.log.Debugf("Gathered %q in %.3f seconds", key, time.Since(start).Seconds())
}
func (g *Gatherer) getResource(r *resourceInfo, name types.NamespacedName) (*unstructured.Unstructured, error) {
ctx := context.TODO()
var opts metav1.GetOptions
if r.Namespaced {
return g.client.Resource(r.GroupVersionResource).
Namespace(name.Namespace).
Get(ctx, name.Name, opts)
} else {
return g.client.Resource(r.GroupVersionResource).
Get(ctx, name.Name, opts)
}
}
func (g *Gatherer) dumpResource(r *resourceInfo, item *unstructured.Unstructured) error {
dst, err := g.createResource(r, item)
if err != nil {
return err
}
defer dst.Close()
writer := bufio.NewWriter(dst)
printer := printers.YAMLPrinter{}
if err := printer.PrintObj(item, writer); err != nil {
return err
}
return writer.Flush()
}
func (g *Gatherer) createResource(r *resourceInfo, item *unstructured.Unstructured) (io.WriteCloser, error) {
if r.Namespaced {
return g.output.CreateNamespacedResource(item.GetNamespace(), r.Name(), item.GetName())
} else {
return g.output.CreateClusterResource(r.Name(), item.GetName())
}
}
func (g *Gatherer) keyFromResource(r *resourceInfo, item *unstructured.Unstructured) string {
name := types.NamespacedName{Namespace: item.GetNamespace(), Name: item.GetName()}
return g.keyFromName(r, name)
}
func (g *Gatherer) keyFromName(r *resourceInfo, name types.NamespacedName) string {
if r.Namespaced {
return fmt.Sprintf("namespaces/%s/%s/%s", name.Namespace, r.Name(), name.Name)
} else {
return fmt.Sprintf("cluster/%s/%s", r.Name(), name.Name)
}
}
func (g *Gatherer) addResource(key string) bool {
g.mutex.Lock()
defer g.mutex.Unlock()
if _, ok := g.resources[key]; ok {
return false
}
g.resources[key] = struct{}{}
return true
}
07070100000037000081A400000000000000000000000168A76710000012E1000000000000000000000000000000000000002900000000kubectl-gather-0.10.1/pkg/gather/logs.go// SPDX-FileCopyrightText: The kubectl-gather authors
// SPDX-License-Identifier: Apache-2.0
package gather
import (
"context"
"fmt"
"io"
"time"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/client-go/kubernetes"
)
const (
logsName = "logs"
)
type LogsAddon struct {
AddonBackend
client *kubernetes.Clientset
log *zap.SugaredLogger
}
type containerInfo struct {
Namespace string
Pod string
Name string
HasPreviousLog bool
}
func (c containerInfo) String() string {
return c.Namespace + "/" + c.Pod + "/" + c.Name
}
func init() {
registerAddon(logsName, addonInfo{
Resource: "pods",
AddonFunc: NewLogsAddon,
})
}
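// NewLogsAddon creates the logs addon, which gathers current and previous
// container logs for inspected pods.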
func NewLogsAddon(backend AddonBackend) (Addon, error) {
client, err := kubernetes.NewForConfigAndClient(backend.Config(), backend.HTTPClient())
if err != nil {
return nil, err
}
return &LogsAddon{
AddonBackend: backend,
client: client,
log: backend.Options().Log.Named(logsName),
}, nil
}
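// Inspect queues log gathering for every container in the pod, including
// previous logs for containers that were restarted.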
func (a *LogsAddon) Inspect(pod *unstructured.Unstructured) error {
a.log.Debugf("Inspecting pod \"%s/%s\"", pod.GetNamespace(), pod.GetName())
containers, err := a.listContainers(pod)
if err != nil {
return fmt.Errorf("cannot find containers in pod \"%s/%s\": %s",
pod.GetNamespace(), pod.GetName(), err)
}
for i := range containers {
container := containers[i]
a.Queue(func() error {
opts := corev1.PodLogOptions{Container: container.Name}
a.gatherContainerLog(container, &opts)
return nil
})
if container.HasPreviousLog {
a.Queue(func() error {
opts := corev1.PodLogOptions{Container: container.Name, Previous: true}
a.gatherContainerLog(container, &opts)
return nil
})
}
}
return nil
}
func (a *LogsAddon) gatherContainerLog(container *containerInfo, opts *corev1.PodLogOptions) {
start := time.Now()
which := "current"
if opts.Previous {
which = "previous"
}
req := a.client.CoreV1().Pods(container.Namespace).GetLogs(container.Pod, opts)
src, err := req.Stream(context.TODO())
if err != nil {
// Getting the log is possible only if a container is running, but
// checking the container state before the call is racy. We get a
// BadRequest error like: "container ... in pod ... is waiting to start:
// PodInitializing", so there is no way to detect the actual problem.
// Since this is an expected situation, and getting logs is best effort,
// we log this at debug level.
a.log.Debugf("Cannot get log for \"%s/%s\": %v", container, which, err)
return
}
defer src.Close()
dst, err := a.Output().CreateContainerLog(
container.Namespace, container.Pod, container.Name, string(which))
if err != nil {
a.log.Warnf("Cannot create \"%s/%s.log\": %s", container, which, err)
return
}
defer dst.Close()
n, err := io.Copy(dst, src)
if err != nil {
a.log.Warnf("Cannot copy \"%s/%s.log\": %s", container, which, err)
}
elapsed := time.Since(start).Seconds()
rate := float64(n) / float64(1024*1024) / elapsed
a.log.Debugf("Gathered \"%s/%s.log\" in %.3f seconds (%.2f MiB/s)",
container, which, elapsed, rate)
}
func (a *LogsAddon) listContainers(pod *unstructured.Unstructured) ([]*containerInfo, error) {
var result []*containerInfo
for _, key := range []string{"containerStatuses", "initContainerStatuses"} {
statuses, found, err := unstructured.NestedSlice(pod.Object, "status", key)
if err != nil {
a.log.Warnf("Cannot get %q for pod \"%s/%s\": %s",
key, pod.GetNamespace(), pod.GetName(), err)
continue
}
if !found {
continue
}
for _, c := range statuses {
status, ok := c.(map[string]interface{})
if !ok {
a.log.Warnf("Invalid container status for pod \"%s/%s\": %s",
pod.GetNamespace(), pod.GetName(), status)
continue
}
name, found, err := unstructured.NestedString(status, "name")
if err != nil || !found {
a.log.Warnf("No container status name for pod \"%s/%s\": %s",
pod.GetNamespace(), pod.GetName(), status)
continue
}
result = append(result, &containerInfo{
Namespace: pod.GetNamespace(),
Pod: pod.GetName(),
Name: name,
HasPreviousLog: containerHasPreviousLog(status),
})
}
}
return result, nil
}
// containerHasPreviousLog returns true if we can get a previous log for a
// container, based on container status.
//
// lastState:
// terminated:
// containerID: containerd://...
//
// See also https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/kubelet_pods.go#L1453
func containerHasPreviousLog(status map[string]interface{}) bool {
containerID, found, err := unstructured.NestedString(status, "lastState", "terminated", "containerID")
if err != nil || !found {
return false
}
return containerID != ""
}
07070100000038000081A400000000000000000000000168A7671000000688000000000000000000000000000000000000002B00000000kubectl-gather-0.10.1/pkg/gather/output.go// SPDX-FileCopyrightText: The kubectl-gather authors
// SPDX-License-Identifier: Apache-2.0
package gather
import (
"io"
"os"
"path/filepath"
)
const (
namespacesDir = "namespaces"
clusterDir = "cluster"
addonsDir = "addons"
resourceSuffix = ".yaml"
logSuffix = ".log"
)
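// OutputDirectory manages the layout of the gather output directory.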
type OutputDirectory struct {
base string
}
func (o *OutputDirectory) CreateContainerLog(namespace string, pod string, container string, name string) (io.WriteCloser, error) {
dir, err := createDirectory(o.base, namespacesDir, namespace, "pods", pod, container)
if err != nil {
return nil, err
}
return createFile(dir, name+logSuffix)
}
func (o *OutputDirectory) CreateNamespacedResource(namespace string, resource string, name string) (io.WriteCloser, error) {
dir, err := createDirectory(o.base, namespacesDir, namespace, resource)
if err != nil {
return nil, err
}
return createFile(dir, name+resourceSuffix)
}
func (o *OutputDirectory) CreateClusterResource(resource string, name string) (io.WriteCloser, error) {
dir, err := createDirectory(o.base, clusterDir, resource)
if err != nil {
return nil, err
}
return createFile(dir, name+resourceSuffix)
}
func (o *OutputDirectory) CreateAddonDir(name string, more ...string) (string, error) {
args := append([]string{o.base, addonsDir, name}, more...)
return createDirectory(args...)
}
func createDirectory(args ...string) (string, error) {
dir := filepath.Join(args...)
if err := os.MkdirAll(dir, 0750); err != nil {
return "", err
}
return dir, nil
}
func createFile(dir string, name string) (io.WriteCloser, error) {
filename := filepath.Join(dir, name)
return os.Create(filename)
}
07070100000039000081A400000000000000000000000168A76710000005B7000000000000000000000000000000000000003200000000kubectl-gather-0.10.1/pkg/gather/output_reader.go// SPDX-FileCopyrightText: The kubectl-gather authors
// SPDX-License-Identifier: Apache-2.0
package gather
import (
"errors"
"os"
"path/filepath"
"strings"
)
type OutputReader struct {
base string
}
// NewOutputReader creates a new OutputReader instance.
func NewOutputReader(path string) *OutputReader {
return &OutputReader{base: path}
}
// ListResources lists resource names in namespace.
func (r *OutputReader) ListResources(namespace, resource string) ([]string, error) {
var resourceDir string
if namespace == "" {
resourceDir = filepath.Join(r.base, clusterDir, resource)
} else {
resourceDir = filepath.Join(r.base, namespacesDir, namespace, resource)
}
entries, err := os.ReadDir(resourceDir)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return nil, nil
}
return nil, err
}
var names []string
for _, e := range entries {
// Skip pod directory.
if e.IsDir() {
continue
}
resourceName := strings.TrimSuffix(e.Name(), resourceSuffix)
names = append(names, resourceName)
}
return names, nil
}
// ReadResource reads named resource data.
func (r *OutputReader) ReadResource(namespace, resource, name string) ([]byte, error) {
var resourcePath string
if namespace == "" {
resourcePath = filepath.Join(r.base, clusterDir, resource, name+resourceSuffix)
} else {
resourcePath = filepath.Join(r.base, namespacesDir, namespace, resource, name+resourceSuffix)
}
return os.ReadFile(resourcePath)
}
0707010000003A000081A400000000000000000000000168A76710000007CC000000000000000000000000000000000000002900000000kubectl-gather-0.10.1/pkg/gather/pvcs.go// SPDX-FileCopyrightText: The kubectl-gather authors
// SPDX-License-Identifier: Apache-2.0
package gather
import (
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
)
const (
pvcsName = "pvcs"
)
type pvcsAddon struct {
AddonBackend
log *zap.SugaredLogger
}
func init() {
registerAddon(pvcsName, addonInfo{
Resource: "persistentvolumeclaims",
AddonFunc: NewPVCAddon,
})
}
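// NewPVCAddon creates the pvcs addon, which gathers the PersistentVolume and
// StorageClass referenced by each inspected PVC.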
func NewPVCAddon(backend AddonBackend) (Addon, error) {
return &pvcsAddon{
AddonBackend: backend,
log: backend.Options().Log.Named(pvcsName),
}, nil
}
func (a *pvcsAddon) Inspect(pvc *unstructured.Unstructured) error {
// When the cluster flag is set, PV and StorageClass resources are already gathered at the cluster level.
if a.Options().Cluster {
return nil
}
a.log.Debugf("Inspecting pvc \"%s/%s\"", pvc.GetNamespace(), pvc.GetName())
a.gatherPersistentVolume(pvc)
a.gatherStorageClass(pvc)
return nil
}
func (a *pvcsAddon) gatherPersistentVolume(pvc *unstructured.Unstructured) {
name, found, err := unstructured.NestedString(pvc.Object, "spec", "volumeName")
if err != nil {
a.log.Warnf("Cannot get pvc \"%s/%s\" volumeName: %s",
pvc.GetNamespace(), pvc.GetName(), err)
return
}
if name == "" || !found {
return
}
gvr := corev1.SchemeGroupVersion.WithResource("persistentvolumes")
a.GatherResource(gvr, types.NamespacedName{Name: name})
}
func (a *pvcsAddon) gatherStorageClass(pvc *unstructured.Unstructured) {
name, found, err := unstructured.NestedString(pvc.Object, "spec", "storageClassName")
if err != nil {
a.log.Warnf("Cannot get pvc \"%s/%s\" storageClassName: %s",
pvc.GetNamespace(), pvc.GetName(), err)
return
}
if name == "" || !found {
// TODO: Get the default storage class?
return
}
gvr := storagev1.SchemeGroupVersion.WithResource("storageclasses")
a.GatherResource(gvr, types.NamespacedName{Name: name})
}
0707010000003B000081A400000000000000000000000168A76710000016CF000000000000000000000000000000000000002900000000kubectl-gather-0.10.1/pkg/gather/rook.go// SPDX-FileCopyrightText: The kubectl-gather authors
// SPDX-License-Identifier: Apache-2.0
package gather
import (
"context"
"fmt"
"path/filepath"
"strings"
"time"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes"
)
const (
rookName = "rook"
)
type RookAddon struct {
AddonBackend
client *kubernetes.Clientset
log *zap.SugaredLogger
}
func init() {
registerAddon(rookName, addonInfo{
Resource: "ceph.rook.io/cephclusters",
AddonFunc: NewRookAddon,
})
}
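// NewRookAddon creates the rook addon, which gathers ceph command output and,
// when the log collector is enabled, ceph logs from the cluster nodes.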
func NewRookAddon(backend AddonBackend) (Addon, error) {
clientSet, err := kubernetes.NewForConfigAndClient(backend.Config(), backend.HTTPClient())
if err != nil {
return nil, err
}
return &RookAddon{
AddonBackend: backend,
client: clientSet,
log: backend.Options().Log.Named(rookName),
}, nil
}
func (a *RookAddon) Inspect(cephcluster *unstructured.Unstructured) error {
namespace := cephcluster.GetNamespace()
a.log.Debugf("Inspecting cephcluster \"%s/%s\"", namespace, cephcluster.GetName())
a.gatherCommands(namespace)
if a.logCollectorEnabled(cephcluster) {
dataDir, err := a.dataDirHostPath(cephcluster)
if err != nil {
a.log.Warnf("Cannot get cephcluster dataDirHostPath: %s", err)
return nil
}
a.gatherLogs(namespace, dataDir)
}
return nil
}
func (a *RookAddon) gatherCommands(namespace string) {
tools, err := a.findPod(namespace, "app=rook-ceph-tools")
if err != nil {
a.log.Warnf("Cannot find tools pod: %s", err)
return
}
a.log.Debugf("Using pod %q", tools.Name)
commands, err := a.Output().CreateAddonDir(rookName, "commands")
if err != nil {
a.log.Warnf("Cannot create commands directory: %s", err)
return
}
a.log.Debugf("Storing commands output in %q", commands)
rc := NewRemoteCommand(tools, a.Options(), a.log, commands)
// Running remote ceph commands in parallel is much faster.
a.Queue(func() error {
a.gatherCommand(rc, "ceph", "osd", "blocklist", "ls")
return nil
})
a.Queue(func() error {
a.gatherCommand(rc, "ceph", "status")
return nil
})
}
func (a *RookAddon) gatherCommand(rc *RemoteCommand, command ...string) {
if err := rc.Gather(command...); err != nil {
a.log.Warnf("Error running %q: %s", strings.Join(command, "-"), err)
}
}
func (a *RookAddon) logCollectorEnabled(cephcluster *unstructured.Unstructured) bool {
enabled, found, err := unstructured.NestedBool(cephcluster.Object, "spec", "logCollector", "enabled")
if err != nil {
a.log.Warnf("Cannot get cephcluster .spec.logCollector.enabled: %s", err)
}
return found && enabled
}
func (a *RookAddon) dataDirHostPath(cephcluster *unstructured.Unstructured) (string, error) {
path, found, err := unstructured.NestedString(cephcluster.Object, "spec", "dataDirHostPath")
if err != nil {
return "", err
}
if !found {
return "", fmt.Errorf("cannot find .spec.dataDirHostPath")
}
return path, nil
}
func (a *RookAddon) gatherLogs(namespace string, dataDir string) {
nodes, err := a.findNodesToGather(namespace)
if err != nil {
a.log.Warnf("Cannot find nodes: %s", err)
return
}
for i := range nodes {
nodeName := nodes[i]
a.Queue(func() error {
a.gatherNodeLogs(namespace, nodeName, dataDir)
return nil
})
}
}
func (a *RookAddon) findNodesToGather(namespace string) ([]string, error) {
pods, err := a.client.CoreV1().
Pods(namespace).
List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nil, err
}
names := sets.New[string]()
for i := range pods.Items {
pod := &pods.Items[i]
if pod.Spec.NodeName != "" {
names.Insert(pod.Spec.NodeName)
}
}
return names.UnsortedList(), nil
}
func (a *RookAddon) gatherNodeLogs(namespace string, nodeName string, dataDir string) {
a.log.Debugf("Gathering ceph logs from node %q dataDir %q", nodeName, dataDir)
start := time.Now()
agent, err := a.createAgentPod(nodeName, dataDir)
if err != nil {
a.log.Warnf("Cannot create agent pod: %s", err)
return
}
defer agent.Delete()
if err := agent.WaitUntilRunning(); err != nil {
a.log.Warnf("Error waiting for agent pod: %s", agent, err)
return
}
a.log.Debugf("Agent pod %q running in %.3f seconds", agent, time.Since(start).Seconds())
logs, err := a.Output().CreateAddonDir(rookName, "logs", nodeName)
if err != nil {
a.log.Warnf("Cannot create logs directory: %s", err)
return
}
rd := NewRemoteDirectory(agent.Pod, a.Options(), a.log)
src := filepath.Join(dataDir, namespace, "log")
if err := rd.Gather(src, logs); err != nil {
a.log.Warnf("Cannot copy %q from agent pod %q: %s", src, agent.Pod.Name, err)
}
a.log.Debugf("Gathered node %q logs in %.3f seconds", nodeName, time.Since(start).Seconds())
}
func (a *RookAddon) createAgentPod(nodeName string, dataDir string) (*AgentPod, error) {
agent := NewAgentPod(rookName+"-"+nodeName, a.client, a.log)
agent.Pod.Spec.NodeName = nodeName
agent.Pod.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{
{
Name: "rook-data",
MountPath: dataDir,
ReadOnly: true,
},
}
agent.Pod.Spec.Volumes = []corev1.Volume{
{
Name: "rook-data",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{Path: dataDir},
},
},
}
if err := agent.Create(); err != nil {
return nil, err
}
return agent, nil
}
func (a *RookAddon) findPod(namespace string, labelSelector string) (*corev1.Pod, error) {
pods, err := a.client.CoreV1().
Pods(namespace).
List(context.TODO(), metav1.ListOptions{
LabelSelector: labelSelector,
})
if err != nil {
return nil, err
}
if len(pods.Items) == 0 {
return nil, fmt.Errorf("no pod matches %q in namespace %q", labelSelector, namespace)
}
return &pods.Items[0], nil
}
0707010000003C000081A400000000000000000000000168A76710000004D9000000000000000000000000000000000000002E00000000kubectl-gather-0.10.1/pkg/gather/workqueue.go// SPDX-FileCopyrightText: The kubectl-gather authors
// SPDX-License-Identifier: Apache-2.0
package gather
import "sync"
type WorkFunc func() error
type Queuer interface {
Queue(WorkFunc)
}
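// WorkQueue runs queued work functions on a fixed number of worker goroutines
// and records the first error.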
type WorkQueue struct {
queue chan WorkFunc
workers int
wg sync.WaitGroup
mutex sync.Mutex
err error
closed bool
}
func NewWorkQueue(workers int) *WorkQueue {
return &WorkQueue{
queue: make(chan WorkFunc),
workers: workers,
}
}
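// Queue adds a work function to the queue, blocking until a worker receives it.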
func (q *WorkQueue) Queue(work WorkFunc) {
q.queue <- work
}
func (q *WorkQueue) Start() {
for i := 0; i < q.workers; i++ {
q.wg.Add(1)
go func() {
defer q.wg.Done()
for work := range q.queue {
err := work()
if err != nil {
q.setFirstError(err)
}
}
}()
}
}
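// Wait blocks until all workers finish and returns the first error reported
// by a work function.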
func (q *WorkQueue) Wait() error {
q.wg.Wait()
return q.firstError()
}
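// Close closes the queue so workers exit after draining queued work. It is
// safe to call Close multiple times.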
func (q *WorkQueue) Close() {
// Closing a closed channel panics, so we must close it exactly once.
q.mutex.Lock()
defer q.mutex.Unlock()
if !q.closed {
close(q.queue)
q.closed = true
}
}
func (q *WorkQueue) firstError() error {
q.mutex.Lock()
defer q.mutex.Unlock()
return q.err
}
func (q *WorkQueue) setFirstError(err error) {
q.mutex.Lock()
defer q.mutex.Unlock()
if q.err == nil {
q.err = err
}
}
07070100000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000B00000000TRAILER!!!341 blocks