File aardvark-dns-1.14.0.obscpio of Package aardvark-dns

07070100000000000081A400000000000000000000000167AA0322000023D1000000000000000000000000000000000000002000000000aardvark-dns-1.14.0/.cirrus.yml---

# Format Ref: https://cirrus-ci.org/guide/writing-tasks/

# Main collection of env. vars to set for all tasks and scripts.
env:
    # Actual|intended branch for this run
    DEST_BRANCH: "main"
    # The default is 'sh' if unspecified
    CIRRUS_SHELL: "/bin/bash"
    # Location where source repo. will be cloned
    CIRRUS_WORKING_DIR: "/var/tmp/aardvark-dns"
    # Rust package cache also lives here
    CARGO_HOME: "/var/cache/cargo"
    # Rust compiler output lives here (see Makefile)
    CARGO_TARGET_DIR: "$CIRRUS_WORKING_DIR/targets"
    # Testing depends on the latest netavark binary from upstream CI
    NETAVARK_BRANCH: "main"
    NETAVARK_URL: "https://api.cirrus-ci.com/v1/artifact/github/containers/netavark/success/binary.zip?branch=${NETAVARK_BRANCH}"
    # Save a little typing (path relative to $CIRRUS_WORKING_DIR)
    SCRIPT_BASE: "./contrib/cirrus"
    IMAGE_SUFFIX: "c20250131t121915z-f41f40d13"
    FEDORA_NETAVARK_IMAGE: "fedora-netavark-${IMAGE_SUFFIX}"
    FEDORA_NETAVARK_AMI: "fedora-netavark-aws-arm64-${IMAGE_SUFFIX}"
    EC2_INST_TYPE: "t4g.xlarge"


gcp_credentials: ENCRYPTED[f6a0e4101418bec8180783b208721fc990772817364fed0346f5fd126bf0cfca03738dd8c7fb867944637a1eac7cec37]

aws_credentials: ENCRYPTED[3fab904a98355f84b0bac084f8a50428ff8a27dd2b6a6c42fca77df89010e620a1da3cd246a50b1074e6787c42818080]

build_task:
  alias: "build"
  # Compiling is very CPU intensive, make it chooch quicker for this task only
  gce_instance: &standard_build_gce_x86_64
    image_project: "libpod-218412"
    zone: "us-central1-c"
    disk: 200  # GB, do not set <200 per gcloud warning re: I/O performance
    cpu: 8
    memory: "8Gb"
    image_name: "${FEDORA_NETAVARK_IMAGE}"
  cargo_cache: &cargo_cache
    folder: "$CARGO_HOME"
    fingerprint_script: echo -e "cargo_v3_${DEST_BRANCH}_amd64\n---\n$(<Cargo.lock)\n---\n$(<Cargo.toml)"
    reupload_on_changes: true
  targets_cache: &targets_cache
    folder: "$CARGO_TARGET_DIR"
    fingerprint_script: echo -e "targets_v3_${CIRRUS_TAG}${DEST_BRANCH}${CIRRUS_PR}_amd64\n---\n$(<Cargo.lock)\n---\n$(<Cargo.toml)"
    reupload_on_changes: true
  bin_cache: &bin_cache
    # This simply prevents rebuilding bin/aardvark-dns* for every subsequent task.
    folder: "$CIRRUS_WORKING_DIR/bin"
    fingerprint_key: "bin_v1_${CIRRUS_BUILD_ID}" # Cache only within same build
    reupload_on_changes: true
  setup_script: &setup "$SCRIPT_BASE/setup.sh $CIRRUS_TASK_NAME"
  main_script: &main "$SCRIPT_BASE/runner.sh $CIRRUS_TASK_NAME"
  # N/B: This script comes from `main` on the netavark repo
  cache_grooming_script: &groom bash "$SCRIPT_BASE/netavark_cache_groom.sh"
  upload_caches: [ "cargo", "targets", "bin" ]


build_aarch64_task:
  alias: "build_aarch64"
  # Compiling is very CPU intensive, make it chooch quicker for this task only
  ec2_instance: &standard_build_ec2_aarch64
    image: "$FEDORA_NETAVARK_AMI"
    type: $EC2_INST_TYPE
    region: us-east-1
    architecture: arm64  # CAUTION: This has to be "arm64", not aarch64.
  cargo_cache: &cargo_cache_aarch64
    <<: *cargo_cache
    fingerprint_script: echo -e "cargo_v3_${DEST_BRANCH}_aarch64\n---\n$(<Cargo.lock)\n---\n$(<Cargo.toml)"
  targets_cache: &targets_cache_aarch64
    <<: *targets_cache
    fingerprint_script: echo -e "targets_v3_${CIRRUS_TAG}${DEST_BRANCH}${CIRRUS_PR}_aarch64\n---\n$(<Cargo.lock)\n---\n$(<Cargo.toml)"
  bin_cache: &bin_cache_aarch64
    <<: *bin_cache
    fingerprint_key: "cargo_v1_${DEST_BRANCH}_aarch64"
  setup_script: *setup
  main_script: *main
  cache_grooming_script: *groom
  upload_caches: [ "cargo", "targets", "bin" ]
  # Downstream CI needs the aarch64 binaries from this CI system.
  # However, we don't want to confuse architectures.
  art_prep_script:
    - cd bin
    - ls -la
    - mv aardvark-dns aardvark-dns.$(uname -m)-unknown-linux-gnu
    - mv aardvark-dns.debug aardvark-dns.debug.$(uname -m)-unknown-linux-gnu
    - mv aardvark-dns.info aardvark-dns.info.$(uname -m)-unknown-linux-gnu
  armbinary_artifacts:  # See success_task
    path: ./bin/aardvark-dns*


validate_task:
  alias: "validate"
  depends_on:
    - "build"
  gce_instance: &standard_gce_x86_64
    <<: *standard_build_gce_x86_64
    cpu: 2
    memory: "4Gb"
  # From this point forward, all caches become read-only for this run.
  cargo_cache: &ro_cargo_cache
    <<: *cargo_cache
    reupload_on_changes: false
  targets_cache: &ro_targets_cache
    <<: *targets_cache
    reupload_on_changes: false
  bin_cache: &ro_bin_cache
    <<: *bin_cache
    reupload_on_changes: false
  setup_script: *setup
  main_script: *main


validate_aarch64_task:
  alias: "validate_aarch64"
  depends_on:
    - "build_aarch64"
  ec2_instance: *standard_build_ec2_aarch64
  # From this point forward, all caches become read-only for this run.
  cargo_cache: &ro_cargo_cache_aarch64
    <<: *cargo_cache_aarch64
    reupload_on_changes: false
  targets_cache: &ro_targets_cache_aarch64
    <<: *targets_cache_aarch64
    reupload_on_changes: false
  bin_cache: &ro_bin_cache_aarch64
    <<: *bin_cache_aarch64
    reupload_on_changes: false
  setup_script: *setup
  main_script: *main

unit_task:
  alias: "unit"
  depends_on:
    - "build"  # Run in parallel with validate to save some time
  gce_instance: *standard_gce_x86_64
  cargo_cache: *ro_cargo_cache
  targets_cache: *ro_targets_cache
  bin_cache: *ro_bin_cache
  setup_script: *setup
  main_script: *main


unit_aarch64_task:
  alias: "unit_aarch64"
  depends_on:
    - "build_aarch64"  # Run in parallel with validate to save some time
  ec2_instance: *standard_build_ec2_aarch64
  cargo_cache: *ro_cargo_cache_aarch64
  targets_cache: *ro_targets_cache_aarch64
  bin_cache: *ro_bin_cache_aarch64
  setup_script: *setup
  main_script: *main


integration_task:
  alias: "integration"
  depends_on:
    - "unit"
  gce_instance: *standard_gce_x86_64
  cargo_cache: *ro_cargo_cache
  targets_cache: *ro_targets_cache
  bin_cache: *ro_bin_cache
  setup_script: *setup
  main_script: *main


integration_aarch64_task:
  alias: "integration_aarch64"
  depends_on:
    - "unit_aarch64"
  ec2_instance: *standard_build_ec2_aarch64
  cargo_cache: *ro_cargo_cache_aarch64
  targets_cache: *ro_targets_cache_aarch64
  bin_cache: *ro_bin_cache_aarch64
  setup_script: *setup
  main_script: *main


# This task is critical.  It updates the "last-used by" timestamp stored
# in metadata for all VM images.  This mechanism functions in tandem with
# an out-of-band pruning operation to remove disused VM images.
meta_task:
    alias: meta
    name: "VM img. keepalive"
    container:
        cpu: 2
        memory: 2
        image: quay.io/libpod/imgts:latest
    env:
        # Space-separated list of images used by this repository state
        IMGNAMES: "${FEDORA_NETAVARK_IMAGE}"
        EC2IMGNAMES: "$FEDORA_NETAVARK_AMI"
        BUILDID: "${CIRRUS_BUILD_ID}"
        REPOREF: "${CIRRUS_REPO_NAME}"
        AWSINI: ENCRYPTED[94661de0a481bfa6757f6ef26f896c28578c764f00364a293871b7576337c947304e2dfa35e3819c066057ef3957237f]
        GCPJSON: ENCRYPTED[4c8f37db84c8afb3d67932ebbf1f062e5e7e54b64e9f99624d96d828d2b8677624fb1470a9b12c097e06afeb11fb8c4e]
        GCPNAME: ENCRYPTED[1d96b7a11a12abe142a2e6f5a97ff6cca2bdcbe73724d560d9a2d339b7323b911c814d814057e19932f9d339e9a4c929]
        GCPPROJECT: libpod-218412
    clone_script: &noop mkdir -p $CIRRUS_WORKING_DIR  # source not needed
    script: /usr/local/bin/entrypoint.sh

msrv_build_task:
    alias: msrv_build
    depends_on:
      - "build"
    gce_instance: *standard_gce_x86_64
    container:
        cpu: 2
        memory: 2
        # When bumping the image always remember to update the README MSRV as well.
        image: quay.io/libpod/nv-rust:1.76
    script:
        - make build


success_task:
  alias: "success"
  gce_instance: *standard_gce_x86_64
  name: "Total success"
  depends_on:
    - "build"
    - "build_aarch64"
    - "validate"
    - "validate_aarch64"
    - "unit"
    - "unit_aarch64"
    - "integration"
    - "integration_aarch64"
    - "meta"
    - "msrv_build"
  env:
    API_URL_BASE: "https://api.cirrus-ci.com/v1/artifact/build/${CIRRUS_BUILD_ID}"
    # FAIL the task if any expected binary flavor is missing
    EXP_BINS: >-
        aardvark-dns
        aardvark-dns.debug
        aardvark-dns.info
        aardvark-dns.aarch64-unknown-linux-gnu
        aardvark-dns.debug.aarch64-unknown-linux-gnu
        aardvark-dns.info.aarch64-unknown-linux-gnu
  bin_cache: *ro_bin_cache
  clone_script: *noop
  # The paths used for uploaded artifacts are relative here and in Cirrus
  artifacts_prep_script:
    - set -x
    - curl --fail --location -o /tmp/armbinary.zip ${API_URL_BASE}/build_aarch64/armbinary.zip
    - unzip /tmp/armbinary.zip
    - mv bin/* ./
    - rm -rf bin
  artifacts_test_script:  # Other CI systems depend on all files being present
    - ls -la
    # If there's a missing file, show what it was in the output
    - for fn in $EXP_BINS; do [[ -r "$(echo $fn|tee /dev/stderr)" ]] || exit 1; done
  # Upload tested binary for consumption downstream
  # https://cirrus-ci.org/guide/writing-tasks/#artifacts-instruction
  binary_artifacts:
    path: ./aardvark-dns*
07070100000001000041ED00000000000000000000000267AA032200000000000000000000000000000000000000000000001900000000aardvark-dns-1.14.0/.fmf07070100000002000081A400000000000000000000000167AA032200000002000000000000000000000000000000000000002100000000aardvark-dns-1.14.0/.fmf/version1
07070100000003000041ED00000000000000000000000267AA032200000000000000000000000000000000000000000000001C00000000aardvark-dns-1.14.0/.github07070100000004000081A400000000000000000000000167AA032200000845000000000000000000000000000000000000002B00000000aardvark-dns-1.14.0/.github/renovate.json5/*
   Renovate is a service similar to GitHub Dependabot, but with
   (fantastically) more configuration options.  So many options,
   in fact, that if you're new I recommend looking over this
   cheat-sheet before the official documentation:

   https://www.augmentedmind.de/2021/07/25/renovate-bot-cheat-sheet

   Configuration Update/Change Procedure:
     1. Make changes
     2. Manually validate changes (from repo-root):

        podman run -it \
            -v ./.github/renovate.json5:/usr/src/app/renovate.json5:z \
            docker.io/renovate/renovate:latest \
            renovate-config-validator
     3. Commit.

   Configuration Reference:
   https://docs.renovatebot.com/configuration-options/

   Monitoring Dashboard:
   https://app.renovatebot.com/dashboard#github/containers

   Note: The Renovate bot will create/manage its business on
         branches named 'renovate/*'.  Otherwise, and by
         default, the only copy of this file that matters
         is the one on the `main` branch.  No other branches
         will be monitored or touched in any way.
*/

{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json",

  /*************************************************
   ****** Global/general configuration options *****
   *************************************************/

  // Re-use predefined sets of configuration options to DRY
  "extends": [
    // https://github.com/containers/automation/blob/main/renovate/defaults.json5
    "github>containers/automation//renovate/defaults.json5"
  ],

  // Permit automatic rebasing when base-branch changes by more than
  // one commit.
  "rebaseWhen": "behind-base-branch",

  /*************************************************
   *** Repository-specific configuration options ***
   *************************************************/

  // Don't leave dependency-update PRs "hanging"; assign them to people.
  "assignees": ["containers/netavark-maintainers"],

  /**************************************************
   ***** Manager-specific configuration options *****
   **************************************************/
}
07070100000005000041ED00000000000000000000000267AA032200000000000000000000000000000000000000000000002600000000aardvark-dns-1.14.0/.github/workflows07070100000006000081A400000000000000000000000167AA0322000002A3000000000000000000000000000000000000003C00000000aardvark-dns-1.14.0/.github/workflows/check_cirrus_cron.yml---

# See also:
# https://github.com/containers/podman/blob/main/.github/workflows/check_cirrus_cron.yml

on:
  # Note: This only applies to the default branch.
  schedule:
    # N/B: This should correspond to a period slightly after
    # the last job finishes running.  See job defs. at:
    # https://cirrus-ci.com/settings/repository/5268168076689408
    - cron:  '03 03 * * 1-5'
  # Debug: Allow triggering job manually in github-actions WebUI
  workflow_dispatch: {}

jobs:
  # Ref: https://docs.github.com/en/actions/using-workflows/reusing-workflows
  call_cron_failures:
    uses: containers/podman/.github/workflows/check_cirrus_cron.yml@main
    secrets: inherit
07070100000007000081A400000000000000000000000167AA03220000029E000000000000000000000000000000000000003C00000000aardvark-dns-1.14.0/.github/workflows/rerun_cirrus_cron.yml---

# See also: https://github.com/containers/podman/blob/main/.github/workflows/rerun_cirrus_cron.yml

on:
  # Note: This only applies to the default branch.
  schedule:
    # N/B: This should correspond to a period slightly after
    # the last job finishes running.  See job defs. at:
    # https://cirrus-ci.com/settings/repository/5268168076689408
    - cron:  '01 01 * * 1-5'
  # Debug: Allow triggering job manually in github-actions WebUI
  workflow_dispatch: {}

jobs:
  # Ref: https://docs.github.com/en/actions/using-workflows/reusing-workflows
  call_cron_rerun:
    uses: containers/podman/.github/workflows/rerun_cirrus_cron.yml@main
    secrets: inherit
07070100000008000081A400000000000000000000000167AA032200000030000000000000000000000000000000000000001F00000000aardvark-dns-1.14.0/.gitignore/bin/
target/
targets/
*.swp
netavark.1
vendor/
07070100000009000081A400000000000000000000000167AA0322000010BF000000000000000000000000000000000000002100000000aardvark-dns-1.14.0/.packit.yaml---
# See the documentation for more information:
# https://packit.dev/docs/configuration/

downstream_package_name: aardvark-dns
upstream_tag_template: v{version}

# These files get synced from upstream to downstream (Fedora / CentOS Stream) on every
# propose-downstream job. This is done so tests maintained upstream can be run
# downstream in Zuul CI and Bodhi.
# Ref: https://packit.dev/docs/configuration#files_to_sync
files_to_sync:
  - src: rpm/gating.yaml
    dest: gating.yaml
    delete: true
  - src: plans/
    dest: plans/
    delete: true
  - src: .fmf/
    dest: .fmf/
    delete: true
  - .packit.yaml

packages:
  aardvark-dns-fedora:
    pkg_tool: fedpkg
    specfile_path: rpm/aardvark-dns.spec
  aardvark-dns-centos:
    pkg_tool: centpkg
    specfile_path: rpm/aardvark-dns.spec
  aardvark-dns-eln:
    specfile_path: rpm/aardvark-dns.spec

srpm_build_deps:
  - cargo
  - make
  - openssl-devel

jobs:
  - job: copr_build
    trigger: pull_request
    packages: [aardvark-dns-fedora]
    notifications: &copr_build_failure_notification
      failure_comment:
        message: "Ephemeral COPR build failed. @containers/packit-build please check."
    targets:
      - fedora-all-x86_64
      - fedora-all-aarch64
    enable_net: true
    osh_diff_scan_after_copr_build: false

  - job: copr_build
    trigger: pull_request
    packages: [aardvark-dns-eln]
    notifications: *copr_build_failure_notification
    targets:
      fedora-eln-x86_64:
        additional_repos:
          - "https://kojipkgs.fedoraproject.org/repos/eln-build/latest/x86_64/"
      fedora-eln-aarch64:
        additional_repos:
          - "https://kojipkgs.fedoraproject.org/repos/eln-build/latest/aarch64/"
    enable_net: true

  - job: copr_build
    trigger: pull_request
    packages: [aardvark-dns-centos]
    notifications: *copr_build_failure_notification
    targets: &centos_copr_targets
      - centos-stream-9-x86_64
      - centos-stream-9-aarch64
      - centos-stream-10-x86_64
      - centos-stream-10-aarch64
    enable_net: true

  # Run on commit to main branch
  - job: copr_build
    trigger: commit
    packages: [aardvark-dns-fedora]
    notifications:
      failure_comment:
        message: "podman-next COPR build failed. @containers/packit-build please check."
    branch: main
    owner: rhcontainerbot
    project: podman-next
    enable_net: true

  # Unit tests on Fedora
  - job: tests
    trigger: pull_request
    packages: [aardvark-dns-fedora]
    notifications: &test_failure_notification
      failure_comment:
        message: "Tests failed. @containers/packit-build please check."
    targets:
      - fedora-development-x86_64
      - fedora-development-aarch64
      - fedora-latest-x86_64
      - fedora-latest-aarch64
      - fedora-latest-stable-x86_64
      - fedora-latest-stable-aarch64
      - fedora-40-x86_64
      - fedora-40-aarch64
    tf_extra_params:
      environments:
        - artifacts:
          - type: repository-file
            id: https://copr.fedorainfracloud.org/coprs/rhcontainerbot/podman-next/repo/fedora-$releasever/rhcontainerbot-podman-next-fedora-$releasever.repo

  # Unit tests on CentOS Stream
  - job: tests
    trigger: pull_request
    packages: [aardvark-dns-centos]
    notifications: *test_failure_notification
    targets: *centos_copr_targets
    tf_extra_params:
      environments:
        - artifacts:
          - type: repository-file
            id: https://copr.fedorainfracloud.org/coprs/rhcontainerbot/podman-next/repo/centos-stream-$releasever/rhcontainerbot-podman-next-centos-stream-$releasever.repo
          - type: repository-file
            id: https://src.fedoraproject.org/rpms/epel-release/raw/epel$releasever/f/epel.repo

  # Sync to Fedora
  - job: propose_downstream
    trigger: release
    packages: [aardvark-dns-fedora]
    update_release: false
    dist_git_branches: &fedora_targets
      - fedora-all

  # Sync to CentOS Stream
  - job: propose_downstream
    trigger: release
    packages: [aardvark-dns-centos]
    update_release: false
    dist_git_branches:
      - c10s
      - c9s

  - job: koji_build
    trigger: commit
    packages: [aardvark-dns-fedora]
    sidetag_group: netavark-releases
    dependents:
      - netavark
    dist_git_branches: *fedora_targets
0707010000000A000081A400000000000000000000000167AA0322000000C7000000000000000000000000000000000000002700000000aardvark-dns-1.14.0/CODE-OF-CONDUCT.md## The Aardvark-dns Project Community Code of Conduct

The Aardvark-dns project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md).
0707010000000B000081A400000000000000000000000167AA032200009138000000000000000000000000000000000000001F00000000aardvark-dns-1.14.0/Cargo.lock# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3

[[package]]
name = "aardvark-dns"
version = "1.14.0"
dependencies = [
 "arc-swap",
 "chrono",
 "clap",
 "flume",
 "futures-util",
 "hickory-client",
 "hickory-proto",
 "hickory-server",
 "libc",
 "log",
 "nix",
 "syslog",
 "tokio",
]

[[package]]
name = "addr2line"
version = "0.24.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1"
dependencies = [
 "gimli",
]

[[package]]
name = "adler2"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"

[[package]]
name = "android-tzdata"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"

[[package]]
name = "android_system_properties"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
dependencies = [
 "libc",
]

[[package]]
name = "anstream"
version = "0.6.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b"
dependencies = [
 "anstyle",
 "anstyle-parse",
 "anstyle-query",
 "anstyle-wincon",
 "colorchoice",
 "is_terminal_polyfill",
 "utf8parse",
]

[[package]]
name = "anstyle"
version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9"

[[package]]
name = "anstyle-parse"
version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9"
dependencies = [
 "utf8parse",
]

[[package]]
name = "anstyle-query"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c"
dependencies = [
 "windows-sys 0.59.0",
]

[[package]]
name = "anstyle-wincon"
version = "3.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e"
dependencies = [
 "anstyle",
 "once_cell",
 "windows-sys 0.59.0",
]

[[package]]
name = "arc-swap"
version = "1.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457"

[[package]]
name = "async-trait"
version = "0.1.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "644dd749086bf3771a2fbc5f256fdb982d53f011c7d5d560304eafeecebce79d"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "autocfg"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"

[[package]]
name = "backtrace"
version = "0.3.74"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a"
dependencies = [
 "addr2line",
 "cfg-if",
 "libc",
 "miniz_oxide",
 "object",
 "rustc-demangle",
 "windows-targets",
]

[[package]]
name = "bitflags"
version = "2.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36"

[[package]]
name = "bumpalo"
version = "3.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf"

[[package]]
name = "byteorder"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"

[[package]]
name = "bytes"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f61dac84819c6588b558454b194026eb1f09c293b9036ae9b159e74e73ab6cf9"

[[package]]
name = "cc"
version = "1.2.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "755717a7de9ec452bf7f3f1a3099085deabd7f2962b861dae91ecd7a365903d2"
dependencies = [
 "shlex",
]

[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"

[[package]]
name = "cfg_aliases"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724"

[[package]]
name = "chrono"
version = "0.4.39"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825"
dependencies = [
 "android-tzdata",
 "iana-time-zone",
 "js-sys",
 "num-traits",
 "wasm-bindgen",
 "windows-targets",
]

[[package]]
name = "clap"
version = "4.5.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3e77c3243bd94243c03672cb5154667347c457ca271254724f9f393aee1c05ff"
dependencies = [
 "clap_builder",
 "clap_derive",
]

[[package]]
name = "clap_builder"
version = "4.5.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b26884eb4b57140e4d2d93652abfa49498b938b3c9179f9fc487b0acc3edad7"
dependencies = [
 "anstream",
 "anstyle",
 "clap_lex",
 "strsim",
]

[[package]]
name = "clap_derive"
version = "4.5.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf4ced95c6f4a675af3da73304b9ac4ed991640c36374e4b46795c49e17cf1ed"
dependencies = [
 "heck",
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "clap_lex"
version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6"

[[package]]
name = "colorchoice"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990"

[[package]]
name = "core-foundation-sys"
version = "0.8.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"

[[package]]
name = "data-encoding"
version = "2.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0e60eed09d8c01d3cee5b7d30acb059b76614c918fa0f992e0dd6eeb10daad6f"

[[package]]
name = "deranged"
version = "0.3.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4"
dependencies = [
 "powerfmt",
]

[[package]]
name = "displaydoc"
version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "endian-type"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c34f04666d835ff5d62e058c3995147c06f42fe86ff053337632bca83e42702d"

[[package]]
name = "enum-as-inner"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc"
dependencies = [
 "heck",
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "flume"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da0e4dd2a88388a1f4ccc7c9ce104604dab68d9f408dc34cd45823d5a9069095"
dependencies = [
 "futures-core",
 "futures-sink",
 "nanorand",
 "spin",
]

[[package]]
name = "form_urlencoded"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456"
dependencies = [
 "percent-encoding",
]

[[package]]
name = "futures-channel"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10"
dependencies = [
 "futures-core",
]

[[package]]
name = "futures-core"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e"

[[package]]
name = "futures-io"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6"

[[package]]
name = "futures-sink"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7"

[[package]]
name = "futures-task"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988"

[[package]]
name = "futures-util"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
dependencies = [
 "futures-core",
 "futures-task",
 "pin-project-lite",
 "pin-utils",
 "slab",
]

[[package]]
name = "getrandom"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7"
dependencies = [
 "cfg-if",
 "js-sys",
 "libc",
 "wasi",
 "wasm-bindgen",
]

[[package]]
name = "gimli"
version = "0.31.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f"

[[package]]
name = "heck"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"

[[package]]
name = "hickory-client"
version = "0.24.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "949d2fef0bbdd31a0f6affc6bf390b4a0017492903eff6f7516cb382d9e85536"
dependencies = [
 "cfg-if",
 "data-encoding",
 "futures-channel",
 "futures-util",
 "hickory-proto",
 "once_cell",
 "radix_trie",
 "rand",
 "thiserror",
 "tokio",
 "tracing",
]

[[package]]
name = "hickory-proto"
version = "0.24.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "447afdcdb8afb9d0a852af6dc65d9b285ce720ed7a59e42a8bf2e931c67bc1b5"
dependencies = [
 "async-trait",
 "cfg-if",
 "data-encoding",
 "enum-as-inner",
 "futures-channel",
 "futures-io",
 "futures-util",
 "idna",
 "ipnet",
 "once_cell",
 "rand",
 "thiserror",
 "tinyvec",
 "tokio",
 "tracing",
 "url",
]

[[package]]
name = "hickory-server"
version = "0.24.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "35e6d1c2df0614595224b32479c72dd6fc82c9bda85962907c45fdb95a691489"
dependencies = [
 "async-trait",
 "bytes",
 "cfg-if",
 "enum-as-inner",
 "futures-util",
 "hickory-proto",
 "serde",
 "thiserror",
 "time",
 "tokio",
 "tokio-util",
 "tracing",
]

[[package]]
name = "hostname"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f9c7c7c8ac16c798734b8a24560c1362120597c40d5e1459f09498f8f6c8f2ba"
dependencies = [
 "cfg-if",
 "libc",
 "windows",
]

[[package]]
name = "iana-time-zone"
version = "0.1.61"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220"
dependencies = [
 "android_system_properties",
 "core-foundation-sys",
 "iana-time-zone-haiku",
 "js-sys",
 "wasm-bindgen",
 "windows-core",
]

[[package]]
name = "iana-time-zone-haiku"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f"
dependencies = [
 "cc",
]

[[package]]
name = "icu_collections"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526"
dependencies = [
 "displaydoc",
 "yoke",
 "zerofrom",
 "zerovec",
]

[[package]]
name = "icu_locid"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637"
dependencies = [
 "displaydoc",
 "litemap",
 "tinystr",
 "writeable",
 "zerovec",
]

[[package]]
name = "icu_locid_transform"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e"
dependencies = [
 "displaydoc",
 "icu_locid",
 "icu_locid_transform_data",
 "icu_provider",
 "tinystr",
 "zerovec",
]

[[package]]
name = "icu_locid_transform_data"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e"

[[package]]
name = "icu_normalizer"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f"
dependencies = [
 "displaydoc",
 "icu_collections",
 "icu_normalizer_data",
 "icu_properties",
 "icu_provider",
 "smallvec",
 "utf16_iter",
 "utf8_iter",
 "write16",
 "zerovec",
]

[[package]]
name = "icu_normalizer_data"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516"

[[package]]
name = "icu_properties"
version = "1.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5"
dependencies = [
 "displaydoc",
 "icu_collections",
 "icu_locid_transform",
 "icu_properties_data",
 "icu_provider",
 "tinystr",
 "zerovec",
]

[[package]]
name = "icu_properties_data"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569"

[[package]]
name = "icu_provider"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9"
dependencies = [
 "displaydoc",
 "icu_locid",
 "icu_provider_macros",
 "stable_deref_trait",
 "tinystr",
 "writeable",
 "yoke",
 "zerofrom",
 "zerovec",
]

[[package]]
name = "icu_provider_macros"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "idna"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e"
dependencies = [
 "idna_adapter",
 "smallvec",
 "utf8_iter",
]

[[package]]
name = "idna_adapter"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71"
dependencies = [
 "icu_normalizer",
 "icu_properties",
]

[[package]]
name = "ipnet"
version = "2.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130"

[[package]]
name = "is_terminal_polyfill"
version = "1.70.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf"

[[package]]
name = "itoa"
version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674"

[[package]]
name = "js-sys"
version = "0.3.77"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f"
dependencies = [
 "once_cell",
 "wasm-bindgen",
]

[[package]]
name = "libc"
version = "0.2.169"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a"

[[package]]
name = "litemap"
version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104"

[[package]]
name = "lock_api"
version = "0.4.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17"
dependencies = [
 "autocfg",
 "scopeguard",
]

[[package]]
name = "log"
version = "0.4.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f"

[[package]]
name = "memchr"
version = "2.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"

[[package]]
name = "memoffset"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a"
dependencies = [
 "autocfg",
]

[[package]]
name = "miniz_oxide"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b8402cab7aefae129c6977bb0ff1b8fd9a04eb5b51efc50a70bea51cda0c7924"
dependencies = [
 "adler2",
]

[[package]]
name = "mio"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd"
dependencies = [
 "libc",
 "wasi",
 "windows-sys 0.52.0",
]

[[package]]
name = "nanorand"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3"
dependencies = [
 "getrandom",
]

[[package]]
name = "nibble_vec"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77a5d83df9f36fe23f0c3648c6bbb8b0298bb5f1939c8f2704431371f4b84d43"
dependencies = [
 "smallvec",
]

[[package]]
name = "nix"
version = "0.29.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46"
dependencies = [
 "bitflags",
 "cfg-if",
 "cfg_aliases",
 "libc",
 "memoffset",
]

[[package]]
name = "num-conv"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9"

[[package]]
name = "num-traits"
version = "0.2.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
dependencies = [
 "autocfg",
]

[[package]]
name = "num_threads"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9"
dependencies = [
 "libc",
]

[[package]]
name = "object"
version = "0.36.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87"
dependencies = [
 "memchr",
]

[[package]]
name = "once_cell"
version = "1.20.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "945462a4b81e43c4e3ba96bd7b49d834c6f61198356aa858733bc4acf3cbe62e"

[[package]]
name = "percent-encoding"
version = "2.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"

[[package]]
name = "pin-project-lite"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b"

[[package]]
name = "pin-utils"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"

[[package]]
name = "powerfmt"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391"

[[package]]
name = "ppv-lite86"
version = "0.2.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04"
dependencies = [
 "zerocopy",
]

[[package]]
name = "proc-macro2"
version = "1.0.93"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99"
dependencies = [
 "unicode-ident",
]

[[package]]
name = "quote"
version = "1.0.38"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc"
dependencies = [
 "proc-macro2",
]

[[package]]
name = "radix_trie"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c069c179fcdc6a2fe24d8d18305cf085fdbd4f922c041943e203685d6a1c58fd"
dependencies = [
 "endian-type",
 "nibble_vec",
]

[[package]]
name = "rand"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
dependencies = [
 "libc",
 "rand_chacha",
 "rand_core",
]

[[package]]
name = "rand_chacha"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
 "ppv-lite86",
 "rand_core",
]

[[package]]
name = "rand_core"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
dependencies = [
 "getrandom",
]

[[package]]
name = "rustc-demangle"
version = "0.1.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f"

[[package]]
name = "rustversion"
version = "1.0.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4"

[[package]]
name = "scopeguard"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"

[[package]]
name = "serde"
version = "1.0.217"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70"
dependencies = [
 "serde_derive",
]

[[package]]
name = "serde_derive"
version = "1.0.217"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "shlex"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"

[[package]]
name = "signal-hook-registry"
version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1"
dependencies = [
 "libc",
]

[[package]]
name = "slab"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67"
dependencies = [
 "autocfg",
]

[[package]]
name = "smallvec"
version = "1.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"

[[package]]
name = "socket2"
version = "0.5.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8"
dependencies = [
 "libc",
 "windows-sys 0.52.0",
]

[[package]]
name = "spin"
version = "0.9.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"
dependencies = [
 "lock_api",
]

[[package]]
name = "stable_deref_trait"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"

[[package]]
name = "strsim"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"

[[package]]
name = "syn"
version = "2.0.98"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "36147f1a48ae0ec2b5b3bc5b537d267457555a10dc06f3dbc8cb11ba3006d3b1"
dependencies = [
 "proc-macro2",
 "quote",
 "unicode-ident",
]

[[package]]
name = "synstructure"
version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "syslog"
version = "7.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "019f1500a13379b7d051455df397c75770de6311a7a188a699499502704d9f10"
dependencies = [
 "hostname",
 "libc",
 "log",
 "time",
]

[[package]]
name = "thiserror"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52"
dependencies = [
 "thiserror-impl",
]

[[package]]
name = "thiserror-impl"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "time"
version = "0.3.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21"
dependencies = [
 "deranged",
 "itoa",
 "libc",
 "num-conv",
 "num_threads",
 "powerfmt",
 "serde",
 "time-core",
 "time-macros",
]

[[package]]
name = "time-core"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"

[[package]]
name = "time-macros"
version = "0.2.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de"
dependencies = [
 "num-conv",
 "time-core",
]

[[package]]
name = "tinystr"
version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f"
dependencies = [
 "displaydoc",
 "zerovec",
]

[[package]]
name = "tinyvec"
version = "1.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8"
dependencies = [
 "tinyvec_macros",
]

[[package]]
name = "tinyvec_macros"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"

[[package]]
name = "tokio"
version = "1.43.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e"
dependencies = [
 "backtrace",
 "bytes",
 "libc",
 "mio",
 "pin-project-lite",
 "signal-hook-registry",
 "socket2",
 "tokio-macros",
 "windows-sys 0.52.0",
]

[[package]]
name = "tokio-macros"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "tokio-util"
version = "0.7.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078"
dependencies = [
 "bytes",
 "futures-core",
 "futures-sink",
 "pin-project-lite",
 "tokio",
]

[[package]]
name = "tracing"
version = "0.1.41"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0"
dependencies = [
 "pin-project-lite",
 "tracing-attributes",
 "tracing-core",
]

[[package]]
name = "tracing-attributes"
version = "0.1.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "tracing-core"
version = "0.1.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c"
dependencies = [
 "once_cell",
]

[[package]]
name = "unicode-ident"
version = "1.0.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a210d160f08b701c8721ba1c726c11662f877ea6b7094007e1ca9a1041945034"

[[package]]
name = "url"
version = "2.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60"
dependencies = [
 "form_urlencoded",
 "idna",
 "percent-encoding",
]

[[package]]
name = "utf16_iter"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246"

[[package]]
name = "utf8_iter"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be"

[[package]]
name = "utf8parse"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"

[[package]]
name = "wasi"
version = "0.11.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"

[[package]]
name = "wasm-bindgen"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5"
dependencies = [
 "cfg-if",
 "once_cell",
 "rustversion",
 "wasm-bindgen-macro",
]

[[package]]
name = "wasm-bindgen-backend"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6"
dependencies = [
 "bumpalo",
 "log",
 "proc-macro2",
 "quote",
 "syn",
 "wasm-bindgen-shared",
]

[[package]]
name = "wasm-bindgen-macro"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407"
dependencies = [
 "quote",
 "wasm-bindgen-macro-support",
]

[[package]]
name = "wasm-bindgen-macro-support"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
 "wasm-bindgen-backend",
 "wasm-bindgen-shared",
]

[[package]]
name = "wasm-bindgen-shared"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d"
dependencies = [
 "unicode-ident",
]

[[package]]
name = "windows"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be"
dependencies = [
 "windows-core",
 "windows-targets",
]

[[package]]
name = "windows-core"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9"
dependencies = [
 "windows-targets",
]

[[package]]
name = "windows-sys"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
dependencies = [
 "windows-targets",
]

[[package]]
name = "windows-sys"
version = "0.59.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
dependencies = [
 "windows-targets",
]

[[package]]
name = "windows-targets"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
dependencies = [
 "windows_aarch64_gnullvm",
 "windows_aarch64_msvc",
 "windows_i686_gnu",
 "windows_i686_gnullvm",
 "windows_i686_msvc",
 "windows_x86_64_gnu",
 "windows_x86_64_gnullvm",
 "windows_x86_64_msvc",
]

[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"

[[package]]
name = "windows_aarch64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"

[[package]]
name = "windows_i686_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"

[[package]]
name = "windows_i686_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"

[[package]]
name = "windows_i686_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"

[[package]]
name = "windows_x86_64_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"

[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"

[[package]]
name = "windows_x86_64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"

[[package]]
name = "write16"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936"

[[package]]
name = "writeable"
version = "0.5.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51"

[[package]]
name = "yoke"
version = "0.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40"
dependencies = [
 "serde",
 "stable_deref_trait",
 "yoke-derive",
 "zerofrom",
]

[[package]]
name = "yoke-derive"
version = "0.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
 "synstructure",
]

[[package]]
name = "zerocopy"
version = "0.7.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0"
dependencies = [
 "byteorder",
 "zerocopy-derive",
]

[[package]]
name = "zerocopy-derive"
version = "0.7.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "zerofrom"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e"
dependencies = [
 "zerofrom-derive",
]

[[package]]
name = "zerofrom-derive"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
 "synstructure",
]

[[package]]
name = "zerovec"
version = "0.10.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079"
dependencies = [
 "yoke",
 "zerofrom",
 "zerovec-derive",
]

[[package]]
name = "zerovec-derive"
version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
]
0707010000000C000081A400000000000000000000000167AA0322000005AD000000000000000000000000000000000000001F00000000aardvark-dns-1.14.0/Cargo.toml[package]
name = "aardvark-dns"
# This version specification right below is reused by .packit.sh to generate the rpm version
version = "1.14.0"
edition = "2018"
authors = ["github.com/containers"]
license = "Apache-2.0"
readme = "README.md"
description = "A container-focused DNS server"
homepage = "https://github.com/containers/aardvark-dns"
repository = "https://github.com/containers/aardvark-dns"
categories = ["virtualization"]
exclude = ["/.cirrus.yml", "/.github/*"]
rust-version = "1.76"

[package.metadata.vendor-filter]
# This list is not exhaustive.
platforms = ["x86_64-unknown-linux-gnu", "aarch64-unknown-linux-gnu", "powerpc64le-unknown-linux-gnu",
             "s390x-unknown-linux-gnu", "riscv64gc-unknown-linux-gnu",
             "x86_64-unknown-linux-musl", "aarch64-unknown-linux-musl",
             ]

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
clap = { version = "~4.5.28", features = ["derive"] }
syslog = "^7.0.0"
log = "0.4.25"
hickory-server = "0.24.2"
hickory-proto = { version = "0.24.2", features = ["tokio-runtime"] }
hickory-client = "0.24.2"
futures-util = { version = "0.3.31", default-features = false }
tokio = { version = "1.43.0", features = ["macros", "rt-multi-thread", "net", "signal"] }
nix = { version = "0.29.0", features = ["fs", "signal", "net"] }
libc = "0.2.169"
arc-swap = "1.7.1"
flume = "0.11.1"

[build-dependencies]
chrono = "0.4.39"
0707010000000D000081A400000000000000000000000167AA0322000009DE000000000000000000000000000000000000002600000000aardvark-dns-1.14.0/DISTRO_PACKAGE.md# Aardvark-dns: Authoritative DNS server for A/AAAA container records

This document is currently written with Fedora as a reference. As Aardvark-dns
gets shipped in other distros, this should become a distro-agnostic
document.

## Fedora Users
Aardvark-dns is available as an official Fedora package on Fedora 35 and newer versions
and is only meant to be used with Podman v4 and newer releases. On Fedora 36
and newer, fresh installations of the podman package will automatically install
Aardvark-dns along with Netavark. If Aardvark-dns isn't present on your system,
install it using:

```console
$ sudo dnf install aardvark-dns
```

**NOTE:** Fedora 35 users will not be able to install Podman v4 using the default yum
repositories. Please consult the Podman packaging docs for instructions on how
to fetch Podman v4.0 on Fedora 35.

If you would like to test the latest unreleased upstream code, try the
podman-next COPR:

```console
$ sudo dnf copr enable rhcontainerbot/podman-next

$ sudo dnf install aardvark-dns
```

**CAUTION:** The podman-next COPR provides the latest unreleased sources of Podman,
Netavark and Aardvark-dns as rpms, which will override the versions provided by
the official packages.

## Distro Packagers

The Fedora packaging sources for Aardvark-dns are available at the [Aardvark-dns
dist-git](https://src.fedoraproject.org/rpms/aardvark-dns).

The Fedora package builds Aardvark-dns using a compressed tarball of the vendored
libraries that is attached to each upstream release.
You can download it from:

`https://github.com/containers/aardvark-dns/releases/download/v{version}/aardvark-dns-v{version}-vendor.tar.gz`
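
For a quick local check outside of an rpm build (where the tarball is referenced as
`%{SOURCE1}` instead), something like the following should fetch it; the version below is
only an example, adjust it to the release you need:

```console
$ VERSION=1.14.0
$ curl -LO https://github.com/containers/aardvark-dns/releases/download/v${VERSION}/aardvark-dns-v${VERSION}-vendor.tar.gz
```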

And then create a cargo config file to point it to the vendor dir:
```
tar xvf %{SOURCE}
mkdir -p .cargo
cat >.cargo/config << EOF
[source.crates-io]
replace-with = "vendored-sources"

[source.vendored-sources]
directory = "vendor"
EOF
```

The `aardvark-dns` binary is installed to `/usr/libexec/podman/aardvark-dns`.

## Dependency of netavark package
The netavark package has a `Recommends` on the `aardvark-dns` package. The
aardvark-dns package will be installed by default with netavark, but Netavark
and Podman will be functional without it.
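
On an installed system the weak dependency can be checked with rpm; the output should list
`aardvark-dns` (exact version constraints depend on the netavark package build):

```console
$ rpm -q --recommends netavark
```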

## Listing bundled dependencies
If you need to list the bundled dependencies in your packaging sources, you can
run the `cargo tree` command in the upstream source.
For example, Fedora's packaging source uses:

```
$ cargo tree --prefix none | awk '{print "Provides: bundled(crate("$1")) = "$2}' | sort | uniq
```
0707010000000E000081A400000000000000000000000167AA032200002C5D000000000000000000000000000000000000001C00000000aardvark-dns-1.14.0/LICENSE                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!)  The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
0707010000000F000081A400000000000000000000000167AA032200000EE6000000000000000000000000000000000000001D00000000aardvark-dns-1.14.0/Makefile# This Makefile is intended for developer convenience.  For the most part
# all the targets here simply wrap calls to the `cargo` tool.  Therefore,
# most targets must be marked 'PHONY' to prevent `make` getting in the way
#
#prog :=xnixperms

DESTDIR ?=
PREFIX ?= /usr/local
LIBEXECDIR ?= ${PREFIX}/libexec
LIBEXECPODMAN ?= ${LIBEXECDIR}/podman

SELINUXOPT ?= $(shell test -x /usr/sbin/selinuxenabled && selinuxenabled && echo -Z)
# Get crate version by parsing the line that starts with version.
CRATE_VERSION ?= $(shell grep ^version Cargo.toml | awk '{print $$3}')
GIT_TAG ?= $(shell git describe --tags)

# Set this to any non-empty string to enable unoptimized
# build w/ debugging features.
debug ?=

# Set path to cargo executable
CARGO ?= cargo

# All compilation artifacts, including dependencies and intermediates
# will be stored here, for all architectures.  Use a non-default name
# since the (default) 'target' is used/referenced ambiguously in many
# places in the tool-chain (including 'make' itself).
CARGO_TARGET_DIR ?= targets
export CARGO_TARGET_DIR  # 'cargo' is sensitive to this env. var. value.

ifdef debug
$(info debug is $(debug))
  # These affect both $(CARGO_TARGET_DIR) layout and contents
  # Ref: https://doc.rust-lang.org/cargo/guide/build-cache.html
  release :=
  profile :=debug
else
  release :=--release
  profile :=release
endif

.PHONY: all
all: build

bin:
	mkdir -p $@

$(CARGO_TARGET_DIR):
	mkdir -p $@

.PHONY: build
build: bin $(CARGO_TARGET_DIR)
	$(CARGO) build $(release)
	cp $(CARGO_TARGET_DIR)/$(profile)/aardvark-dns bin/aardvark-dns$(if $(debug),.debug,)

.PHONY: crate-publish
crate-publish:
	@if [ "v$(CRATE_VERSION)" != "$(GIT_TAG)" ]; then\
		echo "Git tag is not equivalent to the version set in Cargo.toml. Please checkout the correct tag";\
		exit 1;\
	fi
	@echo "It is expected that you have already done 'cargo login' before running this command. If not command may fail later"
	$(CARGO) publish --dry-run
	$(CARGO) publish

.PHONY: clean
clean:
	rm -rf bin
	if [ "$(CARGO_TARGET_DIR)" = "targets" ]; then rm -rf targets; fi
	$(MAKE) -C docs clean

#.PHONY: docs
#docs: ## build the docs on the host
#	$(MAKE) -C docs

.PHONY: install
install:
	install ${SELINUXOPT} -D -m0755 bin/aardvark-dns $(DESTDIR)/$(LIBEXECPODMAN)/aardvark-dns
	#$(MAKE) -C docs install

.PHONY: uninstall
uninstall:
	rm -f $(DESTDIR)/$(LIBEXECPODMAN)/aardvark-dns
	rm -f $(PREFIX)/share/man/man1/aardvark-dns*.1

#.PHONY: test
test: unit integration

# Used by CI to compile the unit tests but not run them
.PHONY: build_unit
build_unit: $(CARGO_TARGET_DIR)
	$(CARGO) test --no-run

#.PHONY: unit
unit: $(CARGO_TARGET_DIR)
	$(CARGO) test

#.PHONY: code_coverage
# Can be used by CI and users to generate code coverage report based on aardvark unit tests
code_coverage: $(CARGO_TARGET_DIR)
	# Downloads tarpaulin only if same version is not present on local
	$(CARGO) install cargo-tarpaulin
	$(CARGO) tarpaulin -v

#.PHONY: integration
integration: $(CARGO_TARGET_DIR)
	# needs to be run as root or with podman unshare --rootless-netns
	bats test/

.PHONY: mock-rpm
mock-rpm:
	rpkg local

.PHONY: validate
validate: $(CARGO_TARGET_DIR)
	$(CARGO) fmt --all -- --check
	$(CARGO) clippy -p aardvark-dns -- -D warnings

.PHONY: vendor-tarball
vendor-tarball: build install.cargo-vendor-filterer
	VERSION=$(shell bin/aardvark-dns --version | cut -f2 -d" ") && \
	$(CARGO) vendor-filterer --format=tar.gz --prefix vendor/ && \
	mv vendor.tar.gz aardvark-dns-v$$VERSION-vendor.tar.gz && \
	gzip -c bin/aardvark-dns > aardvark-dns.gz && \
	sha256sum aardvark-dns.gz aardvark-dns-v$$VERSION-vendor.tar.gz > sha256sum

.PHONY: install.cargo-vendor-filterer
install.cargo-vendor-filterer:
	$(CARGO) install cargo-vendor-filterer

.PHONY: help
help:
	@echo "usage: make $(prog) [debug=1]"
07070100000010000081A400000000000000000000000167AA03220000004B000000000000000000000000000000000000001B00000000aardvark-dns-1.14.0/OWNERSapprovers:
  - baude
  - lsm5
  - Luap99
  - mheon
reviewers:
  - flouthoc
07070100000011000081A400000000000000000000000167AA03220000054E000000000000000000000000000000000000001E00000000aardvark-dns-1.14.0/README.md# aardvark-dns

Aardvark-dns is an authoritative DNS server for `A/AAAA` container records. It can forward other requests
to configured resolvers.

Read more about configuration in `src/backend/mod.rs`. It is mostly intended to be used with
[Netavark](https://github.com/containers/netavark/) which will launch it automatically if both are
installed.

```console
aardvark-dns 0.1.0

USAGE:
    aardvark-dns [OPTIONS] <SUBCOMMAND>

FLAGS:
    -h, --help       Print help information
    -V, --version    Print version information

OPTIONS:
    -c, --config <CONFIG>    Path to configuration directory
    -p, --port <PORT>        Host port for aardvark servers, defaults to 5533

SUBCOMMANDS:
    help    Print this message or the help of the given subcommand(s)
    run     Runs the aardvark dns server with the specified configuration directory
```

### MSRV (Minimum Supported Rust Version)

v1.76

We test that aardvark-dns can be built on this Rust version and on some newer versions.
All newer versions should also build, and if they do not, the issue should be
reported and will be fixed. Older versions are not guaranteed to build and issues
will not be fixed.

### Build

```console
make
```

### Run Example

```console
RUST_LOG=trace ./bin/aardvark-dns --config src/test/config/podman/ --port 5533 run
```

### [Configuration file format](./config.md)
07070100000012000081A400000000000000000000000167AA032200000A39000000000000000000000000000000000000002500000000aardvark-dns-1.14.0/RELEASE_NOTES.md# Release Notes

## v1.14.0

* Dependency updates.

## v1.13.1

* Fix parsing of ipv6 link local addresses in resolv.conf ([#535](https://github.com/containers/aardvark-dns/issues/535))

## v1.13.0

* Set TTL to 0 for container names
* Allow forwarding of names with no ndots
* DNS: limit to 3 resolvers and use better timeout for them
* Ignore unknown resolv.conf options

## v1.12.2

* This release fixes a security issue (CVE-2024-8418) where TCP connections were not handled correctly, which allowed a container to block DNS queries for other clients on the same network (#500). Versions before v1.12.0 are unaffected as they do not have TCP support.

## v1.12.1

* Fixed problem with categories in Cargo.toml that prevented us from publishing v1.12.0

## v1.12.0

* Dependency updates
* Improve all around error handling and logging
* Added TCP/IP support
* Update upstream resolvers on each refresh

## v1.11.0
* Do not allow "internal" networks to access DNS
* On SIGHUP, stop AV threads no longer needed and reload in memory those that are
* updated dependencies

## v1.10.0
* removed unused kill switch
* updated dependencies

## v1.9.0
* update trust-dns to hickory
* never report an error when the syslog init fails
* dependency updates

## v1.8.0
* dependency updates

## v1.7.0
* dependency updates

## v1.6.0
* dependency updates
* lower the TTL to 60s for container names

## v1.5.0
* dependency updates
* code of conduct added

## v1.4.0
* Add support for network scoped dns servers; declare DNS at a network level

## v1.3.0
* allow one or more dns servers in the aardvark config

## v1.2.0
* coredns: do not combine results of A and AAAA records
* run,serve: create aardvark pid in child before we notify parent process
* coredns: response message set recursion available if RD is true
* document configuration format

## v1.1.0
* Changed Aardvark to fork on startup to daemonize, as opposed to have this done by callers. This avoids race conditions around startup.
* Name resolution is now case-insensitive.

## v1.0.3
* Updated dependency libraries
* Reduction in CPU use
* Fixed bug with duplicate network names

## v1.0.2
* Updated dependency libraries
* Removed vergen dependency

## v1.0.1
- Remove vendor directory from upstream github repository
- Vendored libraries updates

## v1.0.0
- First release of aardvark-dns.

## v1.0.0-RC2
- Slew of bug fixes related to reverse lookups, NXDOMAIN returns, and so on. Getting very close to first official release.

## v1.0.0-RC1
- This is the first release candidate of Aardvark's initial release! All major functionality is implemented and working.
07070100000013000081A400000000000000000000000167AA0322000003D8000000000000000000000000000000000000001D00000000aardvark-dns-1.14.0/build.rsuse chrono::{DateTime, Utc};
use std::env;
use std::process::Command;

fn main() {
    // Generate the default 'cargo:' instruction output
    println!("cargo:rerun-if-changed=build.rs");

    // get timestamp
    let now = match env::var("SOURCE_DATE_EPOCH") {
        Ok(val) => DateTime::from_timestamp(val.parse::<i64>().unwrap(), 0).unwrap(),
        Err(_) => Utc::now(),
    };
    println!("cargo:rustc-env=BUILD_TIMESTAMP={}", now.to_rfc3339());

    // get rust target triple from TARGET env
    println!(
        "cargo:rustc-env=BUILD_TARGET={}",
        std::env::var("TARGET").unwrap()
    );

    // get git commit
    let command = Command::new("git").args(["rev-parse", "HEAD"]).output();
    let commit = match command {
        Ok(output) => String::from_utf8(output.stdout).unwrap(),
        // on error, e.g. building from a source tarball without a git repo, just use an empty string
        Err(_) => "".to_string(),
    };
    println!("cargo:rustc-env=GIT_COMMIT={}", commit);
}
07070100000014000081A400000000000000000000000167AA032200000656000000000000000000000000000000000000001E00000000aardvark-dns-1.14.0/config.md# Configuration format

Aardvark-dns will read configuration files from a given directory.

Inside this directory there should be at least one config file. The name of the file equals the network name.

### First line
The first line in the config must contain a comma-separated list of listening IPs for this network, usually the bridge IPs.
At least one ip must be given.
**Note**: An optional second column of comma delimited domain name servers can be used at the network level. All containers
on that network will inherit all the specified name servers instead of using the host's resolver.

```
[comma separated ipv4,ipv6 list][(optional)[space][comma separated DNS servers]]
```

### Container entries
All following lines must contain the dns entries in this format:
```
[containerID][space][comma separated ipv4 list][space][comma separated ipv6 list][space][comma separated DNS names][(optional)[space][comma separated DNS servers]]
```

Aardvark-dns will reload all config files when receiving a SIGHUP signal.
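
For example, assuming the running instance wrote its pid file (`aardvark.pid`) into the
configuration directory, a reload can be triggered with:

```
kill -HUP $(cat <config-directory>/aardvark.pid)
```

Here `<config-directory>` is a placeholder for the directory passed via `--config`.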


## Example

```
10.0.0.1,fdfd::1
f35256b5e2f72ec8cb7d974d4f8841686fc8921fdfbc867285b50164e313f715 10.0.0.2 fdfd::2 testmulti1 8.8.8.8,1.1.1.1
e5df0cdbe0136a30cc3e848d495d2cc6dada25b7dedc776b4584ce2cbba6f06f 10.0.0.3 fdfd::3 testmulti2
```
## Example with network scoped DNS servers

```
10.0.0.1,fdfd::1 8.8.8.8,1.1.1.1
f35256b5e2f72ec8cb7d974d4f8841686fc8921fdfbc867285b50164e313f715 10.0.0.2 fdfd::2 testmulti1 8.8.8.8,1.1.1.1
e5df0cdbe0136a30cc3e848d495d2cc6dada25b7dedc776b4584ce2cbba6f06f 10.0.0.3 fdfd::3 testmulti2
```

Also see [./src/test/config/](./src/test/config/) for more config examples
07070100000015000041ED00000000000000000000000267AA032200000000000000000000000000000000000000000000001C00000000aardvark-dns-1.14.0/contrib07070100000016000041ED00000000000000000000000267AA032200000000000000000000000000000000000000000000002300000000aardvark-dns-1.14.0/contrib/cirrus07070100000017000081A400000000000000000000000167AA03220000081D000000000000000000000000000000000000002A00000000aardvark-dns-1.14.0/contrib/cirrus/lib.sh

# Library of common, shared utility functions.  This file is intended
# to be sourced by other scripts, not called directly.

# BEGIN Global export of all variables
set -a

# Automation library installed at image-build time,
# defining $AUTOMATION_LIB_PATH in this file.
if [[ -r "/etc/automation_environment" ]]; then
    source /etc/automation_environment
fi

if [[ -n "$AUTOMATION_LIB_PATH" ]]; then
    source $AUTOMATION_LIB_PATH/common_lib.sh
else
    (
    echo "WARNING: It does not appear that containers/automation was installed."
    echo "         Functionality of most of this library will be negatively impacted"
    echo "         This ${BASH_SOURCE[0]} was loaded by ${BASH_SOURCE[1]}"
    ) > /dev/stderr
fi

# Unsafe env. vars for display
SECRET_ENV_RE='(ACCOUNT)|(GC[EP]..+)|(SSH)|(PASSWORD)|(TOKEN)'

# setup.sh calls make_cienv() to cache these values for the life of the VM
if [[ -r "/etc/ci_environment" ]]; then
    source /etc/ci_environment
else  # set default values - see make_cienv() below
    # Install rust packages globally instead of per-user
    CARGO_HOME="${CARGO_HOME:-/usr/local/cargo}"
    # Ensure cargo packages can be executed
    PATH="$PATH:$CARGO_HOME/bin"
fi

# END Global export of all variables
set +a

# Shortcut to automation library timeout/retry function
retry() { err_retry 8 1000 "" "$@"; }  # just over 4 minutes max

# Helper to ensure a consistent environment across multiple CI scripts,
# containers, and shell environments (e.g. hack/get_ci_vm.sh)
make_cienv(){
    local envname
    local envval
    local SETUP_ENVIRONMENT=1
    for envname in CARGO_HOME PATH CIRRUS_WORKING_DIR SETUP_ENVIRONMENT; do
        envval="${!envname}"
        # Properly escape values to prevent injection
        printf -- "$envname=%q\n" "$envval"
    done
}

complete_setup(){
    set +x
    msg "************************************************************"
    msg "Completing environment setup, writing vars:"
    msg "************************************************************"
    make_cienv | tee -a /etc/ci_environment
}
07070100000018000081A400000000000000000000000167AA032200000375000000000000000000000000000000000000003B00000000aardvark-dns-1.14.0/contrib/cirrus/netavark_cache_groom.sh#!/bin/bash
#
# This script is intended to be run from Cirrus-CI to prepare the
# rust targets cache for re-use during subsequent runs.  This mainly
# involves removing files and directories which change frequently
# but are cheap/quick to regenerate - i.e. prevent "cache-flapping".
# Any other use of this script is not supported and may cause harm.

set -eo pipefail

SCRIPT_DIRPATH=$(dirname ${BASH_SOURCE[0]})
source $SCRIPT_DIRPATH/lib.sh

if [[ "$CIRRUS_CI" != true ]] || [[ -z "$NETAVARK_BRANCH" ]]; then
  die "Script is not intended for use outside of Cirrus-CI"
fi

SCRIPT_DEST=$SCRIPT_DIRPATH/cache_groom.sh
showrun curl --location --silent --show-error -o $SCRIPT_DEST \
  https://raw.githubusercontent.com/containers/netavark/$NETAVARK_BRANCH/contrib/cirrus/cache_groom.sh

# Certain common automation library calls assume execution from this file
exec bash $SCRIPT_DEST
07070100000019000081ED00000000000000000000000167AA032200000890000000000000000000000000000000000000002D00000000aardvark-dns-1.14.0/contrib/cirrus/runner.sh#!/bin/bash

set -eo pipefail

# This script runs in the Cirrus CI environment, invoked from .cirrus.yml .
# It can also be invoked manually in a `hack/get_ci_vm.sh` environment,
# documentation of said usage is TBI.
#
# The principal deciding factor is the first argument.  For any
# given value 'xyz' there must be a function '_run_xyz' to handle that
# argument.
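# For example, `runner.sh validate` dispatches to the _run_validate() function
# defined below.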

source $(dirname ${BASH_SOURCE[0]})/lib.sh

_run_noarg() {
    die "runner.sh must be called with a single argument"
}

_run_build() {
    # Assume we're on a fast VM, compile everything needed by the
    # rest of CI since subsequent tasks may have limited resources.
    make all debug=1
    make build_unit  # reuses some debug binaries
    make all  # optimized/non-debug binaries

    # This will get scooped up and become part of the artifact archive.
    # Identify where the binary came from to benefit downstream consumers.
    cat | tee bin/aardvark-dns.info << EOF
repo: $CIRRUS_REPO_CLONE_URL
branch: $CIRRUS_BASE_BRANCH
title: $CIRRUS_CHANGE_TITLE
commit: $CIRRUS_CHANGE_IN_REPO
build: https://cirrus-ci.com/build/$CIRRUS_BUILD_ID
task: https://cirrus-ci.com/task/$CIRRUS_TASK_ID
EOF
}

_run_build_aarch64() {
    _run_build
}

_run_validate() {
    make validate
}

_run_validate_aarch64() {
    _run_validate
}

_run_unit() {
    make unit
}

_run_unit_aarch64() {
    _run_unit
}

_run_integration() {
    make integration
}

_run_integration_aarch64() {
    make # FIXME: (@lsm5) investigate why cached binary isn't being reused
    _run_integration
}

show_env_vars

msg "************************************************************"
msg "Toolchain details"
msg "************************************************************"
rustc --version
cargo --version

msg "************************************************************"
msg "Runner executing '$1' on $OS_REL_VER"
msg "************************************************************"

((${SETUP_ENVIRONMENT:-0})) || \
    die "Expecting setup.sh to have completed successfully"

cd "${CIRRUS_WORKING_DIR}/"

handler="_run_${1:-noarg}"

if [ "$(type -t $handler)" != "function" ]; then
    die "Unknown/Unsupported runner.sh argument '$1'"
fi

$handler
0707010000001A000081ED00000000000000000000000167AA0322000005B2000000000000000000000000000000000000002C00000000aardvark-dns-1.14.0/contrib/cirrus/setup.sh#!/bin/bash

# This script configures the CI runtime environment.  It's intended
# to be used by Cirrus-CI, not humans.

set -e

source $(dirname $0)/lib.sh

# Only do this once
if [[ -r "/etc/ci_environment" ]]; then
    msg "It appears ${BASH_SOURCE[0]} already ran, exiting."
    exit 0
fi
trap "complete_setup" EXIT

msg "************************************************************"
msg "Setting up runtime environment"
msg "************************************************************"
show_env_vars

req_env_vars NETAVARK_URL NETAVARK_BRANCH
cd /usr/libexec/podman
rm -vf netavark*
if showrun curl --fail --location -o /tmp/netavark.zip "$NETAVARK_URL" && \
   unzip -o /tmp/netavark.zip; then

    if [[ $(uname -m) != "x86_64" ]]; then
        showrun mv netavark.$(uname -m)-unknown-linux-gnu netavark
    fi
    showrun chmod a+x /usr/libexec/podman/netavark
else
    warn "Error downloading/extracting the latest pre-compiled netavark binary from CI"
    showrun cargo install \
      --root /usr/libexec/podman \
      --git https://github.com/containers/netavark \
      --branch "$NETAVARK_BRANCH"
    showrun mv /usr/libexec/podman/bin/netavark /usr/libexec/podman
fi
# show netavark commit in CI logs
showrun /usr/libexec/podman/netavark version

# Warning, this isn't the end.  An exit-handler is installed to finalize
# setup of env. vars.  This is required for runner.sh to operate properly.
# See complete_setup() in lib.sh for details.
0707010000001B000041ED00000000000000000000000267AA032200000000000000000000000000000000000000000000002100000000aardvark-dns-1.14.0/contrib/perf0707010000001C000081ED00000000000000000000000167AA032200000073000000000000000000000000000000000000002D00000000aardvark-dns-1.14.0/contrib/perf/nslookup.py#!/usr/bin/env python

import socket
import sys

for i in range(0, 10_000):
    socket.getaddrinfo(sys.argv[1], 0)
0707010000001D000081ED00000000000000000000000167AA0322000002F7000000000000000000000000000000000000002800000000aardvark-dns-1.14.0/contrib/perf/run.sh#!/bin/bash

PODMAN=${PODMAN-podman}
DIR=$(dirname -- "${BASH_SOURCE[0]}")
IMAGE=docker.io/library/python
JOBS=${JOBS:-$(nproc)}
netname="testnet"


$PODMAN rm -fa -t0
$PODMAN network rm -f $netname

$PODMAN network create $netname

# first command to spawn aardvark-dns
$PODMAN run -i -d --network $netname --name starter $IMAGE

perf stat -p $(pgrep -n aardvark-dns) &> $DIR/perf.log &

for i in $( seq 1 $JOBS )
do
    $PODMAN run -v $DIR/nslookup.py:/nslookup.py:z --name test$i --network $netname:alias=testabc$i -d $IMAGE /nslookup.py testabc$i
done

$PODMAN rm -f -t0 starter

# wait for perf to finish
# because aardvark-dns exits on its own when all containers are done, this should not hang
wait

#
$PODMAN rm -fa -t0
$PODMAN network rm -f $netname
0707010000001E000041ED00000000000000000000000267AA032200000000000000000000000000000000000000000000001900000000aardvark-dns-1.14.0/docs0707010000001F000081A400000000000000000000000167AA03220000014B000000000000000000000000000000000000002A00000000aardvark-dns-1.14.0/docs/publish-crate.md# Publishing aardvark-dns crate to crates.io
### Steps
* Make sure you have already done `cargo login` on your current session with a valid token.
* `cd aardvark-dns`
* Git checkout the version which you want to publish.
* `make crate-publish`
* New version should be reflected here: https://crates.io/crates/aardvark-dns/versions
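
Putting the steps together, a typical session might look like this (the tag `v1.14.0` is only
an example; `make crate-publish` refuses to publish if the checked-out tag does not match the
version in Cargo.toml):

```console
$ cargo login
$ cd aardvark-dns
$ git checkout v1.14.0
$ make crate-publish
```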
07070100000020000041ED00000000000000000000000267AA032200000000000000000000000000000000000000000000001900000000aardvark-dns-1.14.0/hack07070100000021000081ED00000000000000000000000167AA032200000A19000000000000000000000000000000000000002600000000aardvark-dns-1.14.0/hack/get_ci_vm.sh#!/usr/bin/env bash

#
# For help and usage information, simply execute the script w/o any arguments.
#
# This script is intended to be run by Red Hat podman developers who need
# to debug problems specifically related to Cirrus-CI automated testing.
# It requires that you have been granted prior access to create VMs in
# google-cloud.  For non-Red Hat contributors, VMs are available as-needed,
# with supervision upon request.

set -e

SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}")
SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH")
REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../")

# Help detect what get_ci_vm container called this script
GET_CI_VM="${GET_CI_VM:-0}"
in_get_ci_vm() {
    if ((GET_CI_VM==0)); then
        echo "Error: $1 is not intended for use in this context"
        exit 2
    fi
}

# get_ci_vm APIv1 container entrypoint calls into this script
# to obtain required repo. specific configuration options.
if [[ "$1" == "--config" ]]; then
    in_get_ci_vm "$1"  # handles GET_CI_VM==0 case
    case "$GET_CI_VM" in
        1)
            cat <<EOF
DESTDIR="/var/tmp/aardvark-dns"
UPSTREAM_REPO="https://github.com/containers/aardvark-dns.git"
CI_ENVFILE="/etc/ci_environment"
GCLOUD_PROJECT="netavark-2021"
GCLOUD_IMGPROJECT="libpod-218412"
GCLOUD_CFG="netavark"
GCLOUD_ZONE="${GCLOUD_ZONE:-us-central1-c}"
GCLOUD_CPUS="8"
GCLOUD_MEMORY="8Gb"
GCLOUD_DISK="200"
EOF
            ;;
        2)
            # get_ci_vm APIv2 configuration details
            echo "AWS_PROFILE=containers"
            ;;
        *)
            echo "Error: Your get_ci_vm container image is too old."
            ;;
    esac
elif [[ "$1" == "--setup" ]]; then
    in_get_ci_vm "$1"
    # get_ci_vm container entrypoint calls us with this option on the
    # Cirrus-CI environment instance, to perform repo.-specific setup.
    cd $REPO_DIRPATH
    echo "+ Loading ./contrib/cirrus/lib.sh" > /dev/stderr
    source ./contrib/cirrus/lib.sh
    echo "+ Running environment setup" > /dev/stderr
    ./contrib/cirrus/setup.sh "$CIRRUS_TASK_NAME"
else
    # Pass this repo and CLI args into container for VM creation/management
    mkdir -p $HOME/.config/gcloud/ssh
    mkdir -p $HOME/.aws
    podman run -it --rm \
        --tz=local \
        -e NAME="$USER" \
        -e SRCDIR=/src \
        -e GCLOUD_ZONE="$GCLOUD_ZONE" \
        -e A_DEBUG="${A_DEBUG:-0}" \
        -v $REPO_DIRPATH:/src:O \
        -v $HOME/.config/gcloud:/root/.config/gcloud:z \
        -v $HOME/.config/gcloud/ssh:/root/.ssh:z \
        -v $HOME/.aws:/root/.aws:z \
        quay.io/libpod/get_ci_vm:latest "$@"
fi
07070100000022000041ED00000000000000000000000267AA032200000000000000000000000000000000000000000000001A00000000aardvark-dns-1.14.0/plans07070100000023000081A400000000000000000000000167AA032200000218000000000000000000000000000000000000002300000000aardvark-dns-1.14.0/plans/main.fmfdiscover:
    how: fmf
execute:
    how: tmt

/upstream:
    summary: Run tests on upstream PRs
    discover+:
        filter: tag:upstream
    adjust+:
        enabled: false
        when: initiator is not defined or initiator != packit

/downstream:
    summary: Run tests on bodhi / errata and dist-git PRs
    discover+:
        filter: tag:downstream
        dist-git-install-builddeps: true
        dist-git-source: true
        dist-git-remove-fmf-root: true
    adjust+:
        enabled: false
        when: initiator == packit
07070100000024000041ED00000000000000000000000267AA032200000000000000000000000000000000000000000000001800000000aardvark-dns-1.14.0/rpm07070100000025000081A400000000000000000000000167AA0322000009A8000000000000000000000000000000000000002A00000000aardvark-dns-1.14.0/rpm/aardvark-dns.spec# trust-dns-{client,server} not available
# using vendored deps

%global with_debug 1

%if 0%{?with_debug}
%global _find_debuginfo_dwz_opts %{nil}
%global _dwz_low_mem_die_limit 0
%else
%global debug_package %{nil}
%endif

Name: aardvark-dns
%if %{defined copr_username}
Epoch: 102
%else
Epoch: 2
%endif
# DO NOT TOUCH the Version string!
# The TRUE source of this specfile is:
# https://github.com/containers/podman/blob/main/rpm/podman.spec
# If that's what you're reading, Version must be 0, and will be updated by Packit for
# copr and koji builds.
# If you're reading this on dist-git, the version is automatically filled in by Packit.
Version: 0
# The `AND` needs to be uppercase in the License for SPDX compatibility
License: Apache-2.0 AND MIT AND Zlib
Release: %autorelease
%if %{defined golang_arches_future}
ExclusiveArch: %{golang_arches_future}
%else
ExclusiveArch: aarch64 ppc64le s390x x86_64
%endif
Summary: Authoritative DNS server for A/AAAA container records
URL: https://github.com/containers/%{name}
# Tarballs fetched from upstream's release page
Source0: %{url}/archive/v%{version}.tar.gz
Source1: %{url}/releases/download/v%{version}/%{name}-v%{version}-vendor.tar.gz
BuildRequires: cargo
BuildRequires: git-core
BuildRequires: make
%if %{defined rhel}
# rust-toolset requires the `local` repo enabled on non-koji ELN build environments
BuildRequires: rust-toolset
%else
BuildRequires: rust-packaging
BuildRequires: rust-srpm-macros
%endif

%description
%{summary}

Forwards other requests to configured resolvers.
Read more about configuration in `src/backend/mod.rs`.

%prep
%autosetup -Sgit %{name}-%{version}
# Following steps are only required on environments like koji which have no
# network access and thus depend on the vendored tarball. Copr pulls
# dependencies directly from the network.
%if !%{defined copr_username}
tar fx %{SOURCE1}
%if 0%{?fedora} || 0%{?rhel} >= 10
%cargo_prep -v vendor
%else
%cargo_prep -V 1
%endif
%endif

%build
%{__make} CARGO="%{__cargo}" build
%if (0%{?fedora} || 0%{?rhel} >= 10) && !%{defined copr_username}
%cargo_license_summary
%{cargo_license} > LICENSE.dependencies
%cargo_vendor_manifest
%endif

%install
%{__make} DESTDIR=%{buildroot} PREFIX=%{_prefix} install

%files
%license LICENSE
%if (0%{?fedora} || 0%{?rhel} >= 10) && !%{defined copr_username}
%license LICENSE.dependencies
%license cargo-vendor.txt
%endif
%dir %{_libexecdir}/podman
%{_libexecdir}/podman/%{name}

%changelog
%autochangelog
07070100000026000081A400000000000000000000000167AA032200000106000000000000000000000000000000000000002400000000aardvark-dns-1.14.0/rpm/gating.yaml--- !Policy
product_versions:
  - fedora-*
decision_context: bodhi_update_push_stable
rules:
  - !PassingTestCaseRule {test_case_name: fedora-ci.koji-build.tier0.functional}

--- !Policy
product_versions:
  - rhel-*
decision_context: osci_compose_gate
rules: []
07070100000027000041ED00000000000000000000000267AA032200000000000000000000000000000000000000000000001800000000aardvark-dns-1.14.0/src07070100000028000041ED00000000000000000000000267AA032200000000000000000000000000000000000000000000002000000000aardvark-dns-1.14.0/src/backend07070100000029000081A400000000000000000000000167AA032200001911000000000000000000000000000000000000002700000000aardvark-dns-1.14.0/src/backend/mod.rsuse log::error;
use std::collections::HashMap;
use std::net::IpAddr;
use std::vec::Vec;

// The core structure of the in-memory backing store for the DNS server.
// TODO: I've initially intermingled v4 and v6 addresses for simplicity; the
// server will get back a mix of responses and filter for v4/v6 from there.
// This may not be a good decision, not sure yet; we can split later if
// necessary.
pub struct DNSBackend {
    // Map of IP -> Network membership.
    // Every container must have an entry in this map, otherwise we will not
    // service requests to the Podman TLD for it.
    pub ip_mappings: HashMap<IpAddr, Vec<String>>,
    // Map of network name to map of name to IP addresses.
    pub name_mappings: HashMap<String, HashMap<String, Vec<IpAddr>>>,
    // Map of network name to map of IP address to container name.
    pub reverse_mappings: HashMap<String, HashMap<IpAddr, Vec<String>>>,
    // Map of IP address to DNS server IPs to service queries not handled
    // directly.
    pub ctr_dns_server: HashMap<IpAddr, Option<Vec<IpAddr>>>,
    // Map of network name and DNS server IPs.
    pub network_dns_server: HashMap<String, Vec<IpAddr>>,
    // Map of network name to bool (network is/is not internal)
    pub network_is_internal: HashMap<String, bool>,

    // search_domain used by aardvark-dns
    pub search_domain: String,
}

impl DNSBackend {
    // Create a new backend from the given set of network mappings.
    pub fn new(
        containers: HashMap<IpAddr, Vec<String>>,
        networks: HashMap<String, HashMap<String, Vec<IpAddr>>>,
        reverse: HashMap<String, HashMap<IpAddr, Vec<String>>>,
        ctr_dns_server: HashMap<IpAddr, Option<Vec<IpAddr>>>,
        network_dns_server: HashMap<String, Vec<IpAddr>>,
        network_is_internal: HashMap<String, bool>,
        mut search_domain: String,
    ) -> DNSBackend {
        // DNS requests always end with a dot, so append one for easier comparison later
        if let Some(c) = search_domain.chars().rev().nth(0) {
            if c != '.' {
                search_domain.push('.')
            }
        }
        DNSBackend {
            ip_mappings: containers,
            name_mappings: networks,
            reverse_mappings: reverse,
            ctr_dns_server,
            network_dns_server,
            network_is_internal,
            search_domain,
        }
    }

    // Handle a single DNS lookup made by a given IP.
    // Returns all the ips for the given entry name
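    // For example (hypothetical names): a query for "ctr1.<search-domain>." made by a
    // container on network "net1" returns the addresses stored under
    // name_mappings["net1"]["ctr1"], once the search domain and trailing dot have been
    // stripped below.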
    pub fn lookup(
        &self,
        requester: &IpAddr,
        network_name: &str,
        entry: &str,
    ) -> Option<Vec<IpAddr>> {
        // Normalize lookup entry to lowercase.
        let mut name = entry.to_lowercase();

        // Trim off configured search domain if needed as keys do not contain it.
        // There doesn't seem to be a nicer way to do that:
        // https://users.rust-lang.org/t/can-strip-suffix-mutate-a-string-value/86852
        if name.ends_with(&self.search_domain) {
            name.truncate(name.len() - self.search_domain.len())
        }

        // if this is a fully qualified name, remove the trailing dot so the backend can perform the search
        if name.ends_with(".") {
            name.truncate(name.len() - 1)
        }

        let owned_netns: Vec<String>;

        let nets = match self.ip_mappings.get(requester) {
            Some(n) => n,
            // no source ip found, so just allow access to the current network where the request was made
            // On newer rust versions in CI we can return &vec![network_name.to_string()] directly without the extra assignment to the outer scope
            None => {
                owned_netns = vec![network_name.to_string()];
                &owned_netns
            }
        };

        let mut results: Vec<IpAddr> = Vec::new();

        for net in nets {
            let net_names = match self.name_mappings.get(net) {
                Some(n) => n,
                None => {
                    error!("Container with IP {} belongs to network {} but there is no listing in networks table!", requester.to_string(), net);
                    continue;
                }
            };

            if let Some(addrs) = net_names.get(&name) {
                results.append(&mut addrs.clone());
            }
        }

        if results.is_empty() {
            return None;
        }

        Some(results)
    }

    // Returns list of network resolvers for a particular container
    pub fn get_network_scoped_resolvers(&self, requester: &IpAddr) -> Option<Vec<IpAddr>> {
        let mut results: Vec<IpAddr> = Vec::new();

        match self.ip_mappings.get(requester) {
            Some(nets) => {
                for net in nets {
                    match self.network_dns_server.get(net) {
                        Some(resolvers) => results.extend_from_slice(resolvers),
                        None => {
                            continue;
                        }
                    };
                }
            }
            None => return None,
        };

        Some(results)
    }

    // Checks if a container is associated with only internal networks.
    // Returns true if and only if a container is only present in
    // internal networks.
    pub fn ctr_is_internal(&self, requester: &IpAddr) -> bool {
        match self.ip_mappings.get(requester) {
            Some(nets) => {
                for net in nets {
                    match self.network_is_internal.get(net) {
                        Some(internal) => {
                            if !internal {
                                return false;
                            }
                        }
                        None => continue,
                    }
                }
            }
            // For safety, if we don't know about the IP, assume it's probably
            // someone on the host asking; let them access DNS.
            None => return false,
        }

        true
    }

    /// Return the names for the given IP resolved via the reverse mappings, if present.
    pub fn reverse_lookup(&self, requester: &IpAddr, lookup_ip: &IpAddr) -> Option<&Vec<String>> {
        let nets = self.ip_mappings.get(requester)?;

        for ips in nets.iter().filter_map(|v| self.reverse_mappings.get(v)) {
            if let Some(names) = ips.get(lookup_ip) {
                return Some(names);
            }
        }

        None
    }
}
0707010000002A000041ED00000000000000000000000267AA032200000000000000000000000000000000000000000000002100000000aardvark-dns-1.14.0/src/commands0707010000002B000081A400000000000000000000000167AA03220000001E000000000000000000000000000000000000002800000000aardvark-dns-1.14.0/src/commands/mod.rspub mod run;
pub mod version;
0707010000002C000081A400000000000000000000000167AA032200000BCB000000000000000000000000000000000000002800000000aardvark-dns-1.14.0/src/commands/run.rs//! Runs the aardvark dns server with provided config
use crate::server::serve;
use clap::Parser;
use nix::unistd;
use nix::unistd::{fork, ForkResult};
use std::io::Error;
use std::os::unix::io::AsRawFd;

#[derive(Parser, Debug)]
pub struct Run {}

impl Run {
    /// The run command runs the aardvark-dns server with the given configuration.
    pub fn new() -> Self {
        Self {}
    }

    pub fn exec(
        &self,
        input_dir: String,
        port: u16,
        filter_search_domain: String,
    ) -> Result<(), Error> {
        // create a pipe so the parent can communicate with the child and
        // only exit when the child is ready to serve.
        let (ready_pipe_read, ready_pipe_write) = nix::unistd::pipe()?;

        // fork and verify if server is running
        // and exit parent
        // setsid() ensures that there is no controlling terminal on the child process
        match unsafe { fork() } {
            Ok(ForkResult::Parent { child, .. }) => {
                log::debug!("starting aardvark on a child with pid {}", child);
                // close write here to make sure the read does not hang when
                // the child never sends a message because it exited too early...
                drop(ready_pipe_write);
                // verify aardvark here and block until it has started
                let i = unistd::read(ready_pipe_read.as_raw_fd(), &mut [0_u8; 1])?;
                drop(ready_pipe_read);
                if i == 0 {
                    // we did not get any message -> child exited with error
                    Err(std::io::Error::new(
                        std::io::ErrorKind::Other,
                        "Error from child process",
                    ))
                } else {
                    Ok(())
                }
            }
            Ok(ForkResult::Child) => {
                drop(ready_pipe_read);
                // create aardvark pid and then notify parent
                if let Err(er) = serve::create_pid(&input_dir) {
                    return Err(std::io::Error::new(
                        std::io::ErrorKind::Other,
                        format!("Error creating aardvark pid {}", er),
                    ));
                }

                if let Err(er) =
                    serve::serve(&input_dir, port, &filter_search_domain, ready_pipe_write)
                {
                    return Err(std::io::Error::new(
                        std::io::ErrorKind::Other,
                        format!("Error starting server {}", er),
                    ));
                }
                Ok(())
            }
            Err(err) => {
                log::debug!("fork failed with error {}", err);
                Err(std::io::Error::new(
                    std::io::ErrorKind::Other,
                    format!("fork failed with error: {}", err),
                ))
            }
        }
    }
}

impl Default for Run {
    fn default() -> Self {
        Self::new()
    }
}
0707010000002D000081A400000000000000000000000167AA0322000003D6000000000000000000000000000000000000002C00000000aardvark-dns-1.14.0/src/commands/version.rsuse clap::Parser;
use std::fmt;
use std::io::Error;

#[derive(Parser, Debug)]
pub struct Version {}

#[derive(Debug)]
struct Info {
    version: &'static str,
    commit: &'static str,
    build_time: &'static str,
    target: &'static str,
}

// since we do not need a json library here we just create the json output manually
impl fmt::Display for Info {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "{{
  \"version\": \"{}\",
  \"commit\": \"{}\",
  \"build_time\": \"{}\",
  \"target\": \"{}\"
}}",
            self.version, self.commit, self.build_time, self.target
        )
    }
}

impl Version {
    pub fn exec(&self) -> Result<(), Error> {
        let info = Info {
            version: env!("CARGO_PKG_VERSION"),
            commit: env!("GIT_COMMIT"),
            build_time: env!("BUILD_TIMESTAMP"),
            target: env!("BUILD_TARGET"),
        };
        println!("{}", info);

        Ok(())
    }
}
0707010000002E000041ED00000000000000000000000267AA032200000000000000000000000000000000000000000000001F00000000aardvark-dns-1.14.0/src/config0707010000002F000081A400000000000000000000000167AA032200000060000000000000000000000000000000000000002C00000000aardvark-dns-1.14.0/src/config/constants.rspub static AARDVARK_PID_FILE: &str = "aardvark.pid";
pub static INTERNAL_SUFFIX: &str = "%int";
07070100000030000081A400000000000000000000000167AA032200003CCC000000000000000000000000000000000000002600000000aardvark-dns-1.14.0/src/config/mod.rsuse crate::backend::DNSBackend;
use crate::error::{AardvarkError, AardvarkResult};
use log::error;
use std::collections::HashMap;
use std::fs::{metadata, read_dir, read_to_string};
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
use std::vec::Vec;
pub mod constants;

// Parse configuration files in the given directory.
// Configuration files are formatted as follows:
// The name of the file will be interpreted as the name of the network.
// The first line must be the gateway IP(s) of the network, comma-separated.
// All subsequent individual lines contain info on a single container and are
// formatted as:
// <container ID, space, IPv4 address, space, IPv6 address, space, comma-separated list of name and aliases>
// Where space is a single space character.
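// As an illustration only (hypothetical values; see config.md for real examples), a config
// file named after its network might contain:
//   10.0.0.1,fdfd::1
//   <container-id> 10.0.0.2 fdfd::2 somename,somealias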
// Returns a complete DNSBackend struct (all that is necessary for lookups) along with the
// IPv4 and IPv6 listen addresses for each network.

// Silence clippy: sometimes clippy marks useful types as complex; in this case the following
// type is convenient.
#[allow(clippy::type_complexity)]
pub fn parse_configs(
    dir: &str,
    filter_search_domain: &str,
) -> AardvarkResult<(
    DNSBackend,
    HashMap<String, Vec<Ipv4Addr>>,
    HashMap<String, Vec<Ipv6Addr>>,
)> {
    if !metadata(dir)?.is_dir() {
        return Err(AardvarkError::msg(format!(
            "config directory {} must exist and be a directory",
            dir
        )));
    }

    let mut network_membership: HashMap<String, Vec<String>> = HashMap::new();
    let mut container_ips: HashMap<String, Vec<IpAddr>> = HashMap::new();
    let mut reverse: HashMap<String, HashMap<IpAddr, Vec<String>>> = HashMap::new();
    let mut network_names: HashMap<String, HashMap<String, Vec<IpAddr>>> = HashMap::new();
    let mut listen_ips_4: HashMap<String, Vec<Ipv4Addr>> = HashMap::new();
    let mut listen_ips_6: HashMap<String, Vec<Ipv6Addr>> = HashMap::new();
    let mut ctr_dns_server: HashMap<IpAddr, Option<Vec<IpAddr>>> = HashMap::new();
    let mut network_dns_server: HashMap<String, Vec<IpAddr>> = HashMap::new();
    let mut network_is_internal: HashMap<String, bool> = HashMap::new();

    // Enumerate all files in the directory, read them in one by one.
    // Steadily build a map of what container has what IPs and what
    // container is in what networks.
    let configs = read_dir(dir)?;
    for config in configs {
        // Each entry is a result. Interpret Err to mean the config was removed
        // while we were working; warn only, don't error.
        // Might be safer to completely restart the process, but there's also a
        // chance that, if we do that, we never finish and update the config,
        // assuming the files in question are modified at a sufficiently high
        // rate.
        match config {
            Ok(cfg) => {
                // don't process aardvark pid files
                if let Some(path) = cfg.path().file_name() {
                    if path == constants::AARDVARK_PID_FILE {
                        continue;
                    }
                }
                let parsed_network_config = match parse_config(cfg.path().as_path()) {
                    Ok(c) => c,
                    Err(e) => {
                        if e.kind() != std::io::ErrorKind::NotFound {
                            error!(
                                "Error reading config file {:?} for server update: {}",
                                cfg.path(),
                                e
                            )
                        }
                        continue;
                    }
                };

                let mut internal = false;
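                // `internal` is set when the file name ends in the internal suffix
                // (e.g. "podman1%int", see constants::INTERNAL_SUFFIX); the suffix is
                // stripped below to recover the network name.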

                let network_name: String = match cfg.path().file_name() {
                    // This isn't *completely* safe, but I do not foresee many
                    // cases where our network names include non-UTF8
                    // characters.
                    Some(s) => match s.to_str() {
                        Some(st) => {
                            let name_full = st.to_string();
                            if name_full.ends_with(constants::INTERNAL_SUFFIX) {
                                internal = true;
                            }
                            name_full
                                .strip_suffix(constants::INTERNAL_SUFFIX)
                                .unwrap_or(&name_full)
                                .to_string()
                        },
                        None => return Err(AardvarkError::msg(
                            format!("configuration file {} name has non-UTF8 characters", s.to_string_lossy()),
                        )),
                    },
                    None => return Err(AardvarkError::msg(
                        format!("configuration file {} does not have a file name, cannot identify network name", cfg.path().to_string_lossy()),
                        )),
                };

                // Network-level DNS servers were found while parsing the config,
                // so populate the backend with them, but only if the network is
                // not internal. If it is internal, explicitly insert an empty list.
                if !parsed_network_config.network_dnsservers.is_empty() && !internal {
                    network_dns_server.insert(
                        network_name.clone(),
                        parsed_network_config.network_dnsservers,
                    );
                }
                if internal {
                    network_dns_server.insert(network_name.clone(), Vec::new());
                }

                for ip in parsed_network_config.network_bind_ip {
                    match ip {
                        IpAddr::V4(a) => listen_ips_4
                            .entry(network_name.clone())
                            .or_default()
                            .push(a),
                        IpAddr::V6(b) => listen_ips_6
                            .entry(network_name.clone())
                            .or_default()
                            .push(b),
                    }
                }

                for entry in parsed_network_config.container_entry {
                    // Container network membership
                    let ctr_networks = network_membership.entry(entry.id.clone()).or_default();

                    // Keep the network deduplicated
                    if !ctr_networks.contains(&network_name) {
                        ctr_networks.push(network_name.clone());
                    }

                    // Container IP addresses
                    let mut new_ctr_ips: Vec<IpAddr> = Vec::new();
                    if let Some(v4) = entry.v4 {
                        for ip in v4 {
                            reverse
                                .entry(network_name.clone())
                                .or_default()
                                .entry(IpAddr::V4(ip))
                                .or_default()
                                .append(&mut entry.aliases.clone());
                            // DNS only accepted on non-internal networks.
                            if !internal {
                                ctr_dns_server.insert(IpAddr::V4(ip), entry.dns_servers.clone());
                            }
                            new_ctr_ips.push(IpAddr::V4(ip));
                        }
                    }
                    if let Some(v6) = entry.v6 {
                        for ip in v6 {
                            reverse
                                .entry(network_name.clone())
                                .or_default()
                                .entry(IpAddr::V6(ip))
                                .or_default()
                                .append(&mut entry.aliases.clone());
                            // DNS only accepted on non-internal networks.
                            if !internal {
                                ctr_dns_server.insert(IpAddr::V6(ip), entry.dns_servers.clone());
                            }
                            new_ctr_ips.push(IpAddr::V6(ip));
                        }
                    }

                    let ctr_ips = container_ips.entry(entry.id.clone()).or_default();
                    ctr_ips.append(&mut new_ctr_ips.clone());

                    // Network aliases to IPs map.
                    let network_aliases = network_names.entry(network_name.clone()).or_default();
                    for alias in entry.aliases {
                        let alias_entries = network_aliases.entry(alias).or_default();
                        alias_entries.append(&mut new_ctr_ips.clone());
                    }

                    network_is_internal.insert(network_name.clone(), internal);
                }
            }
            Err(e) => {
                if e.kind() != std::io::ErrorKind::NotFound {
                    error!("Error listing config file for server update: {}", e)
                }
            }
        }
    }

    // Set up types to be returned.
    let mut ctrs: HashMap<IpAddr, Vec<String>> = HashMap::new();

    for (ctr_id, ips) in container_ips {
        match network_membership.get(&ctr_id) {
            Some(s) => {
                for ip in ips {
                    let ip_networks = ctrs.entry(ip).or_default();
                    ip_networks.append(&mut s.clone());
                }
            }
            None => {
                return Err(AardvarkError::msg(format!(
                    "Container ID {} has an entry in IPs table, but not network membership table",
                    ctr_id
                )))
            }
        }
    }

    Ok((
        DNSBackend::new(
            ctrs,
            network_names,
            reverse,
            ctr_dns_server,
            network_dns_server,
            network_is_internal,
            filter_search_domain.to_owned(),
        ),
        listen_ips_4,
        listen_ips_6,
    ))
}

// A single entry in a config file
struct CtrEntry {
    id: String,
    v4: Option<Vec<Ipv4Addr>>,
    v6: Option<Vec<Ipv6Addr>>,
    aliases: Vec<String>,
    dns_servers: Option<Vec<IpAddr>>,
}

// A simplified type for results returned by
// parse_config after parsing a single network
// config.
struct ParsedNetworkConfig {
    network_bind_ip: Vec<IpAddr>,
    container_entry: Vec<CtrEntry>,
    network_dnsservers: Vec<IpAddr>,
}

// Read and parse a single given configuration file
fn parse_config(path: &std::path::Path) -> Result<ParsedNetworkConfig, std::io::Error> {
    let content = read_to_string(path)?;
    let mut is_first = true;

    let mut bind_addrs: Vec<IpAddr> = Vec::new();
    let mut network_dns_servers: Vec<IpAddr> = Vec::new();
    let mut ctrs: Vec<CtrEntry> = Vec::new();

    // Split on newline, parse each line
    for line in content.split('\n') {
        if line.is_empty() {
            continue;
        }
        if is_first {
            let network_parts = line.split(' ').collect::<Vec<&str>>();
            if network_parts.is_empty() {
                return Err(std::io::Error::new(
                    std::io::ErrorKind::Other,
                    format!("invalid network configuration file: {}", path.display()),
                ));
            }
            // process bind ip
            for ip in network_parts[0].split(',') {
                let local_ip = match ip.parse() {
                    Ok(l) => l,
                    Err(e) => {
                        return Err(std::io::Error::new(
                            std::io::ErrorKind::Other,
                            format!("error parsing ip address {}: {}", ip, e),
                        ))
                    }
                };
                bind_addrs.push(local_ip);
            }

            // If the first line contains more than one column, custom DNS
            // servers are also defined at the network level, so process them.
            if network_parts.len() > 1 {
                for ip in network_parts[1].split(',') {
                    let local_ip = match ip.parse() {
                        Ok(l) => l,
                        Err(e) => {
                            return Err(std::io::Error::new(
                                std::io::ErrorKind::Other,
                                format!("error parsing network dns address {}: {}", ip, e),
                            ))
                        }
                    };
                    network_dns_servers.push(local_ip);
                }
            }

            is_first = false;
            continue;
        }

        // Split on space
        let parts = line.split(' ').collect::<Vec<&str>>();
        if parts.len() < 4 {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidData,
                format!(
                    "configuration file {} line {} is improperly formatted - too few entries",
                    path.to_string_lossy(),
                    line
                ),
            ));
        }

        let v4_addrs: Option<Vec<Ipv4Addr>> = if !parts[1].is_empty() {
            let ipv4 = match parts[1].split(',').map(|i| i.parse()).collect() {
                Ok(i) => i,
                Err(e) => {
                    return Err(std::io::Error::new(
                        std::io::ErrorKind::Other,
                        format!("error parsing IP address {}: {}", parts[1], e),
                    ))
                }
            };
            Some(ipv4)
        } else {
            None
        };

        let v6_addrs: Option<Vec<Ipv6Addr>> = if !parts[2].is_empty() {
            let ipv6 = match parts[2].split(',').map(|i| i.parse()).collect() {
                Ok(i) => i,
                Err(e) => {
                    return Err(std::io::Error::new(
                        std::io::ErrorKind::Other,
                        format!("error parsing IP address {}: {}", parts[2], e),
                    ))
                }
            };
            Some(ipv6)
        } else {
            None
        };

        let aliases: Vec<String> = parts[3]
            .split(',')
            .map(|x| x.to_string().to_lowercase())
            .collect::<Vec<String>>();

        if aliases.is_empty() {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidData,
                format!(
                    "configuration file {} line {} is improperly formatted - no names given",
                    path.to_string_lossy(),
                    line
                ),
            ));
        }

        let dns_servers: Option<Vec<IpAddr>> = if parts.len() == 5 && !parts[4].is_empty() {
            let dns_server = match parts[4].split(',').map(|i| i.parse()).collect() {
                Ok(i) => i,
                Err(e) => {
                    return Err(std::io::Error::new(
                        std::io::ErrorKind::Other,
                        format!("error parsing DNS server address {}: {}", parts[4], e),
                    ))
                }
            };
            Some(dns_server)
        } else {
            None
        };

        ctrs.push(CtrEntry {
            id: parts[0].to_string().to_lowercase(),
            v4: v4_addrs,
            v6: v6_addrs,
            aliases,
            dns_servers,
        });
    }

    // Must provide at least one bind address
    if bind_addrs.is_empty() {
        return Err(std::io::Error::new(
            std::io::ErrorKind::InvalidData,
            format!(
                "configuration file {} does not provide any bind addresses",
                path.to_string_lossy()
            ),
        ));
    }

    Ok(ParsedNetworkConfig {
        network_bind_ip: bind_addrs,
        container_entry: ctrs,
        network_dnsservers: network_dns_servers,
    })
}
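
#[cfg(test)]
mod config_format_example {
    // A minimal sketch (not part of the upstream test suite) showing how
    // `parse_configs` consumes a directory of network config files. The
    // directory name, file name, and container entry below are hypothetical.
    use super::*;
    use std::fs;
    use std::io::Write;

    #[test]
    fn parses_a_single_network_config() {
        let dir =
            std::env::temp_dir().join(format!("aardvark-config-example-{}", std::process::id()));
        fs::create_dir_all(&dir).expect("create temp config dir");

        // First line: bind address(es); following lines: one container entry each.
        let mut f = fs::File::create(dir.join("examplenet")).expect("create config file");
        writeln!(f, "10.88.0.1").expect("write bind line");
        writeln!(
            f,
            "0000000000000000000000000000000000000000000000000000000000000001 10.88.0.2  examplectr"
        )
        .expect("write container line");
        drop(f);

        let (_backend, v4, v6) =
            parse_configs(dir.to_str().unwrap(), ".dns.podman").expect("parse configs");
        assert_eq!(v4["examplenet"], vec![Ipv4Addr::new(10, 88, 0, 1)]);
        assert!(v6.is_empty());

        fs::remove_dir_all(&dir).expect("remove temp config dir");
    }
}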
07070100000031000041ED00000000000000000000000267AA032200000000000000000000000000000000000000000000001C00000000aardvark-dns-1.14.0/src/dns07070100000032000081A400000000000000000000000167AA032200004568000000000000000000000000000000000000002700000000aardvark-dns-1.14.0/src/dns/coredns.rsuse crate::backend::DNSBackend;
use crate::error::AardvarkResult;
use arc_swap::ArcSwap;
use arc_swap::Guard;
use futures_util::StreamExt;
use futures_util::TryStreamExt;
use hickory_client::{client::AsyncClient, proto::xfer::SerialMessage, rr::rdata, rr::Name};
use hickory_proto::tcp::TcpClientStream;
use hickory_proto::{
    iocompat::AsyncIoTokioAsStd,
    op::{Message, MessageType, ResponseCode},
    rr::{DNSClass, RData, Record, RecordType},
    tcp::TcpStream,
    udp::{UdpClientStream, UdpStream},
    xfer::{dns_handle::DnsHandle, BufDnsStreamHandle, DnsRequest},
    DnsStreamHandle,
};
use log::{debug, error, trace, warn};
use std::io::Error;
use std::net::{IpAddr, SocketAddr};
use std::sync::Arc;
use std::sync::Mutex;
use std::time::Duration;
use tokio::net::TcpListener;
use tokio::net::UdpSocket;

const DEFAULT_TIMEOUT: Duration = Duration::from_secs(5);

pub const DNS_PORT: u16 = 53;

pub struct CoreDns {
    rx: flume::Receiver<()>, // kill switch receiver
    inner: CoreDnsData,
}

#[derive(Clone)]
struct CoreDnsData {
    network_name: String,                     // raw network name
    backend: &'static ArcSwap<DNSBackend>,    // server's data store
    no_proxy: bool,                           // do not forward to external resolvers
    nameservers: Arc<Mutex<Vec<SocketAddr>>>, // host nameservers from resolv.conf
}

enum Protocol {
    Udp,
    Tcp,
}

impl CoreDns {
    // Most of the args can be removed in a design refactor,
    // so don't create a struct for this now.
    pub fn new(
        network_name: String,
        backend: &'static ArcSwap<DNSBackend>,
        rx: flume::Receiver<()>,
        no_proxy: bool,
        nameservers: Arc<Mutex<Vec<SocketAddr>>>,
    ) -> Self {
        CoreDns {
            rx,
            inner: CoreDnsData {
                network_name,
                backend,
                no_proxy,
                nameservers,
            },
        }
    }

    pub async fn run(
        &self,
        udp_socket: UdpSocket,
        tcp_listener: TcpListener,
    ) -> AardvarkResult<()> {
        let address = udp_socket.local_addr()?;
        let (mut receiver, sender_original) = UdpStream::with_bound(udp_socket, address);

        loop {
            tokio::select! {
                _ = self.rx.recv_async() => {
                    break;
                },
                v = receiver.next() => {
                    let msg_received = match v {
                        Some(value) => value,
                        None => {
                            // None received, nothing to process so continue
                            debug!("None recevied from stream, continue the loop");
                            continue;
                        }
                    };
                    Self::process_message(&self.inner, msg_received, &sender_original, Protocol::Udp).await;
                },
                res = tcp_listener.accept() => {
                    match res {
                        Ok((sock,addr)) => {
                            tokio::spawn(Self::process_tcp_stream(self.inner.clone(), sock, addr));
                        }
                        Err(e) => {
                            error!("Failed to accept new tcp connection: {e}");
                            break;
                        }
                    }
                }
            }
        }
        Ok(())
    }

    async fn process_tcp_stream(
        data: CoreDnsData,
        stream: tokio::net::TcpStream,
        peer: SocketAddr,
    ) {
        let (mut hickory_stream, sender_original) =
            TcpStream::from_stream(AsyncIoTokioAsStd(stream), peer);

        // It is possible for a client to keep the tcp socket open forever and never send any data.
        // We do not want that, so add a 3s timeout after which we close the socket.
        match tokio::time::timeout(Duration::from_secs(3), hickory_stream.next()).await {
            Ok(message) => {
                if let Some(msg) = message {
                    Self::process_message(&data, msg, &sender_original, Protocol::Tcp).await;
                    // The API is a bit strange: the first call to next() yields the message,
                    // but we must call it again so our reply is actually sent back.
                    hickory_stream.next().await;
                }
            }
            Err(_) => debug!(
                "Tcp connection {} was cancelled after 3s as it took to long to receive message",
                peer
            ),
        }
    }

    async fn process_message(
        data: &CoreDnsData,
        msg_received: Result<SerialMessage, Error>,
        sender_original: &BufDnsStreamHandle,
        proto: Protocol,
    ) {
        let msg = match msg_received {
            Ok(msg) => msg,
            Err(e) => {
                error!("Error parsing dns message {:?}", e);
                return;
            }
        };
        let backend = data.backend.load();
        let src_address = msg.addr();
        let mut sender = sender_original.with_remote_addr(src_address);
        let (request_name, record_type, mut req) = match parse_dns_msg(msg) {
            Some((name, record_type, req)) => (name, record_type, req),
            _ => {
                error!("None received while parsing dns message, this is not expected server will ignore this message");
                return;
            }
        };
        let request_name_string = request_name.to_string();

        // Create debug and trace info for key parameters.
        trace!("server network name: {:?}", data.network_name);
        debug!("request source address: {:?}", src_address);
        trace!("requested record type: {:?}", record_type);
        debug!(
            "checking if backend has entry for: {:?}",
            &request_name_string
        );
        trace!("server backend.name_mappings: {:?}", backend.name_mappings);
        trace!("server backend.ip_mappings: {:?}", backend.ip_mappings);

        match record_type {
            RecordType::PTR => {
                if let Some(msg) = reply_ptr(&request_name_string, &backend, src_address, &req) {
                    reply(&mut sender, src_address, &msg);
                    return;
                }
                // No match found, forwarding below.
            }
            RecordType::A | RecordType::AAAA => {
                if let Some(msg) = reply_ip(
                    &request_name_string,
                    &request_name,
                    &data.network_name,
                    record_type,
                    &backend,
                    src_address,
                    &mut req,
                ) {
                    reply(&mut sender, src_address, msg);
                    return;
                }
                // No match found, forwarding below.
            }

            // TODO: handle MX here like docker does

            // We do not handle this request type so do nothing,
            // we forward the request to upstream resolvers below.
            _ => {}
        };

        // are we allowed to forward?
        if data.no_proxy
            || backend.ctr_is_internal(&src_address.ip())
            || request_name_string.ends_with(&backend.search_domain)
        {
            let mut nx_message = req.clone();
            nx_message.set_response_code(ResponseCode::NXDomain);
            reply(&mut sender, src_address, &nx_message);
        } else {
            debug!(
                "Forwarding dns request for {} type: {}",
                &request_name_string, record_type
            );
            let mut nameservers = Vec::new();
            // Add resolvers configured for container
            if let Some(Some(dns_servers)) = backend.ctr_dns_server.get(&src_address.ip()) {
                for dns_server in dns_servers.iter() {
                    nameservers.push(SocketAddr::new(*dns_server, DNS_PORT));
                }
                // Add network scoped resolvers only if container specific resolvers were not configured
            } else if let Some(network_dns_servers) =
                backend.get_network_scoped_resolvers(&src_address.ip())
            {
                for dns_server in network_dns_servers.iter() {
                    nameservers.push(SocketAddr::new(*dns_server, DNS_PORT));
                }
            }
            // Use host resolvers if no custom resolvers are set for the container.
            if nameservers.is_empty() {
                nameservers.clone_from(&data.nameservers.lock().expect("lock nameservers"));
            }

            match proto {
                Protocol::Udp => {
                    tokio::spawn(Self::forward_to_servers(
                        nameservers,
                        sender,
                        src_address,
                        req,
                        proto,
                    ));
                }
                Protocol::Tcp => {
                    // we already spawned a new future when we read the message so there is no need to spawn another one
                    Self::forward_to_servers(nameservers, sender, src_address, req, proto).await;
                }
            }
        }
    }

    async fn forward_to_servers(
        nameservers: Vec<SocketAddr>,
        mut sender: BufDnsStreamHandle,
        src_address: SocketAddr,
        req: Message,
        proto: Protocol,
    ) {
        let mut timeout = DEFAULT_TIMEOUT;
        // Remember do not divide by 0.
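        // The total budget is split evenly across nameservers, e.g. two nameservers get 2.5s each.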
        if !nameservers.is_empty() {
            timeout = Duration::from_secs(5) / nameservers.len() as u32
        }
        // forward dns request to the host's /etc/resolv.conf
        for addr in nameservers {
            let (client, handle) = match proto {
                Protocol::Udp => {
                    let stream = UdpClientStream::<UdpSocket>::with_timeout(addr, timeout);
                    let (cl, bg) = match AsyncClient::connect(stream).await {
                        Ok(a) => a,
                        Err(e) => {
                            debug!("Failed to connect to {addr}: {e}");
                            continue;
                        }
                    };
                    let handle = tokio::spawn(bg);
                    (cl, handle)
                }
                Protocol::Tcp => {
                    let (stream, sender) = TcpClientStream::<
                        AsyncIoTokioAsStd<tokio::net::TcpStream>,
                    >::with_timeout(addr, timeout);
                    let (cl, bg) =
                        match AsyncClient::with_timeout(stream, sender, timeout, None).await {
                            Ok(a) => a,
                            Err(e) => {
                                debug!("Failed to connect to {addr}: {e}");
                                continue;
                            }
                        };
                    let handle = tokio::spawn(bg);
                    (cl, handle)
                }
            };

            if let Some(resp) = forward_dns_req(client, req.clone()).await {
                if reply(&mut sender, src_address, &resp).is_some() {
                    // request was resolved by this resolver, so
                    // break and don't try the other resolvers
                    break;
                }
            }
            handle.abort();
        }
    }
}

fn reply(sender: &mut BufDnsStreamHandle, socket_addr: SocketAddr, msg: &Message) -> Option<()> {
    let id = msg.id();
    let mut msg_mut = msg.clone();
    msg_mut.set_message_type(MessageType::Response);
    // If `RD` is set and `RA` is false set `RA`.
    if msg.recursion_desired() && !msg.recursion_available() {
        msg_mut.set_recursion_available(true);
    }
    let response = SerialMessage::new(msg_mut.to_vec().ok()?, socket_addr);

    match sender.send(response) {
        Ok(_) => {
            debug!("[{}] success reponse", id);
        }
        Err(e) => {
            error!("[{}] fail response: {:?}", id, e);
        }
    }

    Some(())
}

fn parse_dns_msg(body: SerialMessage) -> Option<(Name, RecordType, Message)> {
    match Message::from_vec(body.bytes()) {
        Ok(msg) => {
            let mut name = Name::default();
            let mut record_type: RecordType = RecordType::A;

            let parsed_msg = format!(
                "[{}] parsed message body: {} edns: {}",
                msg.id(),
                msg.queries()
                    .first()
                    .map(|q| {
                        name = q.name().clone();
                        record_type = q.query_type();

                        format!("{} {} {}", q.name(), q.query_type(), q.query_class(),)
                    })
                    .unwrap_or_else(Default::default,),
                msg.extensions().is_some(),
            );

            debug!("parsed message {:?}", parsed_msg);

            Some((name, record_type, msg))
        }
        Err(e) => {
            warn!("Failed while parsing message: {}", e);
            None
        }
    }
}

async fn forward_dns_req(cl: AsyncClient, message: Message) -> Option<Message> {
    let req = DnsRequest::new(message, Default::default());
    let id = req.id();

    match cl.send(req).try_next().await {
        Ok(Some(response)) => {
            for answer in response.answers() {
                debug!(
                    "{} {} {} {} => {:#?}",
                    id,
                    answer.name().to_string(),
                    answer.record_type(),
                    answer.dns_class(),
                    answer.data(),
                );
            }
            let mut response_message = response.into_message();
            response_message.set_id(id);
            Some(response_message)
        }
        Ok(None) => {
            error!("{} dns request got empty response", id);
            None
        }
        Err(e) => {
            error!("{} dns request failed: {}", id, e);
            None
        }
    }
}

fn reply_ptr(
    name: &str,
    backend: &Guard<Arc<DNSBackend>>,
    src_address: SocketAddr,
    req: &Message,
) -> Option<Message> {
    let ptr_lookup_ip: String;
    // Are we IPv4 or IPv6?
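    // An IPv4 query arrives with reversed octets, e.g. "2.0.88.10.in-addr.arpa." for 10.88.0.2;
    // an IPv6 query arrives with the address nibbles reversed under ".ip6.arpa.".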

    match name.strip_suffix(".in-addr.arpa.") {
        Some(n) => ptr_lookup_ip = n.split('.').rev().collect::<Vec<&str>>().join("."),
        None => {
            // not ipv4
            match name.strip_suffix(".ip6.arpa.") {
                Some(n) => {
                    // ipv6 string is 39 chars max
                    let mut tmp_ip = String::with_capacity(40);
                    for (i, c) in n.split('.').rev().enumerate() {
                        tmp_ip.push_str(c);
                        // insert colon after 4 hex chars but not at the end
                        if i % 4 == 3 && i < 31 {
                            tmp_ip.push(':');
                        }
                    }
                    ptr_lookup_ip = tmp_ip;
                }
                // neither ipv4 nor ipv6, something we do not understand
                None => return None,
            }
        }
    }

    trace!("Performing reverse lookup for ip: {}", &ptr_lookup_ip);

    // We should probably log malformed queries, but for now if-let should be fine.
    if let Ok(lookup_ip) = ptr_lookup_ip.parse() {
        if let Some(reverse_lookup) = backend.reverse_lookup(&src_address.ip(), &lookup_ip) {
            let mut req_clone = req.clone();
            for entry in reverse_lookup {
                if let Ok(answer) = Name::from_ascii(format!("{}.", entry)) {
                    let mut record = Record::new();
                    record
                        .set_name(Name::from_str_relaxed(name).unwrap_or_default())
                        .set_rr_type(RecordType::PTR)
                        .set_dns_class(DNSClass::IN)
                        .set_data(Some(RData::PTR(rdata::PTR(answer))));
                    req_clone.add_answer(record);
                }
            }
            return Some(req_clone);
        }
    };
    None
}

fn reply_ip<'a>(
    name: &str,
    request_name: &Name,
    network_name: &str,
    record_type: RecordType,
    backend: &Guard<Arc<DNSBackend>>,
    src_address: SocketAddr,
    req: &'a mut Message,
) -> Option<&'a Message> {
    // attempt intra network resolution
    let resolved_ip_list = backend.lookup(&src_address.ip(), network_name, name)?;

    if record_type == RecordType::A {
        for record_addr in resolved_ip_list {
            if let IpAddr::V4(ipv4) = record_addr {
                let mut record = Record::new();
                // DO NOT SET A TTL, the default is 0 which means clients should not cache it.
                // Containers can be restarted with a different ip at any time so allowing
                // caching here doesn't make much sense given the server is local and queries
                // should be fast enough anyway.
                record
                    .set_name(request_name.clone())
                    .set_rr_type(RecordType::A)
                    .set_dns_class(DNSClass::IN)
                    .set_data(Some(RData::A(rdata::A(ipv4))));
                req.add_answer(record);
            }
        }
    } else if record_type == RecordType::AAAA {
        for record_addr in resolved_ip_list {
            if let IpAddr::V6(ipv6) = record_addr {
                let mut record = Record::new();
                record
                    .set_name(request_name.clone())
                    .set_rr_type(RecordType::AAAA)
                    .set_dns_class(DNSClass::IN)
                    .set_data(Some(RData::AAAA(rdata::AAAA(ipv6))));
                req.add_answer(record);
            }
        }
    }
    Some(req)
}
07070100000033000081A400000000000000000000000167AA032200000011000000000000000000000000000000000000002300000000aardvark-dns-1.14.0/src/dns/mod.rspub mod coredns;
07070100000034000081A400000000000000000000000167AA032200000B1C000000000000000000000000000000000000002100000000aardvark-dns-1.14.0/src/error.rsuse std::fmt;

pub type AardvarkResult<T> = Result<T, AardvarkError>;

#[derive(Debug)]
pub enum AardvarkError {
    Message(String),
    IOError(std::io::Error),
    Chain(String, Box<Self>),
    List(AardvarkErrorList),
    AddrParseError(std::net::AddrParseError),
}

impl AardvarkError {
    pub fn msg<S>(msg: S) -> Self
    where
        S: Into<String>,
    {
        Self::Message(msg.into())
    }

    pub fn wrap<S>(msg: S, chained: Self) -> Self
    where
        S: Into<String>,
    {
        Self::Chain(msg.into(), Box::new(chained))
    }
}

pub trait AardvarkWrap<T, E> {
    /// Wrap the error value with additional context.
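    ///
    /// For example (as used elsewhere in this crate):
    /// `File::open("/etc/resolv.conf").wrap("open resolv.conf")?`.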
    fn wrap<C>(self, context: C) -> AardvarkResult<T>
    where
        C: Into<String>,
        E: Into<AardvarkError>;
}

impl<T, E> AardvarkWrap<T, E> for Result<T, E>
where
    E: Into<AardvarkError>,
{
    fn wrap<C>(self, msg: C) -> AardvarkResult<T>
    where
        C: Into<String>,
        E: Into<AardvarkError>,
    {
        // Not using map_err to save 2 useless frames off the captured backtrace
        // in ext_context.
        match self {
            Ok(ok) => Ok(ok),
            Err(error) => Err(AardvarkError::wrap(msg, error.into())),
        }
    }
}

impl fmt::Display for AardvarkError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Message(s) => write!(f, "{s}"),
            Self::Chain(s, e) => write!(f, "{s}: {e}"),
            Self::IOError(e) => write!(f, "IO error: {e}"),
            Self::AddrParseError(e) => write!(f, "parse address: {e}"),
            Self::List(list) => {
                // some extra code to only add \n when it contains multiple errors
                let mut iter = list.0.iter();
                if let Some(first) = iter.next() {
                    write!(f, "{first}")?;
                }
                for err in iter {
                    write!(f, "\n{err}")?;
                }
                Ok(())
            }
        }
    }
}

impl From<std::io::Error> for AardvarkError {
    fn from(err: std::io::Error) -> Self {
        Self::IOError(err)
    }
}

impl From<nix::Error> for AardvarkError {
    fn from(err: nix::Error) -> Self {
        Self::IOError(err.into())
    }
}

impl From<std::net::AddrParseError> for AardvarkError {
    fn from(err: std::net::AddrParseError) -> Self {
        Self::AddrParseError(err)
    }
}

#[derive(Debug)]
pub struct AardvarkErrorList(Vec<AardvarkError>);

impl AardvarkErrorList {
    pub fn new() -> Self {
        Self(vec![])
    }

    pub fn push(&mut self, err: AardvarkError) {
        self.0.push(err)
    }

    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
}

// we do not need it but clippy wants it
impl Default for AardvarkErrorList {
    fn default() -> Self {
        Self::new()
    }
}
07070100000035000081A400000000000000000000000167AA03220000005F000000000000000000000000000000000000001F00000000aardvark-dns-1.14.0/src/lib.rspub mod backend;
pub mod commands;
pub mod config;
pub mod dns;
pub mod error;
pub mod server;
07070100000036000081A400000000000000000000000167AA0322000009F2000000000000000000000000000000000000002000000000aardvark-dns-1.14.0/src/main.rsuse std::env;
use std::str::FromStr;

use clap::{Parser, Subcommand};

use aardvark_dns::commands::{run, version};
use log::Level;
use syslog::{BasicLogger, Facility, Formatter3164};

#[derive(Parser, Debug)]
#[clap(version = env!("CARGO_PKG_VERSION"))]
struct Opts {
    /// Path to configuration directory
    #[clap(short, long)]
    config: Option<String>,
    /// Host port for aardvark servers, defaults to 5533
    #[clap(short, long)]
    port: Option<u16>,
    /// Filters search domain for backward compatibility with dnsname/dnsmasq
    #[clap(short, long)]
    filter_search_domain: Option<String>,
    /// Aardvark-dns subcommand to execute
    #[clap(subcommand)]
    subcmd: SubCommand,
}

#[derive(Subcommand, Debug)]
enum SubCommand {
    /// Runs the aardvark dns server with the specified configuration directory.
    Run(run::Run),
    /// Display info about aardvark.
    Version(version::Version),
}

fn main() {
    let formatter = Formatter3164 {
        facility: Facility::LOG_USER,
        hostname: None,
        process: "aardvark-dns".into(),
        pid: 0,
    };

    let log_level = match env::var("RUST_LOG") {
        Ok(val) => match Level::from_str(&val) {
            Ok(level) => level,
            Err(e) => {
                eprintln!("failed to parse RUST_LOG level: {}", e);
                Level::Info
            }
        },
        // if env is not set default to info
        Err(_) => Level::Info,
    };

    // On error do nothing, running on system without syslog is fine and we should not clutter
    // logs with meaningless errors, https://github.com/containers/podman/issues/19809.
    if let Ok(logger) = syslog::unix(formatter) {
        if let Err(e) = log::set_boxed_logger(Box::new(BasicLogger::new(logger)))
            .map(|()| log::set_max_level(log_level.to_level_filter()))
        {
            eprintln!("failed to initialize syslog logger: {}", e)
        };
    }

    let opts = Opts::parse();

    let dir = opts.config.unwrap_or_else(|| String::from("/dev/stdin"));
    let port = opts.port.unwrap_or(5533_u16);
    let filter_search_domain = opts
        .filter_search_domain
        .unwrap_or_else(|| String::from(".dns.podman"));
    let result = match opts.subcmd {
        SubCommand::Run(run) => run.exec(dir, port, filter_search_domain),
        SubCommand::Version(version) => version.exec(),
    };

    match result {
        Ok(_) => {}
        Err(err) => {
            eprintln!("{err}");
            std::process::exit(1);
        }
    }
}

#[cfg(test)]
mod test;
07070100000037000041ED00000000000000000000000267AA032200000000000000000000000000000000000000000000001F00000000aardvark-dns-1.14.0/src/server07070100000038000081A400000000000000000000000167AA032200000042000000000000000000000000000000000000002600000000aardvark-dns-1.14.0/src/server/mod.rs// Serve DNS requests on the given bind addresses.
pub mod serve;
07070100000039000081A400000000000000000000000167AA0322000043EC000000000000000000000000000000000000002800000000aardvark-dns-1.14.0/src/server/serve.rsuse crate::backend::DNSBackend;
use crate::config::constants::AARDVARK_PID_FILE;
use crate::config::parse_configs;
use crate::dns::coredns::CoreDns;
use crate::dns::coredns::DNS_PORT;
use crate::error::AardvarkError;
use crate::error::AardvarkErrorList;
use crate::error::AardvarkResult;
use crate::error::AardvarkWrap;
use arc_swap::ArcSwap;
use log::{debug, error, info};
use nix::unistd;
use nix::unistd::dup2;
use std::collections::HashMap;
use std::collections::HashSet;
use std::env;
use std::fs;
use std::fs::OpenOptions;
use std::hash::Hash;
use std::io::Error;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
use std::os::fd::AsRawFd;
use std::os::fd::OwnedFd;
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::OnceLock;
use tokio::net::{TcpListener, UdpSocket};
use tokio::signal::unix::{signal, SignalKind};
use tokio::task::JoinHandle;

use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
use std::process;

type ThreadHandleMap<Ip> =
    HashMap<(String, Ip), (flume::Sender<()>, JoinHandle<AardvarkResult<()>>)>;

pub fn create_pid(config_path: &str) -> Result<(), std::io::Error> {
    // Before serving, write our pid to config_path so other processes can notify
    // aardvark of data changes.
    let path = Path::new(config_path).join(AARDVARK_PID_FILE);
    let mut pid_file = match File::create(path) {
        Err(err) => {
            return Err(std::io::Error::new(
                std::io::ErrorKind::Other,
                format!("Unable to get process pid: {}", err),
            ));
        }
        Ok(file) => file,
    };

    let server_pid = process::id().to_string();
    if let Err(err) = pid_file.write_all(server_pid.as_bytes()) {
        return Err(std::io::Error::new(
            std::io::ErrorKind::Other,
            format!("Unable to write pid to file: {}", err),
        ));
    }

    Ok(())
}

#[tokio::main]
pub async fn serve(
    config_path: &str,
    port: u16,
    filter_search_domain: &str,
    ready: OwnedFd,
) -> AardvarkResult<()> {
    let mut signals = signal(SignalKind::hangup())?;
    let no_proxy: bool = env::var("AARDVARK_NO_PROXY").is_ok();

    let mut handles_v4 = HashMap::new();
    let mut handles_v6 = HashMap::new();
    let nameservers = Arc::new(Mutex::new(Vec::new()));

    read_config_and_spawn(
        config_path,
        port,
        filter_search_domain,
        &mut handles_v4,
        &mut handles_v6,
        nameservers.clone(),
        no_proxy,
    )
    .await?;
    // We are ready now. This is far from perfect, we should at least wait for the first bind
    // to work, but that is not really possible with the current code flow and needs more changes.
    daemonize()?;
    let msg: [u8; 1] = [b'1'];
    unistd::write(&ready, &msg)?;
    drop(ready);

    loop {
        // Block until we receive a SIGHUP.
        signals.recv().await;
        debug!("Received SIGHUP");
        if let Err(e) = read_config_and_spawn(
            config_path,
            port,
            filter_search_domain,
            &mut handles_v4,
            &mut handles_v6,
            nameservers.clone(),
            no_proxy,
        )
        .await
        {
            // do not exit here, we just keep running even if something failed
            error!("{e}");
        };
    }
}

/// # Ensure the expected DNS server threads are running
///
/// Stop threads corresponding to listen IPs no longer in the configuration and start threads
/// corresponding to listen IPs that were added.
async fn stop_and_start_threads<Ip>(
    port: u16,
    backend: &'static ArcSwap<DNSBackend>,
    listen_ips: HashMap<String, Vec<Ip>>,
    thread_handles: &mut ThreadHandleMap<Ip>,
    no_proxy: bool,
    nameservers: Arc<Mutex<Vec<SocketAddr>>>,
) -> AardvarkResult<()>
where
    Ip: Eq + Hash + Copy + Into<IpAddr> + Send + 'static,
{
    let mut expected_threads = HashSet::new();
    for (network_name, listen_ip_list) in listen_ips {
        for ip in listen_ip_list {
            expected_threads.insert((network_name.clone(), ip));
        }
    }

    // First we shut down any old threads that should no longer be running.  This should be
    // done before starting new ones in case a listen IP was moved from being under one network
    // name to another.
    let to_shut_down: Vec<_> = thread_handles
        .keys()
        .filter(|k| !expected_threads.contains(k))
        .cloned()
        .collect();
    stop_threads(thread_handles, Some(to_shut_down)).await;

    // Then we start any new threads.
    let to_start: Vec<_> = expected_threads
        .iter()
        .filter(|k| !thread_handles.contains_key(*k))
        .cloned()
        .collect();

    let mut errors = AardvarkErrorList::new();

    for (network_name, ip) in to_start {
        let (shutdown_tx, shutdown_rx) = flume::bounded(0);
        let network_name_ = network_name.clone();
        let ns = nameservers.clone();
        let addr = SocketAddr::new(ip.into(), port);
        let udp_sock = match UdpSocket::bind(addr).await {
            Ok(s) => s,
            Err(err) => {
                errors.push(AardvarkError::wrap(
                    format!("failed to bind udp listener on {addr}"),
                    err.into(),
                ));
                continue;
            }
        };

        let tcp_sock = match TcpListener::bind(addr).await {
            Ok(s) => s,
            Err(err) => {
                errors.push(AardvarkError::wrap(
                    format!("failed to bind tcp listener on {addr}"),
                    err.into(),
                ));
                continue;
            }
        };

        let handle = tokio::spawn(async move {
            start_dns_server(
                network_name_,
                udp_sock,
                tcp_sock,
                backend,
                shutdown_rx,
                no_proxy,
                ns,
            )
            .await
        });

        thread_handles.insert((network_name, ip), (shutdown_tx, handle));
    }

    if errors.is_empty() {
        return Ok(());
    }

    Err(AardvarkError::List(errors))
}

/// # Stop DNS server threads
///
/// If the `filter` parameter is `Some` only threads in the filter `Vec` will be stopped.
async fn stop_threads<Ip>(
    thread_handles: &mut ThreadHandleMap<Ip>,
    filter: Option<Vec<(String, Ip)>>,
) where
    Ip: Eq + Hash + Copy,
{
    let mut handles = Vec::new();

    let to_shut_down: Vec<_> = filter.unwrap_or_else(|| thread_handles.keys().cloned().collect());

    for key in to_shut_down {
        let (tx, handle) = thread_handles.remove(&key).unwrap();
        handles.push(handle);
        drop(tx);
    }

    for handle in handles {
        match handle.await {
            Ok(res) => {
                // result returned by the future, i.e. the actual
                // result from start_dns_server()
                if let Err(e) = res {
                    error!("Error from dns server: {}", e)
                }
            }
            // error from tokio itself
            Err(e) => error!("Error from dns server task: {}", e),
        }
    }
}

async fn start_dns_server(
    name: String,
    udp_socket: UdpSocket,
    tcp_socket: TcpListener,
    backend: &'static ArcSwap<DNSBackend>,
    rx: flume::Receiver<()>,
    no_proxy: bool,
    nameservers: Arc<Mutex<Vec<SocketAddr>>>,
) -> AardvarkResult<()> {
    let server = CoreDns::new(name, backend, rx, no_proxy, nameservers);
    server
        .run(udp_socket, tcp_socket)
        .await
        .wrap("run dns server")
}

async fn read_config_and_spawn(
    config_path: &str,
    port: u16,
    filter_search_domain: &str,
    handles_v4: &mut ThreadHandleMap<Ipv4Addr>,
    handles_v6: &mut ThreadHandleMap<Ipv6Addr>,
    nameservers: Arc<Mutex<Vec<SocketAddr>>>,
    no_proxy: bool,
) -> AardvarkResult<()> {
    let (conf, listen_ip_v4, listen_ip_v6) =
        parse_configs(config_path, filter_search_domain).wrap("unable to parse config")?;

    // We store the `DNSBackend` in an `ArcSwap` so we can replace it when the configuration is
    // reloaded.
    static DNSBACKEND: OnceLock<ArcSwap<DNSBackend>> = OnceLock::new();
    let backend = match DNSBACKEND.get() {
        Some(b) => {
            b.store(Arc::new(conf));
            b
        }
        None => DNSBACKEND.get_or_init(|| ArcSwap::from(Arc::new(conf))),
    };

    debug!("Successfully parsed config");
    debug!("Listen v4 ip {:?}", listen_ip_v4);
    debug!("Listen v6 ip {:?}", listen_ip_v6);

    // kill the server if the listen ips are empty
    if listen_ip_v4.is_empty() && listen_ip_v6.is_empty() {
        info!("No configuration found stopping the sever");

        let path = Path::new(config_path).join(AARDVARK_PID_FILE);
        if let Err(err) = fs::remove_file(path) {
            error!("failed to remove the pid file: {}", &err);
            process::exit(1);
        }

        // Gracefully stop all server threads first.
        stop_threads(handles_v4, None).await;
        stop_threads(handles_v6, None).await;

        process::exit(0);
    }

    let mut errors = AardvarkErrorList::new();

    // get host nameservers
    let upstream_resolvers = match get_upstream_resolvers() {
        Ok(ns) => ns,
        Err(err) => {
            errors.push(AardvarkError::wrap(
                "failed to get upstream nameservers, dns forwarding will not work",
                err,
            ));
            Vec::new()
        }
    };
    debug!("Using the following upstream servers: {upstream_resolvers:?}");

    {
        // use new scope to only lock for a short time
        *nameservers.lock().expect("lock nameservers") = upstream_resolvers;
    }

    if let Err(err) = stop_and_start_threads(
        port,
        backend,
        listen_ip_v4,
        handles_v4,
        no_proxy,
        nameservers.clone(),
    )
    .await
    {
        errors.push(err)
    };

    if let Err(err) = stop_and_start_threads(
        port,
        backend,
        listen_ip_v6,
        handles_v6,
        no_proxy,
        nameservers,
    )
    .await
    {
        errors.push(err)
    };

    if errors.is_empty() {
        return Ok(());
    }

    Err(AardvarkError::List(errors))
}

// creates a new session and puts /dev/null on the stdio streams
fn daemonize() -> Result<(), Error> {
    // remove any controlling terminals
    // but don't hardstop if this fails
    let _ = unsafe { libc::setsid() }; // check https://docs.rs/libc
                                       // close fds -> stdout, stdin and stderr
    let dev_null = OpenOptions::new()
        .read(true)
        .write(true)
        .open("/dev/null")
        .map_err(|e| std::io::Error::new(e.kind(), format!("/dev/null: {:#}", e)))?;
    // redirect stdout, stdin and stderr to /dev/null
    let fd = dev_null.as_raw_fd();
    let _ = dup2(fd, 0);
    let _ = dup2(fd, 1);
    let _ = dup2(fd, 2);
    Ok(())
}

// read /etc/resolv.conf and return all nameservers
fn get_upstream_resolvers() -> AardvarkResult<Vec<SocketAddr>> {
    let mut f = File::open("/etc/resolv.conf").wrap("open resolv.conf")?;
    let mut buf = String::with_capacity(4096);
    f.read_to_string(&mut buf).wrap("read resolv.conf")?;

    parse_resolv_conf(&buf)
}

fn parse_resolv_conf(content: &str) -> AardvarkResult<Vec<SocketAddr>> {
    let mut nameservers = Vec::new();
    for line in content.split('\n') {
        // split off comments
        let line = match line.split_once(['#', ';']) {
            Some((f, _)) => f,
            None => line,
        };
        let mut line_parts = line.split_whitespace();
        match line_parts.next() {
            Some(first) => {
                if first == "nameserver" {
                    if let Some(ip) = line_parts.next() {
                        // split off the zone; we currently do not support the link local zone with ipv6 addresses
                        let mut scope = None;
                        let ip = match ip.split_once("%") {
                            Some((ip, scope_name)) => {
                                // allow both interface names and static ids
                                let id = match scope_name.parse() {
                                    Ok(id) => id,
                                    Err(_) => nix::net::if_::if_nametoindex(scope_name)
                                        .wrap("resolve scope id")?,
                                };

                                scope = Some(id);
                                ip
                            }
                            None => ip,
                        };
                        let ip = ip.parse().wrap(ip)?;

                        let addr = match ip {
                            IpAddr::V4(ip) => {
                                if scope.is_some() {
                                    return Err(AardvarkError::msg(
                                        "scope id not supported for ipv4 address",
                                    ));
                                }
                                SocketAddr::V4(SocketAddrV4::new(ip, DNS_PORT))
                            }
                            IpAddr::V6(ip) => SocketAddr::V6(SocketAddrV6::new(
                                ip,
                                DNS_PORT,
                                0,
                                scope.unwrap_or(0),
                            )),
                        };

                        nameservers.push(addr);
                    }
                }
            }
            None => continue,
        }
    }

    // we do not have time to try many nameservers anyway so only use the first three
    nameservers.truncate(3);
    Ok(nameservers)
}

#[cfg(test)]
mod tests {
    use super::*;

    const IP_1_1_1_1: SocketAddr =
        SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(1, 1, 1, 1), DNS_PORT));
    const IP_1_1_1_2: SocketAddr =
        SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(1, 1, 1, 2), DNS_PORT));
    const IP_1_1_1_3: SocketAddr =
        SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(1, 1, 1, 3), DNS_PORT));

    /// fdfd:733b:dc3:220b::2
    const IP_FDFD_733B_DC3_220B_2: SocketAddr = SocketAddr::V6(SocketAddrV6::new(
        Ipv6Addr::new(0xfdfd, 0x733b, 0xdc3, 0x220b, 0, 0, 0, 2),
        DNS_PORT,
        0,
        0,
    ));

    /// fe80::1%lo
    const IP_FE80_1: SocketAddr = SocketAddr::V6(SocketAddrV6::new(
        Ipv6Addr::new(0xfe80, 0, 0, 0, 0, 0, 0, 1),
        DNS_PORT,
        0,
        1,
    ));

    #[test]
    fn test_parse_resolv_conf() {
        let res = parse_resolv_conf("nameserver 1.1.1.1").expect("failed to parse");
        assert_eq!(res, vec![IP_1_1_1_1]);
    }

    #[test]
    fn test_parse_resolv_conf_multiple() {
        let res = parse_resolv_conf(
            "nameserver 1.1.1.1
nameserver 1.1.1.2
nameserver 1.1.1.3",
        )
        .expect("failed to parse");
        assert_eq!(res, vec![IP_1_1_1_1, IP_1_1_1_2, IP_1_1_1_3]);
    }

    #[test]
    fn test_parse_resolv_conf_search_and_options() {
        let res = parse_resolv_conf(
            "nameserver 1.1.1.1
nameserver 1.1.1.2
nameserver 1.1.1.3
search test.podman
options rotate",
        )
        .expect("failed to parse");
        assert_eq!(res, vec![IP_1_1_1_1, IP_1_1_1_2, IP_1_1_1_3]);
    }
    #[test]
    fn test_parse_resolv_conf_with_comment() {
        let res = parse_resolv_conf(
            "# mytest
            nameserver 1.1.1.1 # space
nameserver 1.1.1.2#nospace
     #leading spaces
nameserver 1.1.1.3",
        )
        .expect("failed to parse");
        assert_eq!(res, vec![IP_1_1_1_1, IP_1_1_1_2, IP_1_1_1_3]);
    }

    #[test]
    fn test_parse_resolv_conf_with_invalid_content() {
        let res = parse_resolv_conf(
            "hey I am not known
nameserver 1.1.1.1
nameserver 1.1.1.2 somestuff here
abc
nameserver 1.1.1.3",
        )
        .expect("failed to parse");
        assert_eq!(res, vec![IP_1_1_1_1, IP_1_1_1_2, IP_1_1_1_3]);
    }

    #[test]
    fn test_parse_resolv_conf_truncate_to_three() {
        let res = parse_resolv_conf(
            "nameserver 1.1.1.1
nameserver 1.1.1.2
nameserver 1.1.1.3
nameserver 1.1.1.4
nameserver 1.2.3.4",
        )
        .expect("failed to parse");
        assert_eq!(res, vec![IP_1_1_1_1, IP_1_1_1_2, IP_1_1_1_3]);
    }

    #[test]
    fn test_parse_resolv_conf_with_invalid_ip() {
        parse_resolv_conf("nameserver abc").expect_err("invalid ip must error");
    }

    #[test]
    fn test_parse_resolv_ipv6() {
        let res = parse_resolv_conf(
            "nameserver fdfd:733b:dc3:220b::2
nameserver 1.1.1.2",
        )
        .expect("failed to parse");
        assert_eq!(res, vec![IP_FDFD_733B_DC3_220B_2, IP_1_1_1_2]);
    }

    #[test]
    fn test_parse_resolv_ipv6_link_local_zone() {
        // Using lo here because we know that will always be id 1 and we
        // cannot assume any other interface name here.
        let res = parse_resolv_conf(
            "nameserver fe80::1%lo
",
        )
        .expect("failed to parse");
        assert_eq!(res, vec![IP_FE80_1]);
    }

    #[test]
    fn test_parse_resolv_ipv6_link_local_zone_id() {
        let res = parse_resolv_conf(
            "nameserver fe80::1%1
",
        )
        .expect("failed to parse");
        assert_eq!(res, vec![IP_FE80_1]);
    }
}
0707010000003A000041ED00000000000000000000000267AA032200000000000000000000000000000000000000000000001D00000000aardvark-dns-1.14.0/src/test0707010000003B000041ED00000000000000000000000267AA032200000000000000000000000000000000000000000000002400000000aardvark-dns-1.14.0/src/test/config0707010000003C000041ED00000000000000000000000267AA032200000000000000000000000000000000000000000000003E00000000aardvark-dns-1.14.0/src/test/config/network_scoped_custom_dns0707010000003D000081A400000000000000000000000167AA0322000001B9000000000000000000000000000000000000004500000000aardvark-dns-1.14.0/src/test/config/network_scoped_custom_dns/podman10.88.0.1 127.0.0.1,::2
68fb291b0318b54a71f6f3636e58bd0896f084e5ba4fa311ecf36e019c5e6e43 10.88.0.2  condescendingnash 8.8.8.8
68fb291b0318b54a71f6f3636e58bd0896f084e5ba4fa311ecf36e019c5e6e48 10.88.0.5  HelloWorld 3.3.3.3,1.1.1.1,::1
95655fb6832ba134efa66e9c80862a6c9b04f3cc6abf8adfdda8c38112c2c6fa 10.88.0.3  hopefulmontalcini,testdbctr
8bcc5fe0cb09bee5dfb71d61503a87688cfc82aa5f130bcedb19357a17765926 10.88.0.4  trustingzhukovsky,ctr1,ctra
0707010000003E000041ED00000000000000000000000267AA032200000000000000000000000000000000000000000000002B00000000aardvark-dns-1.14.0/src/test/config/podman0707010000003F000081A400000000000000000000000167AA03220000018F000000000000000000000000000000000000003200000000aardvark-dns-1.14.0/src/test/config/podman/podman10.88.0.1
68fb291b0318b54a71f6f3636e58bd0896f084e5ba4fa311ecf36e019c5e6e43 10.88.0.2  condescendingnash
68fb291b0318b54a71f6f3636e58bd0896f084e5ba4fa311ecf36e019c5e6e48 10.88.0.5  HelloWorld
95655fb6832ba134efa66e9c80862a6c9b04f3cc6abf8adfdda8c38112c2c6fa 10.88.0.3  hopefulmontalcini,testdbctr
8bcc5fe0cb09bee5dfb71d61503a87688cfc82aa5f130bcedb19357a17765926 10.88.0.4  trustingzhukovsky,ctr1,ctra
07070100000040000081A400000000000000000000000167AA03220000013B000000000000000000000000000000000000002C00000000aardvark-dns-1.14.0/src/test/config/podman210.88.0.1
68fb291b0318b54a71f6f3636e58bd0896f084e5ba4fa311ecf36e019c5e6e43 10.88.0.2  condescending_nash
95655fb6832ba134efa66e9c80862a6c9b04f3cc6abf8adfdda8c38112c2c6fa 10.88.0.3  hopeful_montalcini,testdbctr
8bcc5fe0cb09bee5dfb71d61503a87688cfc82aa5f130bcedb19357a17765926 10.88.0.4  trusting_zhukovsky,ctr1,ctra
07070100000041000041ED00000000000000000000000267AA032200000000000000000000000000000000000000000000003600000000aardvark-dns-1.14.0/src/test/config/podman_bad_config07070100000042000081A400000000000000000000000167AA03220000008C000000000000000000000000000000000000003D00000000aardvark-dns-1.14.0/src/test/config/podman_bad_config/podman10.88.0.1 dfdsfds
10.88.0.2  condescendingnash
95655fb6832ba134efa66e9c80862a6c9b04f3cc6abf8adfdda8c38112c2c6fa hopefulmontalcini,testdbctr
07070100000043000041ED00000000000000000000000267AA032200000000000000000000000000000000000000000000003E00000000aardvark-dns-1.14.0/src/test/config/podman_custom_dns_servers07070100000044000081A400000000000000000000000167AA0322000001AB000000000000000000000000000000000000004500000000aardvark-dns-1.14.0/src/test/config/podman_custom_dns_servers/podman10.88.0.1
68fb291b0318b54a71f6f3636e58bd0896f084e5ba4fa311ecf36e019c5e6e43 10.88.0.2  condescendingnash 8.8.8.8
68fb291b0318b54a71f6f3636e58bd0896f084e5ba4fa311ecf36e019c5e6e48 10.88.0.5  HelloWorld 3.3.3.3,1.1.1.1,::1
95655fb6832ba134efa66e9c80862a6c9b04f3cc6abf8adfdda8c38112c2c6fa 10.88.0.3  hopefulmontalcini,testdbctr
8bcc5fe0cb09bee5dfb71d61503a87688cfc82aa5f130bcedb19357a17765926 10.88.0.4  trustingzhukovsky,ctr1,ctra
07070100000045000041ED00000000000000000000000267AA032200000000000000000000000000000000000000000000003600000000aardvark-dns-1.14.0/src/test/config/podman_v6_entries07070100000046000081A400000000000000000000000167AA03220000019E000000000000000000000000000000000000004800000000aardvark-dns-1.14.0/src/test/config/podman_v6_entries/podman_v6_entries10.89.0.1
7b46c7ad93fcbcb945c35286a5ba19d6976093e2ce39d2cb38ba1eba636404ab 10.89.0.2  test1,7b46c7ad93fc
7b46c7ad93fcbcb945c35286a5ba19d6976093e2ce39d2cb38ba1eba636404ab  fdfd:733b:dc3:220b::2 test1,7b46c7ad93fc
88dde8a2489780d3c8c90db54a9a97faf5dbe4f555b23e27880ca189dae0e2b0 10.89.0.3  test2,88dde8a24897
88dde8a2489780d3c8c90db54a9a97faf5dbe4f555b23e27880ca189dae0e2b0  fdfd:733b:dc3:220b::3 test2,88dde8a24897
07070100000047000081A400000000000000000000000167AA0322000000FE000000000000000000000000000000000000004F00000000aardvark-dns-1.14.0/src/test/config/podman_v6_entries/podman_v6_entries_proper10.0.0.1,10.0.1.1,fdfd::1,fddd::1
f35256b5e2f72ec8cb7d974d4f8841686fc8921fdfbc867285b50164e313f715 10.0.0.2,10.0.1.2 fdfd::2,fddd::2 testmulti1
e5df0cdbe0136a30cc3e848d495d2cc6dada25b7dedc776b4584ce2cbba6f06f 10.0.0.3,10.0.1.3 fdfd::3,fddd::3 testmulti2
07070100000048000081A400000000000000000000000167AA0322000000F6000000000000000000000000000000000000002A00000000aardvark-dns-1.14.0/src/test/config/test1fd35:fb67:49e1:349::1
68fb291b0318b54a71f6f3636e58bd0896f084e5ba4fa311ecf36e019c5e6e43  fd35:fb67:49e1:349::2 condescending_nash
8bcc5fe0cb09bee5dfb71d61503a87688cfc82aa5f130bcedb19357a17765926  fd35:fb67:49e1:349::3 trusting_zhukovsky,ctr1,ctra
07070100000049000081A400000000000000000000000167AA03220000000E000000000000000000000000000000000000002400000000aardvark-dns-1.14.0/src/test/mod.rspub mod test;
0707010000004A000081A400000000000000000000000167AA032200005455000000000000000000000000000000000000002500000000aardvark-dns-1.14.0/src/test/test.rs//use super::*;

#[cfg(test)]
// Perform unit tests for the config, backend and lookup logic.
// The following tests do not cover the server and event loop, since
// those are exercised by the integration tests.
mod tests {
    use std::collections::HashMap;
    use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};

    use aardvark_dns::backend::DNSBackend;
    use aardvark_dns::config;
    use aardvark_dns::error::AardvarkResult;
    use std::str::FromStr;

    const IP_10_88_0_2: IpAddr = IpAddr::V4(Ipv4Addr::new(10, 88, 0, 2));
    const IP_10_88_0_4: IpAddr = IpAddr::V4(Ipv4Addr::new(10, 88, 0, 4));
    const IP_10_88_0_5: IpAddr = IpAddr::V4(Ipv4Addr::new(10, 88, 0, 5));

    const IP_10_89_0_2: IpAddr = IpAddr::V4(Ipv4Addr::new(10, 89, 0, 2));
    const IP_10_89_0_3: IpAddr = IpAddr::V4(Ipv4Addr::new(10, 89, 0, 3));

    /// fdfd:733b:dc3:220b::2
    const IP_FDFD_733B_DC3_220B_2: IpAddr =
        IpAddr::V6(Ipv6Addr::new(0xfdfd, 0x733b, 0xdc3, 0x220b, 0, 0, 0, 2));
    /// fdfd:733b:dc3:220b::3
    const IP_FDFD_733B_DC3_220B_3: IpAddr =
        IpAddr::V6(Ipv6Addr::new(0xfdfd, 0x733b, 0xdc3, 0x220b, 0, 0, 0, 3));

    fn parse_configs(
        dir: &str,
    ) -> AardvarkResult<(
        DNSBackend,
        HashMap<String, Vec<Ipv4Addr>>,
        HashMap<String, Vec<Ipv6Addr>>,
    )> {
        config::parse_configs(dir, "")
    }

    /* -------------------------------------------- */
    // --------- Test aardvark-dns config ---------
    /* -------------------------------------------- */
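    // Shape of the stub config files used by these tests (as far as the
    // fixtures under src/test/config show): the file name is the network name,
    // the first line lists the gateway/listen address(es), optionally followed
    // by network-scoped DNS servers, and every further line holds a container
    // id, its ip address(es), its name/aliases and optional per-container DNS
    // servers.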
    #[test]
    // Test loading of config file from directory
    fn test_loading_config_file() {
        parse_configs("src/test/config/podman").unwrap();
    }
    #[test]
    // Test loading of config file from directory with custom DNS for containers
    fn test_loading_config_file_with_dns_servers() {
        parse_configs("src/test/config/podman_custom_dns_servers").unwrap();
    }
    #[test]
    // Test loading of config file from directory with custom DNS for containers
    // and custom DNS servers at network level as well.
    fn test_loading_config_file_with_network_scoped_dns_servers() {
        parse_configs("src/test/config/network_scoped_custom_dns").unwrap();
    }
    #[test]
    // Parse config files from stub data
    fn test_parsing_config_files() {
        match parse_configs("src/test/config/podman") {
            Ok((_, listen_ip_v4, _)) => {
                assert!(listen_ip_v4.contains_key("podman"));
                assert_eq!(listen_ip_v4["podman"].len(), 1);
                assert_eq!("10.88.0.1".parse(), Ok(listen_ip_v4["podman"][0]));
            }
            Err(e) => panic!("{}", e),
        }
    }
    #[test]
    // Parsing bad config files must not hard-error
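    // The podman_bad_config stub contains lines with missing fields (no
    // container id or no ip); the expectation here is only that parsing
    // tolerates such entries instead of aborting.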
    fn test_parsing_bad_config_files() {
        parse_configs("src/test/config/podman_bad_config").expect("config parsing failed");
    }
    /* -------------------------------------------- */
    // -------Verify backend custom dns server ----
    /* -------------------------------------------- */
    #[test]
    // Backend must populate ctr_dns_server with the custom
    // per-container DNS servers from the aardvark config
    fn test_backend_custom_dns_server() {
        match parse_configs("src/test/config/podman_custom_dns_servers") {
            Ok((backend, _, _)) => {
                // Should contain custom DNS server 8.8.8.8
                let mut dns_server = backend
                    .ctr_dns_server
                    .get(&IpAddr::V4(Ipv4Addr::new(10, 88, 0, 2)));
                let mut expected_dns_server = IpAddr::V4(Ipv4Addr::new(8, 8, 8, 8));
                assert_eq!(dns_server.unwrap().clone().unwrap()[0], expected_dns_server);

                // Should contain custom DNS servers 3.3.3.3 and 1.1.1.1
                dns_server = backend
                    .ctr_dns_server
                    .get(&IpAddr::V4(Ipv4Addr::new(10, 88, 0, 5)));
                expected_dns_server = IpAddr::V4(Ipv4Addr::new(3, 3, 3, 3));
                assert_eq!(dns_server.unwrap().clone().unwrap()[0], expected_dns_server);
                expected_dns_server = IpAddr::V4(Ipv4Addr::new(1, 1, 1, 1));
                assert_eq!(dns_server.unwrap().clone().unwrap()[1], expected_dns_server);
                expected_dns_server = IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1));
                assert_eq!(dns_server.unwrap().clone().unwrap()[2], expected_dns_server);

                // Should not contain any DNS server
                dns_server = backend
                    .ctr_dns_server
                    .get(&IpAddr::V4(Ipv4Addr::new(10, 88, 0, 3)));
                assert_eq!(dns_server.unwrap().clone(), None);
            }
            Err(e) => panic!("{}", e),
        }
    }

    #[test]
    // Backend must populate ctr_dns_server with the custom
    // DNS servers from the container entry and with the
    // network-scoped DNS servers as well.
    fn test_backend_network_scoped_custom_dns_server() {
        match parse_configs("src/test/config/network_scoped_custom_dns") {
            Ok((backend, _, _)) => {
                let expected_dnsservers = vec![
                    IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)),
                    IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 2)),
                ];
                let test_cases_source = ["10.88.0.2", "10.88.0.3", "10.88.0.4", "10.88.0.5"];
                // verify that the network-scoped resolvers for all containers are equal to
                // expected_dnsservers
                for container in test_cases_source.iter() {
                    let output =
                        backend.get_network_scoped_resolvers(&IpAddr::from_str(container).unwrap());
                    let mut output_dnsservers = Vec::new();
                    for server in output.unwrap().iter() {
                        output_dnsservers.push(*server);
                    }
                    assert_eq!(expected_dnsservers, output_dnsservers);
                }
            }
            Err(e) => panic!("{}", e),
        }
    }

    /* -------------------------------------------- */
    // -------Test aardvark-dns lookup logic ------
    /* -------------------------------------------- */
    #[test]
    // Check lookup query from backend and simulate a
    // dns request from a container resolving itself;
    // aardvark must return a single (v4) ip address.
    // Request address must be v4.
    // Same container --> (resolve) Same container name --> (on) Same Network
    fn test_lookup_queries_from_backend_simulate_same_container_request_from_v4_on_v4_entries() {
        let res = parse_configs("src/test/config/podman")
            .expect("parse config error")
            .0
            .lookup(&IP_10_88_0_2, "", "condescendingnash");
        assert_eq!(res, Some(vec![IP_10_88_0_2]));
    }
    #[test]
    // Check lookup query from backend and simulate a
    // case-insensitive dns request; aardvark must
    // return a single (v4) ip address.
    // Request address must be v4.
    // Container --> (resolve) container name (case-insensitive) --> (on) Same Network
    fn test_lookup_queries_from_backend_simulate_same_container_request_from_v4_on_v4_entries_case_insensitive(
    ) {
        let res = parse_configs("src/test/config/podman")
            .expect("parse config error")
            .0
            .lookup(&IP_10_88_0_2, "", "helloworld");
        assert_eq!(res, Some(vec![IP_10_88_0_5]));
    }
    #[test]
    // Check lookup query from backend and simulate a
    // case-insensitive (uppercase) dns request; aardvark must
    // return a single (v4) ip address.
    // Request address must be v4.
    // Container --> (resolve) container name (uppercase) --> (on) Same Network
    fn test_lookup_queries_from_backend_simulate_same_container_request_from_v4_on_v4_entries_case_insensitive_uppercase(
    ) {
        let res = parse_configs("src/test/config/podman")
            .expect("parse config error")
            .0
            .lookup(&IP_10_88_0_2, "", "HELLOWORLD");
        assert_eq!(res, Some(vec![IP_10_88_0_5]));
    }
    #[test]
    // Check lookup query from backend and simulate
    // nx_domain on bad lookup queries.
    fn test_lookup_queries_from_backend_simulate_nx_domain() {
        let res = parse_configs("src/test/config/podman")
            .expect("parse config error")
            .0
            .lookup(&IP_10_88_0_2, "", "somebadquery");
        assert_eq!(res, None);
    }
    #[test]
    // Check lookup query from backend and simulate a
    // dns request for a different container's name;
    // aardvark must return a single (v4) ip address.
    // Request address must be v4.
    // Same container --> (resolve) different container name --> (on) Same Network
    fn test_lookup_queries_from_backend_simulate_different_container_request_from_v4() {
        let res = parse_configs("src/test/config/podman")
            .expect("parse config error")
            .0
            .lookup(&IP_10_88_0_2, "", "trustingzhukovsky");
        assert_eq!(res, Some(vec![IP_10_88_0_4]));
    }
    #[test]
    // Check lookup query from backend and simulate a
    // dns request for a different container by alias;
    // aardvark must return a single (v4) ip address.
    // Request address must be v4.
    // Same container --> (resolve) different container name by alias --> (on) Same Network
    fn test_lookup_queries_from_backend_simulate_different_container_request_from_v4_by_alias() {
        let res = parse_configs("src/test/config/podman")
            .expect("parse config error")
            .0
            .lookup(&IP_10_88_0_2, "", "ctr1");
        assert_eq!(res, Some(vec![IP_10_88_0_4]));
    }
    #[test]
    // Check lookup query from backend and simulate a
    // dns request from a container resolving itself;
    // aardvark must return two ip addresses, one v4 and one v6.
    // Same container --> (resolve) Same container name --> (on) Same Network
    fn test_lookup_queries_from_backend_simulate_same_container_request_from_v4_and_v6_entries() {
        let conf = parse_configs("src/test/config/podman_v6_entries").expect("parse config error");
        assert!(conf.1.contains_key("podman_v6_entries"));
        assert!(!conf.2.contains_key("podman_v6_entries"));

        let ips = conf.0.lookup(&IP_10_89_0_2, "", "test1");
        assert_eq!(ips, Some(vec![IP_10_89_0_2, IP_FDFD_733B_DC3_220B_2]));
        let ips = conf.0.lookup(&IP_FDFD_733B_DC3_220B_2, "", "test1");
        assert_eq!(ips, Some(vec![IP_10_89_0_2, IP_FDFD_733B_DC3_220B_2]));
    }
    #[test]
    // Check lookup query from backend and simulate a
    // dns request from one container to another;
    // aardvark must return two ip addresses, one v4 and one v6.
    // Same container --> (resolve) different container name --> (on) Same Network
    fn test_lookup_queries_from_backend_simulate_different_container_request_from_v4_and_v6_entries(
    ) {
        let conf = parse_configs("src/test/config/podman_v6_entries").expect("parse config error");
        assert!(conf.1.contains_key("podman_v6_entries"));
        assert!(!conf.2.contains_key("podman_v6_entries"));

        let ips = conf.0.lookup(&IP_10_89_0_2, "", "test2");
        assert_eq!(ips, Some(vec![IP_10_89_0_3, IP_FDFD_733B_DC3_220B_3]));
        let ips = conf.0.lookup(&IP_FDFD_733B_DC3_220B_2, "", "test2");
        assert_eq!(ips, Some(vec![IP_10_89_0_3, IP_FDFD_733B_DC3_220B_3]));
    }
    #[test]
    // Check lookup query from backend and simulate a
    // dns request from one container to another by id;
    // aardvark must return two ip addresses, one v4 and one v6.
    // Request address must be v4.
    // Same container --> (resolve) different container by id --> (on) Same Network
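    // Lookups by the short container id work because the podman_v6_entries stub
    // lists the short id (e.g. "88dde8a24897") alongside the container name on
    // each entry, so it resolves like any other name/alias.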
    fn test_lookup_queries_from_backend_simulate_different_container_request_by_id_from_v4_on_v6_and_v4_entries(
    ) {
        let conf = parse_configs("src/test/config/podman_v6_entries").expect("parse config error");
        assert!(conf.1.contains_key("podman_v6_entries"));
        assert!(!conf.2.contains_key("podman_v6_entries"));

        let ips = conf.0.lookup(&IP_10_89_0_2, "", "88dde8a24897");
        assert_eq!(ips, Some(vec![IP_10_89_0_3, IP_FDFD_733B_DC3_220B_3]));
    }
    /* -------------------------------------------- */
    // ---Test aardvark-dns reverse lookup logic --
    /* -------------------------------------------- */
    #[test]
    // Check reverse lookup query from backend and simulate a
    // dns request from a container for its own IP;
    // aardvark must return the container name and aliases.
    // Same container --> (resolve) Same ip  --> (on) Same Network
    fn test_reverse_lookup_queries_from_backend_by_ip_v4() {
        match parse_configs("src/test/config/podman") {
            Ok((backend, _, _)) => {
                match backend
                    .reverse_lookup(&"10.88.0.4".parse().unwrap(), &"10.88.0.4".parse().unwrap())
                {
                    Some(lookup_vec) => {
                        assert_eq!(
                            &vec![
                                "trustingzhukovsky".to_string(),
                                "ctr1".to_string(),
                                "ctra".to_string()
                            ],
                            lookup_vec
                        );
                    }
                    _ => panic!("unexpected dns result"),
                }
            }
            Err(e) => panic!("{}", e),
        }
    }
    #[test]
    // Check reverse lookup query from backend and simulate a
    // dns request from a container for its own IP;
    // aardvark must return the container name and aliases.
    // Same container --> (resolve) Same ip  --> (on) Same Network
    fn test_reverse_lookup_queries_from_backend_by_ip_v6() {
        match parse_configs("src/test/config/podman_v6_entries") {
            Ok((backend, _, _)) => {
                match backend.reverse_lookup(
                    &"fdfd:733b:dc3:220b::2".parse().unwrap(),
                    &"fdfd:733b:dc3:220b::2".parse().unwrap(),
                ) {
                    Some(lookup_vec) => {
                        assert_eq!(
                            &vec!["test1".to_string(), "7b46c7ad93fc".to_string()],
                            lookup_vec
                        );
                    }
                    _ => panic!("unexpected dns result"),
                }
            }
            Err(e) => panic!("{}", e),
        }
    }
    /* -------------------------------------------- */
    // ---------Test aardvark-dns backend ---------
    /* -------------------------------------------- */
    #[test]
    // Check ip_mappings generated by backend
    fn test_generated_ip_mappings_in_backend() {
        match parse_configs("src/test/config/podman_v6_entries") {
            Ok((backend, listen_ip_v4, listen_ip_v6)) => {
                // the v6 listen map has no entry for this network, only the v4 map does
                assert!(!listen_ip_v6.contains_key("podman_v6_entries"));
                assert!(listen_ip_v4.contains_key("podman_v6_entries"));
                assert!(backend
                    .ip_mappings
                    .contains_key(&"fdfd:733b:dc3:220b::2".parse().unwrap()));
                assert!(backend
                    .ip_mappings
                    .contains_key(&"10.89.0.3".parse().unwrap()));
                assert_eq!(
                    vec!["podman_v6_entries"],
                    backend.ip_mappings[&"fdfd:733b:dc3:220b::2".parse().unwrap()]
                );
                assert_eq!(
                    vec!["podman_v6_entries"],
                    backend.ip_mappings[&"10.89.0.3".parse().unwrap()]
                );
            }
            Err(e) => panic!("{}", e),
        }
    }
    #[test]
    // Check name_mappings generated by backend
    fn test_generated_name_mappings_in_backend() {
        match parse_configs("src/test/config/podman_v6_entries") {
            Ok((backend, listen_ip_v4, listen_ip_v6)) => {
                // the v6 listen map has no entry for this network, only the v4 map does
                assert!(!listen_ip_v6.contains_key("podman_v6_entries"));
                assert!(listen_ip_v4.contains_key("podman_v6_entries"));
                // backend must contain the network key
                assert!(backend.name_mappings.contains_key("podman_v6_entries"));
                // container ids must be in name entries
                assert!(backend.name_mappings["podman_v6_entries"].contains_key("7b46c7ad93fc"));
                assert!(backend.name_mappings["podman_v6_entries"].contains_key("88dde8a24897"));
                // container names must be in name entries
                assert!(backend.name_mappings["podman_v6_entries"].contains_key("test1"));
                assert!(backend.name_mappings["podman_v6_entries"].contains_key("test2"));
                assert_eq!(
                    "10.89.0.3".parse(),
                    Ok(backend.name_mappings["podman_v6_entries"]["test2"][0])
                );
                assert_eq!(
                    "fdfd:733b:dc3:220b::3".parse(),
                    Ok(backend.name_mappings["podman_v6_entries"]["test2"][1])
                );
                // name entries must contain all ip addresses for container test1
                assert_eq!(
                    "10.89.0.2".parse(),
                    Ok(backend.name_mappings["podman_v6_entries"]["test1"][0])
                );
                assert_eq!(
                    "fdfd:733b:dc3:220b::2".parse(),
                    Ok(backend.name_mappings["podman_v6_entries"]["test1"][1])
                );
                // name entries must contain all ip addresses for container with id 7b46c7ad93fc
                assert_eq!(
                    "10.89.0.2".parse(),
                    Ok(backend.name_mappings["podman_v6_entries"]["7b46c7ad93fc"][0])
                );
                assert_eq!(
                    "fdfd:733b:dc3:220b::2".parse(),
                    Ok(backend.name_mappings["podman_v6_entries"]["7b46c7ad93fc"][1])
                );
                // name entries must contain all ip addresses for container with id 88dde8a24897
                assert_eq!(
                    "10.89.0.3".parse(),
                    Ok(backend.name_mappings["podman_v6_entries"]["88dde8a24897"][0])
                );
                assert_eq!(
                    "fdfd:733b:dc3:220b::3".parse(),
                    Ok(backend.name_mappings["podman_v6_entries"]["88dde8a24897"][1])
                );
            }
            Err(e) => panic!("{}", e),
        }
    }
    #[test]
    // Check reverse_mappings generated by backend
    fn test_generated_reverse_mappings_in_backend() {
        match parse_configs("src/test/config/podman_v6_entries") {
            Ok((backend, listen_ip_v4, listen_ip_v6)) => {
                // the v6 listen map has no entry for this network, only the v4 map does
                assert!(!listen_ip_v6.contains_key("podman_v6_entries"));
                assert!(listen_ip_v4.contains_key("podman_v6_entries"));
                // all ips must have reverse lookups
                assert!(backend.reverse_mappings["podman_v6_entries"]
                    .contains_key(&"10.89.0.3".parse().unwrap()));
                assert!(backend.reverse_mappings["podman_v6_entries"]
                    .contains_key(&"10.89.0.2".parse().unwrap()));
                assert!(backend.reverse_mappings["podman_v6_entries"]
                    .contains_key(&"fdfd:733b:dc3:220b::2".parse().unwrap()));
                assert!(backend.reverse_mappings["podman_v6_entries"]
                    .contains_key(&"fdfd:733b:dc3:220b::3".parse().unwrap()));
            }
            Err(e) => panic!("{}", e),
        }
    }

    #[test]
    // Parse a config which contains multiple ipv4 and ipv6 addresses on a single line
    fn test_parse_multiple_ipv4_ipv6_addresses() {
        match parse_configs("src/test/config/podman_v6_entries") {
            Ok((backend, listen_ip_v4, listen_ip_v6)) => {
                assert_eq!(
                    listen_ip_v4["podman_v6_entries_proper"],
                    vec![
                        "10.0.0.1".parse::<Ipv4Addr>().unwrap(),
                        "10.0.1.1".parse().unwrap()
                    ]
                );
                assert_eq!(
                    listen_ip_v6["podman_v6_entries_proper"],
                    vec![
                        "fdfd::1".parse::<Ipv6Addr>().unwrap(),
                        "fddd::1".parse().unwrap()
                    ]
                );
                match backend.lookup(&"10.0.0.2".parse().unwrap(), "", "testmulti1") {
                    Some(ip_vec) => {
                        assert_eq!(
                            ip_vec,
                            vec![
                                "10.0.0.2".parse::<IpAddr>().unwrap(),
                                "10.0.1.2".parse().unwrap(),
                                "fdfd::2".parse().unwrap(),
                                "fddd::2".parse().unwrap()
                            ]
                        )
                    }
                    _ => panic!("unexpected dns result"),
                }

                match backend.lookup(&"10.0.0.2".parse().unwrap(), "", "testmulti2") {
                    Some(ip_vec) => {
                        assert_eq!(
                            ip_vec,
                            vec![
                                "10.0.0.3".parse::<IpAddr>().unwrap(),
                                "10.0.1.3".parse().unwrap(),
                                "fdfd::3".parse().unwrap(),
                                "fddd::3".parse().unwrap()
                            ]
                        )
                    }
                    _ => panic!("unexpected dns result"),
                }
            }
            Err(e) => panic!("{}", e),
        }
    }
}
0707010000004B000041ED00000000000000000000000267AA032200000000000000000000000000000000000000000000001900000000aardvark-dns-1.14.0/test0707010000004C000081A400000000000000000000000167AA032200003848000000000000000000000000000000000000003800000000aardvark-dns-1.14.0/test/100-basic-name-resolution.bats#!/usr/bin/env bats   -*- bats -*-
#
# basic netavark tests
#

load helpers


HELPER_PID=
function teardown() {
	if [[ -n "$HELPER_PID" ]]; then
		kill -9 $HELPER_PID
	fi
	basic_teardown
}

# custom DNS server is set to `127.0.0.255`, which is an invalid DNS server,
# hence all external requests must fail; the external dig query is expected
# to fail with exit code 124
@test "basic container - dns itself (custom bad dns server)" {
	setup_dnsmasq

	subnet_a=$(random_subnet 5)
	create_config network_name="podman1" container_id=$(random_string 64) container_name="aone" subnet="$subnet_a" custom_dns_server='"127.0.0.255"' aliases='"a1", "1a"'
	config_a1=$config
	ip_a1=$(echo "$config_a1" | jq -r .networks.podman1.static_ips[0])
	gw=$(echo "$config_a1" | jq -r .network_info.podman1.subnets[0].gateway)
	create_container "$config_a1"
	a1_pid=$CONTAINER_NS_PID
	run_in_container_netns "$a1_pid" "dig" "+short" "aone" "@$gw"
	assert "$ip_a1"
	# The recursion bit is already set if requested, so the output must not
	# contain an unexpected warning.
	assert "$output" !~ "WARNING: recursion requested but not available"

	# custom dns server is set to 127.0.0.255 which is not a valid DNS server, so the external DNS request must fail
	expected_rc=124 run_in_container_netns "$a1_pid" "dig" "+short" "$TEST_DOMAIN" "@$gw"
}

# custom DNS server is set to a local dnsmasq instance (`127.1.1.53`), which is
# a valid DNS server, hence all external requests must pass.
@test "basic container - dns itself (custom good dns server)" {
	setup_dnsmasq

	# launch dnsmasq to run a second local server with a unique name so we know custom_dns_server works
	run_in_host_netns dnsmasq --conf-file=/dev/null --pid-file="$AARDVARK_TMPDIR/dnsmasq2.pid" \
		--except-interface=lo --listen-address=127.1.1.53 --bind-interfaces  \
		--address=/unique-name.local/192.168.0.1 --no-resolv --no-hosts
	HELPER_PID=$(cat $AARDVARK_TMPDIR/dnsmasq2.pid)

	subnet_a=$(random_subnet 5)
	create_config network_name="podman1" container_id=$(random_string 64) container_name="aone" subnet="$subnet_a" custom_dns_server='"127.1.1.53"' aliases='"a1", "1a"'

	config_a1=$config
	ip_a1=$(echo "$config_a1" | jq -r .networks.podman1.static_ips[0])
	gw=$(echo "$config_a1" | jq -r .network_info.podman1.subnets[0].gateway)
	create_container "$config_a1"
	a1_pid=$CONTAINER_NS_PID
	run_in_container_netns "$a1_pid" "dig" "aone" "@$gw"
	# check for TTL 0 here as well
	assert "$output" =~ "aone\.[[:space:]]*0[[:space:]]*IN[[:space:]]*A[[:space:]]*$ip_a1"
	# The recursion bit is already set if requested, so the output must not
	# contain an unexpected warning.
	assert "$output" !~ "WARNING: recursion requested but not available"

	run_in_container_netns "$a1_pid" "dig" "+short" "unique-name.local" "@$gw"
	# validate that we get the right ip
	assert "$output" == "192.168.0.1"
	# The recursion bit is already set if requested, so the output must not
	# contain an unexpected warning.
	assert "$output" !~ "WARNING: recursion requested but not available"
}

@test "basic container - dns itself (bad and good should fall back)" {
	setup_dnsmasq

	# using sh-exec to keep the udp query hanging for at least 3 seconds
	nsenter -m -n -t $HOST_NS_PID nc -l -u 127.5.5.5 53 --sh-exec "sleep 3" 3>/dev/null &
	HELPER_PID=$!

	subnet_a=$(random_subnet 5)
	create_config network_name="podman1" container_id=$(random_string 64) container_name="aone" subnet="$subnet_a" custom_dns_server='"127.5.5.5", "127.0.0.1"' aliases='"a1", "1a"'
	config_a1=$config
	ip_a1=$(echo "$config_a1" | jq -r .networks.podman1.static_ips[0])
	gw=$(echo "$config_a1" | jq -r .network_info.podman1.subnets[0].gateway)
	create_container "$config_a1"
	a1_pid=$CONTAINER_NS_PID

	# the first custom server is unreachable but the second server should work
	run_in_container_netns "$a1_pid" "dig" "$TEST_DOMAIN" "@$gw"
	assert "$output" =~ "Query time: [23][0-9]{3} msec" "timeout should be 2.5s so request should then work shortly after (udp)"

	# Now the same with tcp.
	nsenter -m -n -t $HOST_NS_PID nc -l 127.5.5.5 53 --sh-exec "sleep 3" 3>/dev/null &
	HELPER_PID=$!
	run_in_container_netns "$a1_pid" "dig" +tcp "$TEST_DOMAIN" "@$gw"
	assert "$output" =~ "Query time: [23][0-9]{3} msec" "timeout should be 2.5s so request should then work shortly after (tcp)"
}

@test "basic container - dns itself custom" {
	setup_dnsmasq

	subnet_a=$(random_subnet 5)
	create_config network_name="podman1" container_id=$(random_string 64) container_name="aone" subnet="$subnet_a" aliases='"a1", "1a"'
	config_a1=$config
	ip_a1=$(echo "$config_a1" | jq -r .networks.podman1.static_ips[0])
	gw=$(echo "$config_a1" | jq -r .network_info.podman1.subnets[0].gateway)
	create_container "$config_a1"
	a1_pid=$CONTAINER_NS_PID
	run_in_container_netns "$a1_pid" "dig" "+short" "aone" "@$gw"
	assert "$ip_a1"
	# The recursion bit is already set if requested, so the output must not
	# contain an unexpected warning.
	assert "$output" !~ "WARNING: recursion requested but not available"

	# check TCP support
	run_in_container_netns "$a1_pid" "dig" "+tcp" "+short" "aone" "@$gw"
	assert "$ip_a1"


	run_in_container_netns "$a1_pid" "dig" "+short" "$TEST_DOMAIN" "@$gw"
	# validate that we get an ipv4
	assert "$output" =~ "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"
	# The recursion bit is already set if requested, so the output must not
	# contain an unexpected warning.
	assert "$output" !~ "WARNING: recursion requested but not available"

	# check TCP support for forwarding
	# note there is no guarantee that the forwarding is happening via TCP though
	# TODO add a custom dns record that is too big for udp so we can be sure...
	run_in_container_netns "$a1_pid" "dig" "+tcp" "$TEST_DOMAIN" "@$gw"
	# validate that we get an ipv4
	assert "$output" =~ "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"
	# TODO This is not working on rhel/centos 9 as the dig version there doesn't print the line,
	# so we trust that dig +tcp does the right thing.
	# assert "$output" =~ "\(TCP\)" "server used TCP"
	# The recursion bit is already set if requested, so the output must not
	# contain an unexpected warning.
	assert "$output" !~ "WARNING: recursion requested but not available"
}

@test "basic container - ndots incomplete entry" {
	setup_dnsmasq

	subnet_a=$(random_subnet 5)
	create_config network_name="podman1" container_id=$(random_string 64) container_name="aone" \
		subnet="$subnet_a" aliases='"a1", "1a"'
	config_a1=$config
	ip_a1=$(echo "$config_a1" | jq -r .networks.podman1.static_ips[0])
	gw=$(echo "$config_a1" | jq -r .network_info.podman1.subnets[0].gateway)
	create_container "$config_a1"
	a1_pid=$CONTAINER_NS_PID
	run_in_container_netns "$a1_pid" "dig" "someshortname" "@$gw"
	assert "$output" =~ "status: REFUSED" "dnsmasq returns REFUSED"

	run_in_container_netns "$a1_pid" "dig" "+short" "testname" "@$gw"
	assert "198.51.100.1" "should resolve local name from external nameserver (dnsmasq)"
}

@test "basic container - dns itself on container with ipaddress v6" {
	setup_dnsmasq

	subnet_a=$(random_subnet 6)
	create_config network_name="podman1" container_id=$(random_string 64) container_name="aone" subnet="$subnet_a" aliases='"a1", "1a"'
	config_a1=$config
	ip_a1=$(echo "$config_a1" | jq -r .networks.podman1.static_ips[0])
	gw=$(echo "$config_a1" | jq -r .network_info.podman1.subnets[0].gateway)
	create_container "$config_a1"
	a1_pid=$CONTAINER_NS_PID
	run_in_container_netns "$a1_pid" "dig" "+short" "aone" "@$gw" "AAAA"
	assert "$ip_a1"
	# The recursion bit is already set if requested, so the output must not
	# contain an unexpected warning.
	assert "$output" !~ "WARNING: recursion requested but not available"

	run_in_container_netns "$a1_pid" "dig" "+short" "$TEST_DOMAIN" "@$gw" "AAAA"
	# validate that we got valid ipv6
	# check that the output is not empty
	assert "$lines[0]" != "" "got at least one result"
	for ip in "${lines[@]}"; do
		run_helper ipcalc -6c "$ip"
	done
	# The recursion bit is already set if requested, so the output must not
	# contain an unexpected warning.
	assert "$output" !~ "WARNING: recursion requested but not available"
}

@test "basic container - dns itself with long network name" {
	subnet_a=$(random_subnet 5)
	long_name="podman11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"
	create_config network_name="$long_name" container_id=$(random_string 64) container_name="aone" subnet="$subnet_a" aliases='"a1", "1a"'
	config_a1=$config
	ip_a1=$(echo "$config_a1" | jq -r .networks.$long_name.static_ips[0])
	gw=$(echo "$config_a1" | jq -r .network_info.$long_name.subnets[0].gateway)
	create_container "$config_a1"
	a1_pid=$CONTAINER_NS_PID
	run_in_container_netns "$a1_pid" "dig" "+short" "aone" "@$gw"
	assert "$ip_a1"
	# The recursion bit is already set if requested, so the output must not
	# contain an unexpected warning.
	assert "$output" !~ "WARNING: recursion requested but not available"
}

@test "two containers on the same network" {
	# container a1
	subnet_a=$(random_subnet 5)
	create_config network_name="podman1" container_id=$(random_string 64) container_name="aone" subnet="$subnet_a" aliases='"a1", "1a"'
	config_a1="$config"
	a1_ip=$(echo "$config_a1" | jq -r .networks.podman1.static_ips[0])
	gw=$(echo "$config_a1" | jq -r .network_info.podman1.subnets[0].gateway)
	create_container "$config_a1"
	a1_pid=$CONTAINER_NS_PID

	# container a2
	create_config network_name="podman1" container_id=$(random_string 64) container_name="atwo" subnet="$subnet_a" aliases='"a2", "2a"'
	config_a2="$config"
	a2_ip=$(echo "$config_a2" | jq -r .networks.podman1.static_ips[0])
	create_container "$config_a2"
	a2_pid="$CONTAINER_NS_PID"

	# Resolve container names to IPs
	dig "$a1_pid" "atwo" "$gw"
	assert "$a2_ip"
	# The recursion bit is already set if requested, so the output must not
	# contain an unexpected warning.
	assert "$output" !~ "WARNING: recursion requested but not available"
	dig "$a2_pid" "aone" "$gw"
	assert "$a1_ip"
	# The recursion bit is already set if requested, so the output must not
	# contain an unexpected warning.
	assert "$output" !~ "WARNING: recursion requested but not available"
}

# Internal network, meaning no DNS servers.
# Hence all external requests must fail.
@test "basic container - internal network has no DNS" {
	setup_dnsmasq

	subnet_a=$(random_subnet)
	create_config network_name="podman1" internal=true container_id=$(random_string 64) container_name="aone" subnet="$subnet_a" custom_dns_server='"1.1.1.1","8.8.8.8"' aliases='"a1", "1a"'
	config_a1=$config
	# Network name is still recorded as podman1
	ip_a1=$(echo "$config_a1" | jq -r .networks.podman1.static_ips[0])
	gw=$(echo "$config_a1" | jq -r .network_info.podman1.subnets[0].gateway)
	create_container "$config_a1"
	a1_pid=$CONTAINER_NS_PID
	run_in_container_netns "$a1_pid" "dig" "+short" "aone" "@$gw"
	assert "$ip_a1"
	# The recursion bit is already set if requested, so the output must not
	# contain an unexpected warning.
	assert "$output" !~ "WARNING: recursion requested but not available"

	# An internal network means no upstream DNS server, so this should hard-fail
	expected_rc=1 run_in_container_netns "$a1_pid" "host" "-t" "ns" "$TEST_DOMAIN" "$gw"
	assert "$output" =~ "Host $TEST_DOMAIN not found"
	assert "$output" =~ "NXDOMAIN"
}

# Internal network, but this time with IPv6. Same result as above expected.
@test "basic container - internal network has no DNS - ipv6" {
	setup_dnsmasq

	subnet_a=$(random_subnet 6)
	# Cloudflare and Google public anycast DNS v6 nameservers
	create_config network_name="podman1" internal=true container_id=$(random_string 64) container_name="aone" subnet="$subnet_a" custom_dns_server='"2606:4700:4700::1111","2001:4860:4860::8888"' aliases='"a1", "1a"'
	config_a1=$config
	# Network name is still recorded as podman1
	ip_a1=$(echo "$config_a1" | jq -r .networks.podman1.static_ips[0])
	gw=$(echo "$config_a1" | jq -r .network_info.podman1.subnets[0].gateway)
	create_container "$config_a1"
	a1_pid=$CONTAINER_NS_PID
	run_in_container_netns "$a1_pid" "dig" "+short" "aone" "@$gw" "AAAA"
	assert "$ip_a1"
	# The recursion bit is already set if requested, so the output must not
	# contain an unexpected warning.
	assert "$output" !~ "WARNING: recursion requested but not available"

	# An internal network means no upstream DNS server, so this should hard-fail
	expected_rc=1 run_in_container_netns "$a1_pid" "host" "-t" "ns" "$TEST_DOMAIN" "$gw"
	assert "$output" =~ "Host $TEST_DOMAIN not found"
	assert "$output" =~ "NXDOMAIN"
}

@test "host dns on ipv6 link local" {
	# create a local interface with a link local ipv6 address
	# disable DAD as it takes some time and the initial connection would fail without it
	run_in_host_netns sysctl -w net.ipv6.conf.default.accept_dad=0
	run_in_host_netns ip link set lo up
	run_in_host_netns ip link add test type bridge
	run_in_host_netns ip link set test up
	run_in_host_netns ip -j addr
	link_local_addr=$(jq -r '.[] | select(.ifname=="test").addr_info[0].local' <<<"$output")

	# update our fake netns resolv.conf with the link local address as the only nameserver
	echo "nameserver $link_local_addr%test" >"$AARDVARK_TMPDIR/resolv.conf"
	run_in_host_netns mount --bind "$AARDVARK_TMPDIR/resolv.conf" /etc/resolv.conf

	# launch dnsmasq to run a second local server with a unique name so we know custom_dns_server works
	run_in_host_netns dnsmasq --conf-file=/dev/null --pid-file="$AARDVARK_TMPDIR/dnsmasq2.pid" \
		--except-interface=lo --listen-address="$link_local_addr" --bind-interfaces  \
		--address=/unique-name.local/192.168.0.1 --no-resolv --no-hosts
	HELPER_PID=$(cat $AARDVARK_TMPDIR/dnsmasq2.pid)

	subnet_a=$(random_subnet 5)
	create_config network_name="podman1" container_id=$(random_string 64) container_name="aone" subnet="$subnet_a"

	config_a1=$config
	ip_a1=$(echo "$config_a1" | jq -r .networks.podman1.static_ips[0])
	gw=$(echo "$config_a1" | jq -r .network_info.podman1.subnets[0].gateway)
	create_container "$config_a1"
	a1_pid=$CONTAINER_NS_PID
	run_in_container_netns "$a1_pid" "dig" "aone" "@$gw"
	# check for TTL 0 here as well
	assert "$output" =~ "aone\.[[:space:]]*0[[:space:]]*IN[[:space:]]*A[[:space:]]*$ip_a1"
	# The recursion bit is already set if requested, so the output must not
	# contain an unexpected warning.
	assert "$output" !~ "WARNING: recursion requested but not available"

	run_in_container_netns "$a1_pid" "dig" "+short" "unique-name.local" "@$gw"
	# validate that we get the right ip
	assert "$output" == "192.168.0.1"
	# The recursion bit is already set if requested, so the output must not
	# contain an unexpected warning.
	assert "$output" !~ "WARNING: recursion requested but not available"
}
0707010000004D000081A400000000000000000000000167AA0322000010F5000000000000000000000000000000000000002F00000000aardvark-dns-1.14.0/test/200-two-networks.bats#!/usr/bin/env bats   -*- bats -*-
#
# basic netavark tests
#

load helpers

@test "two containers on different networks" {
	setup_dnsmasq

	# container a1 on subnet a
	subnet_a=$(random_subnet 5)
	create_config network_name="podman1" container_id=$(random_string 64) container_name="aone" subnet="$subnet_a"
	a1_config="$config"
	a1_ip=$(echo "$a1_config" | jq -r .networks.podman1.static_ips[0])
	a_gw=$(echo "$a1_config" | jq -r .network_info.podman1.subnets[0].gateway)
	create_container "$a1_config"
	a1_pid="$CONTAINER_NS_PID"

	# container b1 on subnet b
	subnet_b=$(random_subnet 5)
	create_config network_name="podman2" container_id=$(random_string 64) container_name="bone" subnet="$subnet_b"
	b1_config="$config"
	b1_ip=$(echo "$b1_config" | jq -r .networks.podman2.static_ips[0])
	b_gw=$(echo "$b1_config" | jq -r .network_info.podman2.subnets[0].gateway)
	create_container "$b1_config"
	b1_pid="$CONTAINER_NS_PID"

	# container a1 should not resolve b1 and we should get
	# a NXDOMAIN
	run_in_container_netns "$a1_pid" "dig" "bone" "@$a_gw"
	assert "$output" =~ "status: NXDOMAIN" "a1 resolves b2"

	# container b1 should not resolve a1 and we should get
	# a NXDOMAIN
	run_in_container_netns "$b1_pid" "dig" "aone" "@$b_gw"
	assert "$output" =~ "status: NXDOMAIN" "b1 resolves a1"

	# a1 should be able to resolve itself
	dig "$a1_pid" "aone" "$a_gw"
	assert $a1_ip
	# b1 should be able to resolve itself
	dig "$b1_pid" "bone" "$b_gw"
	assert $b1_ip

	# we should be able to resolve aone from the host if we use the a gw as the server
	run_in_host_netns dig +short "aone" "@$a_gw"
	assert $a1_ip
	#  but NOT when using b as server
	run_in_host_netns "dig" "aone" "@$b_gw"
	assert "$output" =~ "status: NXDOMAIN" "b1 listener can resolve a1"

	# but b on network b is allowed again
	run_in_host_netns dig +short "bone" "@$b_gw"
	assert $b1_ip
}

@test "two subnets with isolated container and one shared" {
	setup_dnsmasq

	# container a1 on subnet a
	subnet_a=$(random_subnet 5)
	subnet_b=$(random_subnet 5)

	# A1
	create_config network_name="podman1" container_id=$(random_string 64) container_name="aone" subnet="$subnet_a"
	a1_config=$config
	a1_container_id=$(echo "$a1_config" | jq -r .container_id)
	a1_ip=$(echo "$a1_config" | jq -r .networks.podman1.static_ips[0])
	a_gw=$(echo "$a1_config" | jq -r .network_info.podman1.subnets[0].gateway)
	a1_hash=$(echo "$a1_config" | jq -r .network_info.podman1.id)
	create_container "$a1_config"
	a1_pid=$CONTAINER_NS_PID

	# container b1 on subnet b
	create_config network_name="podman2" container_id=$(random_string 64) container_name="bone" subnet="$subnet_b"
	b1_config=$config
	b1_ip=$(echo "$b1_config" | jq -r .networks.podman2.static_ips[0])
	b_gw=$(echo "$b1_config" | jq -r .network_info.podman2.subnets[0].gateway)
	b1_hash=$(echo "$b1_config" | jq -r .network_info.podman1.id)
	create_container "$b1_config"
	b1_pid=$CONTAINER_NS_PID
	b_subnets=$(echo $b1_config | jq -r .network_info.podman2.subnets[0])

	# AB2
	create_config network_name="podman1" container_id=$(random_string 64) container_name="abtwo" subnet="$subnet_a"
	a2_config=$config
	a2_ip=$(echo "$a2_config" | jq -r .networks.podman1.static_ips[0])

	b2_ip=$(random_ip_in_subnet "$subnet_b")
	create_network "podman2" "$b2_ip" "eth1"
	b2_network="{$new_network}"
	create_network_infos "podman2" "$b1_hash" "$b_subnets"
	b2_network_info="{$new_network_info}"
	ab2_config=$(jq -r ".networks +=  $b2_network" <<<"$a2_config")
	ab2_config=$(jq -r ".network_info += $b2_network_info" <<<"$ab2_config")

	create_container "$ab2_config"
	ab2_pid=$CONTAINER_NS_PID

	# aone should be able to resolve AB2 and NOT B1
	dig "$a1_pid" "abtwo" "$a_gw"
	assert "$a2_ip"
	dig "$a1_pid" "bone" "$a_gw"
	assert ""

	# bone should be able to resolve AB2 and NOT A1
	dig "$b1_pid" "abtwo" "$b_gw"
	assert "$b2_ip"
	dig "$b1_pid" "aone" "$b_gw"
	assert ""

	# abtwo should be able to resolve A1, B1, and AB2 on both gws
	dig "$ab2_pid" "aone" "$a_gw"
	assert "$a1_ip"
	dig "$ab2_pid" "bone" "$b_gw"
	assert "$b1_ip"
	# check ab2 from itself, first from the a side
	dig "$ab2_pid" "abtwo" "$a_gw"
	assert "${#lines[@]}" = 2
	assert "$output" =~ "$a2_ip"
	assert "$output" =~ "$b2_ip"

	# and now from the b side
	dig "$ab2_pid" "abtwo" "$b_gw"
	assert "${#lines[@]}" = 2
	assert "$output" =~ "$a2_ip"
	assert "$output" =~ "$b2_ip"
}
0707010000004E000081A400000000000000000000000167AA032200002835000000000000000000000000000000000000003100000000aardvark-dns-1.14.0/test/300-three-networks.bats#!/usr/bin/env bats   -*- bats -*-
#
# basic netavark tests
#

load helpers

@test "three networks with a connect" {
	setup_dnsmasq

	subnet_a=$(random_subnet 5)
	subnet_b=$(random_subnet 5)

	# A1
	create_config network_name="podman1" container_id=$(random_string 64) container_name="aone" subnet="$subnet_a"
	a1_config=$config
	a1_container_id=$(echo "$a1_config" | jq -r .container_id)
	a1_ip=$(echo "$a1_config" | jq -r .networks.podman1.static_ips[0])
	a_gw=$(echo "$a1_config" | jq -r .network_info.podman1.subnets[0].gateway)
	a1_hash=$(echo "$a1_config" | jq -r .network_info.podman1.id)
	create_container "$a1_config"
	a1_pid=$CONTAINER_NS_PID

	# container b1 on subnet b
	create_config network_name="podman2" container_id=$(random_string 64) container_name="bone" subnet="$subnet_b"
	b1_config=$config
	b1_ip=$(echo "$b1_config" | jq -r .networks.podman2.static_ips[0])
	b_gw=$(echo "$b1_config" | jq -r .network_info.podman2.subnets[0].gateway)
	b1_hash=$(echo "$b1_config" | jq -r .network_info.podman1.id)
	create_container "$b1_config"
	b1_pid=$CONTAINER_NS_PID
	b_subnets=$(echo $b1_config | jq -r .network_info.podman2.subnets[0])

	# AB2
	create_config network_name="podman1" container_id=$(random_string 64) container_name="abtwo" subnet="$subnet_a"
	a2_config=$config
	a2_ip=$(echo "$a2_config" | jq -r .networks.podman1.static_ips[0])

	b2_ip=$(random_ip_in_subnet "$subnet_b")
	create_network "podman2" "$b2_ip" "eth1"
	b2_network="{$new_network}"
	create_network_infos "podman2" "$b1_hash" "$b_subnets"
	b2_network_info="{$new_network_info}"
	ab2_config=$(jq -r ".networks +=  $b2_network" <<<"$a2_config")
	ab2_config=$(jq -r ".network_info += $b2_network_info" <<<"$ab2_config")

	create_container "$ab2_config"
	ab2_pid=$CONTAINER_NS_PID

	# aone should be able to resolve AB2 and NOT B1
	dig "$a1_pid" "abtwo" "$a_gw"
	assert "$a2_ip"
	dig "$a1_pid" "bone" "$a_gw"
	assert ""

	# bone should be able to resolve AB2 and NOT A1
	dig "$b1_pid" "abtwo" "$b_gw"
	assert "$b2_ip"
	dig "$b1_pid" "aone" "$b_gw"
	assert ""

	# abtwo should be able to resolve A1, B1, and AB2 on both gws
	dig "$ab2_pid" "aone" "$a_gw"
	assert "$a1_ip"
	dig "$ab2_pid" "aone" "$b_gw"
	assert "$a1_ip"

	dig "$ab2_pid" "bone" "$a_gw"
	assert "$b1_ip"
	dig "$ab2_pid" "bone" "$b_gw"
	assert "$b1_ip"

	# now the same again with search domain set
	dig "$ab2_pid" "aone.dns.podman" "$a_gw"
	assert "$a1_ip"
	dig "$ab2_pid" "aone.dns.podman" "$b_gw"
	assert "$a1_ip"

	dig "$ab2_pid" "bone.dns.podman" "$a_gw"
	assert "$b1_ip"
	dig "$ab2_pid" "bone.dns.podman" "$b_gw"
	assert "$b1_ip"

	# check ab2 from itself, first from the a side
	dig "$ab2_pid" "abtwo" "$a_gw"
	assert "${#lines[@]}" = 2
	assert "$output" =~ "$a2_ip"
	assert "$output" =~ "$b2_ip"

	# and now from the b side
	dig "$ab2_pid" "abtwo" "$b_gw"
	assert "${#lines[@]}" = 2
	assert "$output" =~ "$a2_ip"
	assert "$output" =~ "$b2_ip"
}

@test "three subnets, one container on two of the subnets, network connect" {
	# Create all three subnets
	subnet_a=$(random_subnet 5)
	subnet_b=$(random_subnet 5)
	subnet_c=$(random_subnet 5)

	# A1 on subnet A
	create_config network_name="podman1" container_id=$(random_string 64) container_name="aone" subnet="$subnet_a"
	a1_config=$config
	a1_container_id=$(echo "$a1_config" | jq -r .container_id)
	a1_ip=$(echo "$a1_config" | jq -r .networks.podman1.static_ips[0])
	a_gw=$(echo "$a1_config" | jq -r .network_info.podman1.subnets[0].gateway)
	a1_hash=$(echo "$a1_config" | jq -r .network_info.podman1.id)
	create_container "$a1_config"
	a1_pid=$CONTAINER_NS_PID

	# C1 on subnet C
	create_config network_name="podman3" container_id=$(random_string 64) container_name="cone" subnet="$subnet_c"
	c1_config=$config
	c1_container_id=$(echo "$c1_config" | jq -r .container_id)
	c1_ip=$(echo "$c1_config" | jq -r .networks.podman3.static_ips[0])
	c_gw=$(echo "$c1_config" | jq -r .network_info.podman3.subnets[0].gateway)
	c1_hash=$(echo "$c1_config" | jq -r .network_info.podman3.id)
	create_container "$c1_config"
	c1_pid=$CONTAINER_NS_PID
	c_subnets=$(echo $c1_config | jq -r .network_info.podman3.subnets[0])

	# We now have one container on A and one on C.  We now simulate
	# a network connect on both to B.
	#
	# This is also where things get tricky as we are trying to mimic
	# a connect. First, we need to replace the container configs used
	# for teardown with the merged ones. We leave the NS_PIDS alone
	# because the order should be OK.

	# Create B1 config for network connect
	create_config network_name="podman2" container_id=$(random_string 64) container_name="aone" subnet="$subnet_b" aliases='"aone_nw"'
	b1_config=$config
	# The container ID should be the same
	b1_config=$(jq ".container_id  |= \"$a1_container_id\"" <<<"$b1_config")
	b1_config=$(jq ".networks.podman2.interface_name |= \"eth1\"" <<<"$b1_config")
	b1_network=$(echo "$b1_config" | jq -r .networks)
	b1_network_info=$(echo "$b1_config" | jq -r .network_info)
	b1_ip=$(echo "$b1_network" | jq -r .podman2.static_ips[0])
	b_gw=$(echo "$b1_network_info" | jq -r .podman2.subnets[0].gateway)

	# Now we must merge a1 and b1 for eventual teardown
	a1b1_config=$(jq -r ".networks += $b1_network" <<<"$a1_config")
	a1b1_config=$(jq -r ".network_info += $b1_network_info" <<<"$a1b1_config")

	# Create B2 config for network connect
	#
	create_config network_name="podman2" container_id=$(random_string 64) container_name="cone" subnet="$subnet_b" aliases='"cone_nw"'
	b2_config=$config
	# The container ID should be the same
	b2_config=$(jq ".container_id  |= \"$c1_container_id\"" <<<"$b2_config")
	b2_config=$(jq ".networks.podman2.interface_name |= \"eth1\"" <<<"$b2_config")
	b2_network=$(echo "$b2_config" | jq -r .networks)
	b2_network_info=$(echo "$b2_config" | jq -r .network_info)
	b2_ip=$(echo "$b2_network" | jq -r .podman2.static_ips[0])

	# Now we must merge c1 and b2 for eventual teardown
	c1b2_config=$(jq -r ".networks += $b2_network" <<<"$c1_config")
	c1b2_config=$(jq -r ".network_info += $b2_network_info" <<<"$c1b2_config")

	# Create the containers but do not add to NS_PIDS or CONTAINER_CONFIGS
	connect "$a1_pid" "$b1_config"
	connect "$c1_pid" "$b2_config"

	# Reset CONTAINER_CONFIGS and add the two new ones
	CONTAINER_CONFIGS=("$a1b1_config" "$c1b2_config")

	# Verify
	# b1 should be able to resolve cone through b subnet
	dig "$a1_pid" "cone" "$b_gw"
	assert "$b2_ip"

	# a1 should be able to resolve cone
	dig "$a1_pid" "cone" "$a_gw"
	assert "$b2_ip"

	# a1b1 should be able to resolve cone_nw alias
	dig "$a1_pid" "cone_nw" "$a_gw"
	assert "$b2_ip"

	# c1 (connected as b2) should be able to resolve aone through the b subnet
	dig "$c1_pid" "aone" "$b_gw"
	assert "$b1_ip"

	# c1 should be able to resolve aone
	dig "$c1_pid" "aone" "$c_gw"
	assert "$b1_ip"

	# b2c1 should be able to resolve aone_nw alias
	dig "$c1_pid" "aone_nw" "$c_gw"
	assert "$b1_ip"
}


@test "three subnets two ipaddress v6 and one ipaddress v4, one container on two of the subnets, network connect" {
	# Create all three subnets
	# Two of the subnets must be on ip address v6 and one on ip address v4
	subnet_a=$(random_subnet 5)
	subnet_b=$(random_subnet 6)
	subnet_c=$(random_subnet 6)

	# A1 on subnet A
	create_config network_name="podman1" container_id=$(random_string 64) container_name="aone" subnet="$subnet_a"
	a1_config=$config
	a1_container_id=$(echo "$a1_config" | jq -r .container_id)
	a1_ip=$(echo "$a1_config" | jq -r .networks.podman1.static_ips[0])
	a_gw=$(echo "$a1_config" | jq -r .network_info.podman1.subnets[0].gateway)
	a1_hash=$(echo "$a1_config" | jq -r .network_info.podman1.id)
	create_container "$a1_config"
	a1_pid=$CONTAINER_NS_PID

	# C1 on subnet C
	create_config network_name="podman3" container_id=$(random_string 64) container_name="cone" subnet="$subnet_c"
	c1_config=$config
	c1_container_id=$(echo "$c1_config" | jq -r .container_id)
	c1_ip=$(echo "$c1_config" | jq -r .networks.podman3.static_ips[0])
	c_gw=$(echo "$c1_config" | jq -r .network_info.podman3.subnets[0].gateway)
	c1_hash=$(echo "$c1_config" | jq -r .network_info.podman3.id)
	create_container "$c1_config"
	c1_pid=$CONTAINER_NS_PID
	c_subnets=$(echo $c1_config | jq -r .network_info.podman3.subnets[0])

	# We now have one container on A and one on C.  We now simulate
	# a network connect on both to B.

	# Create B1 config for network connect
	create_config network_name="podman2" container_id=$(random_string 64) container_name="aone" subnet="$subnet_b" aliases='"aone_nw"'
	b1_config=$config
	# The container ID should be the same
	b1_config=$(jq ".container_id  |= \"$a1_container_id\"" <<<"$b1_config")
	b1_config=$(jq ".networks.podman2.interface_name |= \"eth1\"" <<<"$b1_config")
	b1_network=$(echo "$b1_config" | jq -r .networks)
	b1_network_info=$(echo "$b1_config" | jq -r .network_info)
	b1_ip=$(echo "$b1_network" | jq -r .podman2.static_ips[0])
	b_gw=$(echo "$b1_network_info" | jq -r .podman2.subnets[0].gateway)

	# Now we must merge a1 and b1 for eventual teardown
	a1b1_config=$(jq -r ".networks += $b1_network" <<<"$a1_config")
	a1b1_config=$(jq -r ".network_info += $b1_network_info" <<<"$a1b1_config")

	# Create B2 config for network connect
	#
	create_config network_name="podman2" container_id=$(random_string 64) container_name="cone" subnet="$subnet_b" aliases='"cone_nw"'
	b2_config=$config
	# The container ID should be the same
	b2_config=$(jq ".container_id  |= \"$c1_container_id\"" <<<"$b2_config")
	b2_config=$(jq ".networks.podman2.interface_name |= \"eth1\"" <<<"$b2_config")
	b2_network=$(echo "$b2_config" | jq -r .networks)
	b2_network_info=$(echo "$b2_config" | jq -r .network_info)
	b2_ip=$(echo "$b2_network" | jq -r .podman2.static_ips[0])

	# Now we must merge c1 and b2 for eventual teardown
	c1b2_config=$(jq -r ".networks += $b2_network" <<<"$c1_config")
	c1b2_config=$(jq -r ".network_info += $b2_network_info" <<<"$c1b2_config")

	# Create the containers but do not add to NS_PIDS or CONTAINER_CONFIGS
	connect "$a1_pid" "$b1_config"
	connect "$c1_pid" "$b2_config"

	# Reset CONTAINER_CONFIGS and add the two new ones
	CONTAINER_CONFIGS=("$a1b1_config" "$c1b2_config")

	# Verify
	# b1 should be able to resolve cone through b subnet
	dig "$a1_pid" "cone" "$b_gw" "AAAA"
	assert "$b2_ip"

	# a1 should be able to resolve cone
	dig "$a1_pid" "cone" "$a_gw" "AAAA"
	assert "$b2_ip"
}
0707010000004F000081A400000000000000000000000167AA0322000003EE000000000000000000000000000000000000002A00000000aardvark-dns-1.14.0/test/400-aliases.bats#!/usr/bin/env bats   -*- bats -*-
#
# basic netavark tests
#

load helpers

@test "two containers on the same network with aliases" {
	# container a1
	subnet_a=$(random_subnet 5)
	create_config network_name="podman1" container_id=$(random_string 64) container_name="aone" subnet="$subnet_a" aliases='"a1", "1a"'
	config_a1="$config"
	a1_ip=$(echo "$config_a1" | jq -r .networks.podman1.static_ips[0])
	gw=$(echo "$config_a1" | jq -r .network_info.podman1.subnets[0].gateway)
	create_container "$config_a1"
	a1_pid=$CONTAINER_NS_PID

	# container a2
	create_config network_name="podman1" container_id=$(random_string 64) container_name="atwo" subnet="$subnet_a" aliases='"a2", "2a"'
	config_a2="$config"
	a2_ip=$(echo "$config_a2" | jq -r .networks.podman1.static_ips[0])
	create_container "$config_a2"
	a2_pid="$CONTAINER_NS_PID"

	dig "$a1_pid" "a2" "$gw"
	assert "$a2_ip"
	dig "$a1_pid" "2a" "$gw"
	assert "$a2_ip"
	dig "$a2_pid" "a1" "$gw"
	assert "$a1_ip"
	dig "$a2_pid" "1a" "$gw"
	assert "$a1_ip"
}
07070100000050000081A400000000000000000000000167AA032200000CD2000000000000000000000000000000000000003200000000aardvark-dns-1.14.0/test/500-reverse-lookups.bats#!/usr/bin/env bats   -*- bats -*-
#
# basic netavark tests
#

load helpers

@test "check reverse lookups" {
	# container a1
	subnet_a=$(random_subnet 5)
	create_config network_name="podman1" container_id=$(random_string 64) container_name="aone" subnet="$subnet_a" aliases='"a1", "1a"'
	a1_config="$config"
	a1_ip=$(echo "$a1_config" | jq -r .networks.podman1.static_ips[0])
	gw=$(echo "$a1_config" | jq -r .network_info.podman1.subnets[0].gateway)
	create_container "$a1_config"
	a1_pid=$CONTAINER_NS_PID

	# container a2
	create_config network_name="podman1" container_id=$(random_string 64) container_name="atwo" subnet="$subnet_a" aliases='"a2", "2a"'
	a2_config="$config"
	a2_ip=$(echo "$a2_config" | jq -r .networks.podman1.static_ips[0])
	create_container "$a2_config"
	a2_pid="$CONTAINER_NS_PID"

	echo "a1 config:\n${a1_config}\n"
	echo "a2 config:\n${a2_config}\n"

	# Resolve IPs to container names
	dig_reverse "$a1_pid" "$a2_ip" "$gw"
	echo -e "Output:\n${output}\n"
	a2_expected_name=$(echo $a2_ip | awk -F. '{printf "%d.%d.%d.%d.in-addr.arpa.", $4, $3, $2, $1}')
	assert "$output" =~ "$a2_expected_name[[:space:]]*0[[:space:]]*IN[[:space:]]*PTR[[:space:]]*atwo\."
	assert "$output" =~ "$a2_expected_name[[:space:]]*0[[:space:]]*IN[[:space:]]*PTR[[:space:]]*a2\."
	assert "$output" =~ "$a2_expected_name[[:space:]]*0[[:space:]]*IN[[:space:]]*PTR[[:space:]]*2a\."
	dig_reverse "$a2_pid" "$a1_ip" "$gw"
	echo -e "Output:\n${output}\n"
	a1_expected_name=$(echo $a1_ip | awk -F. '{printf "%d.%d.%d.%d.in-addr.arpa.", $4, $3, $2, $1}')
	assert "$output" =~ "$a1_expected_name[[:space:]]*0[[:space:]]*IN[[:space:]]*PTR[[:space:]]*aone\."
	assert "$output" =~ "$a1_expected_name[[:space:]]*0[[:space:]]*IN[[:space:]]*PTR[[:space:]]*a1\."
	assert "$output" =~ "$a1_expected_name[[:space:]]*0[[:space:]]*IN[[:space:]]*PTR[[:space:]]*1a\."
}

@test "check reverse lookups on ipaddress v6" {
	# container a1
	subnet_a=$(random_subnet 6)
	create_config network_name="podman1" container_id=$(random_string 64) container_name="aone" subnet="$subnet_a" aliases='"a1", "1a"'
	a1_config="$config"
	a1_ip=$(echo "$a1_config" | jq -r .networks.podman1.static_ips[0])
	gw=$(echo "$a1_config" | jq -r .network_info.podman1.subnets[0].gateway)
	create_container "$a1_config"
	a1_pid=$CONTAINER_NS_PID

	# container a2
	create_config network_name="podman1" container_id=$(random_string 64) container_name="atwo" subnet="$subnet_a" aliases='"a2", "2a"'
	a2_config="$config"
	a2_ip=$(echo "$a2_config" | jq -r .networks.podman1.static_ips[0])
	create_container "$a2_config"
	a2_pid="$CONTAINER_NS_PID"

	echo "$a1_config"
	echo "$a2_config"

	# Resolve IPs to container names
	# It is much harder to construct the ip6.arpa address for ipv6 so we just check that we are in the fd00::/8 range
	dig_reverse "$a1_pid" "$a2_ip" "$gw"
	assert "$output" =~ '([0-9a-f]\.){30}d\.f\.ip6\.arpa\.[ 	].*[ 	]atwo\.'
	assert "$output" =~ '([0-9a-f]\.){30}d\.f\.ip6\.arpa\.[ 	].*[ 	]a2\.'
	assert "$output" =~ '([0-9a-f]\.){30}d\.f\.ip6\.arpa\.[ 	].*[ 	]2a\.'
	dig_reverse "$a2_pid" "$a1_ip" "$gw"
	assert "$output" =~ '([0-9a-f]\.){30}d\.f\.ip6\.arpa\.[ 	].*[ 	]aone\.'
	assert "$output" =~ '([0-9a-f]\.){30}d\.f\.ip6\.arpa\.[ 	].*[ 	]a1\.'
	assert "$output" =~ '([0-9a-f]\.){30}d\.f\.ip6\.arpa\.[ 	].*[ 	]1a\.'
}
07070100000051000081A400000000000000000000000167AA0322000005D9000000000000000000000000000000000000002900000000aardvark-dns-1.14.0/test/600-errors.bats#!/usr/bin/env bats   -*- bats -*-
#
# aardvark-dns error handling tests
#

load helpers


NCPID=

function teardown() {
    # NCPID is only set after nc has been started; guard against an
    # empty value so a failed setup does not also fail the teardown.
    if [[ -n "$NCPID" ]]; then
        kill -9 "$NCPID"
    fi
    basic_teardown
}

# check bind error on startup
@test "aardvark-dns should fail when udp port is already bound" {
	# bind the port to force a failure for aardvark-dns
	# we cannot use run_is_host_netns to run in the background
	nsenter -m -n -t $HOST_NS_PID nc -u -l 0.0.0.0 53 </dev/null 3> /dev/null &
	NCPID=$!

	# ensure nc has time to bind the port
	sleep 1

	subnet_a=$(random_subnet 5)
	create_config network_name="podman1" container_id=$(random_string 64) container_name="aone" subnet="$subnet_a"
	gw=$(echo "$config" | jq -r .network_info.podman1.subnets[0].gateway)
	expected_rc=1 create_container "$config"
	assert "$output" =~ "failed to bind udp listener on $gw:53" "bind error message"
}

@test "aardvark-dns should fail when tcp port is already bound" {
	# bind the port to force a failure for aardvark-dns
	# we cannot use run_is_host_netns to run in the background
	nsenter -m -n -t $HOST_NS_PID nc -l 0.0.0.0 53 </dev/null 3> /dev/null &
	NCPID=$!

	# ensure nc has time to bind the port
	sleep 1

	subnet_a=$(random_subnet 5)
	create_config network_name="podman1" container_id=$(random_string 64) container_name="aone" subnet="$subnet_a"
	gw=$(echo "$config" | jq -r .network_info.podman1.subnets[0].gateway)
	expected_rc=1 create_container "$config"
	assert "$output" =~ "failed to bind tcp listener on $gw:53" "bind error message"
}
07070100000052000081A400000000000000000000000167AA03220000014C000000000000000000000000000000000000002600000000aardvark-dns-1.14.0/test/dnsmasq.confinterface=lo
bind-interfaces

no-hosts
no-resolv

log-queries

user=

# aone and bone should return NXDOMAIN, by default dnsmasq returns REFUSED
address=/aone/
address=/bone/
address=/testname/198.51.100.1
address=/testname.local/198.51.100.2
address=/example.podman.io/198.51.100.100


txt-record=example.podman.io,"v=spf1 a -all"
07070100000053000081A400000000000000000000000167AA032200004592000000000000000000000000000000000000002600000000aardvark-dns-1.14.0/test/helpers.bash# -*- bash -*-

# Netavark binary to run
NETAVARK=${NETAVARK:-/usr/libexec/podman/netavark}

TESTSDIR=${TESTSDIR:-$(dirname ${BASH_SOURCE})}

AARDVARK=${AARDVARK:-$TESTSDIR/../bin/aardvark-dns}

# export RUST_BACKTRACE so that we get a helpful stack trace
export RUST_BACKTRACE=full

# FIXME current 6.11.4 f40 kernel is broken and cannot use iptables with ipv6
export NETAVARK_FW=nftables

TEST_DOMAIN=example.podman.io

HOST_NS_PID=
CONTAINER_NS_PID=

CONTAINER_CONFIGS=()
CONTAINER_NS_PIDS=()

#### Functions below are taken from podman and buildah and adapted to netavark.

################
#  run_helper  #  Invoke args, with timeout, using BATS 'run'
################
#
# First, we use 'timeout' to abort (with a diagnostic) if something
# takes too long; this is preferable to a CI hang.
#
# Second, we log the command run and its output. This doesn't normally
# appear in BATS output, but it will if there's an error.
#
# Third, we check exit status. Since the normal desired code is 0,
# that's the default; but the expected_rc var can override:
#
#     expected_rc=125 run_helper nonexistent-subcommand
#     expected_rc=?   run_helper some-other-command       # let our caller check status
#
# Since we use the BATS 'run' mechanism, $output and $status will be
# defined for our caller.
#
function run_helper() {
    # expected_rc if unset set default to 0
    expected_rc="${expected_rc-0}"
    if [ "$expected_rc" == "?" ]; then
        expected_rc=
    fi
    # Remember command args, for possible use in later diagnostic messages
    MOST_RECENT_COMMAND="$*"

    # stdout is only emitted upon error; this echo is to help a debugger
    echo "$_LOG_PROMPT $*"

    # BATS hangs if a subprocess remains and keeps FD 3 open; this happens
    # if a process crashes unexpectedly without cleaning up subprocesses.
    run timeout --foreground -v --kill=10 10 "$@" 3>&-
    # without "quotes", multiple lines are glommed together into one
    if [ -n "$output" ]; then
        echo "$output"
    fi
    if [ "$status" -ne 0 ]; then
        echo -n "[ rc=$status "
        if [ -n "$expected_rc" ]; then
            if [ "$status" -eq "$expected_rc" ]; then
                echo -n "(expected) "
            else
                echo -n "(** EXPECTED $expected_rc **) "
            fi
        fi
        echo "]"
    fi

    if [ "$status" -eq 124 ]; then
        if expr "$output" : ".*timeout: sending" >/dev/null; then
            # It's possible for a subtest to _want_ a timeout
            if [[ "$expected_rc" != "124" ]]; then
                echo "*** TIMED OUT ***"
                false
            fi
        fi
    fi

    if [ -n "$expected_rc" ]; then
        if [ "$status" -ne "$expected_rc" ]; then
            die "exit code is $status; expected $expected_rc"
        fi
    fi

    # unset
    unset expected_rc
}

#########
#  die  #  Abort with helpful message
#########
function die() {
    # FIXME: handle multi-line output
    echo "#/vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv" >&2
    echo "#| FAIL: $*" >&2
    echo "#\\^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^" >&2
    false
}
############
#  assert  #  Compare actual vs expected string; fail if mismatch
############
#
# Compares string (default: $output) against the given string argument.
# By default we do an exact-match comparison against $output, but there
# are two different ways to invoke us, each with an optional description:
#
#      assert               "EXPECT" [DESCRIPTION]
#      assert "RESULT" "OP" "EXPECT" [DESCRIPTION]
#
# The first form (one or two arguments) does an exact-match comparison
# of "$output" against "EXPECT". The second (three or four args) compares
# the first parameter against EXPECT, using the given OPerator. If present,
# DESCRIPTION will be displayed on test failure.
#
# Examples:
#
#   xpect "this is exactly what we expect"
#   xpect "${lines[0]}" =~ "^abc"  "first line begins with abc"
#
function assert() {
    local actual_string="$output"
    local operator='=='
    local expect_string="$1"
    local testname="$2"

    case "${#*}" in
    0) die "Internal error: 'assert' requires one or more arguments" ;;
    1 | 2) ;;
    3 | 4)
        actual_string="$1"
        operator="$2"
        expect_string="$3"
        testname="$4"
        ;;
    *) die "Internal error: too many arguments to 'assert'" ;;
    esac

    # Comparisons.
    # Special case: there is no !~ operator, so fake it via '! x =~ y'
    local not=
    local actual_op="$operator"
    if [[ $operator == '!~' ]]; then
        not='!'
        actual_op='=~'
    fi
    if [[ $operator == '=' || $operator == '==' ]]; then
        # Special case: we can't use '=' or '==' inside [[ ... ]] because
        # the right-hand side is treated as a pattern... and '[xy]' will
        # not compare literally. There seems to be no way to turn that off.
        if [ "$actual_string" = "$expect_string" ]; then
            return
        fi
    elif [[ $operator == '!=' ]]; then
        # Same special case as above
        if [ "$actual_string" != "$expect_string" ]; then
            return
        fi
    else
        if eval "[[ $not \$actual_string $actual_op \$expect_string ]]"; then
            return
        elif [ $? -gt 1 ]; then
            die "Internal error: could not process 'actual' $operator 'expect'"
        fi
    fi

    # Test has failed. Get a descriptive test name.
    if [ -z "$testname" ]; then
        testname="${MOST_RECENT_BUILDAH_COMMAND:-[no test name given]}"
    fi

    # Display optimization: the typical case for 'expect' is an
    # exact match ('='), but there are also '=~' or '!~' or '-ge'
    # and the like. Omit the '=' but show the others; and always
    # align subsequent output lines for ease of comparison.
    local op=''
    local ws=''
    if [ "$operator" != '==' ]; then
        op="$operator "
        ws=$(printf "%*s" ${#op} "")
    fi

    # This is a multi-line message, which may in turn contain multi-line
    # output, so let's format it ourself, readably
    local actual_split
    IFS=$'\n' read -rd '' -a actual_split <<<"$actual_string" || true
    printf "#/vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n" >&2
    printf "#|     FAIL: %s\n" "$testname" >&2
    printf "#| expected: %s'%s'\n" "$op" "$expect_string" >&2
    printf "#|   actual: %s'%s'\n" "$ws" "${actual_split[0]}" >&2
    local line
    for line in "${actual_split[@]:1}"; do
        printf "#|         > %s'%s'\n" "$ws" "$line" >&2
    done
    printf "#\\^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n" >&2
    false
}

#################
#  assert_json  #  Compare actual json vs expected string; fail if mismatch
#################
# assert_json works like assert except that it accepts one extra parameter,
# the jq query string.
# There are two different ways to invoke us, each with an optional description:
#
#      assert_json               "JQ_QUERY"      "EXPECT" [DESCRIPTION]
#      assert_json "JSON_STRING" "JQ_QUERY" "OP" "EXPECT" [DESCRIPTION]
# Important: this function will overwrite $output, so if you need to use the
# value more than once you need to save it in another variable.
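#
# Examples (illustrative only, not taken from a real test; values assumed):
#
#     assert_json ".container_name" "aone"                                  # checks $output
#     assert_json "$config" ".container_name" "==" "aone" "container name"  # checks $config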
function assert_json() {
    local actual_json="$output"
    local operator='=='
    local jq_query="$1"
    local expect_string="$2"
    local testname="$3"

    case "${#*}" in
    0 | 1) die "Internal error: 'assert_json' requires two or more arguments" ;;
    2 | 3) ;;
    4 | 5)
        actual_json="$1"
        jq_query="$2"
        operator="$3"
        expect_string="$4"
        testname="$5"
        ;;
    *) die "Internal error: too many arguments to 'assert_json'" ;;
    esac
    run_helper jq -r "$jq_query" <<<"$actual_json"
    assert "$output" "$operator" "$expect_string" "$testname"
}

###################
#  random_string  #  Pseudorandom alphanumeric string of given length
###################
function random_string() {
    local length=${1:-10}
    head /dev/urandom | tr -dc a-zA-Z0-9 | head -c$length
}

###################
#  random_subnet  # generate a random private subnet
###################
#
# by default it will return a 10.x.x.0/24 ipv4 subnet
# if "6" is given as first argument it will return a "fdx:x:x:x::/64" ipv6 subnet
function random_subnet() {
    if [[ "$1" == "6" ]]; then
        printf "fd%02x:%x:%x:%x::/64" $((RANDOM % 256)) $((RANDOM % 65535)) $((RANDOM % 65535)) $((RANDOM % 65535))
    else
        printf "10.%d.%d.0/24" $((RANDOM % 256)) $((RANDOM % 256))
    fi
}

#########################
#  random_ip_in_subnet  # get a random ip from a given subnet
#########################
# the first arg must be a subnet created by random_subnet
# otherwise this function might return an invalid ip
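#
# Example (illustrative; the caller passes an incrementing counter as arg2,
# addresses shown are just one possible random outcome):
#     ip1=$(random_ip_in_subnet "10.12.34.0/24" 0)   # e.g. 10.12.34.142
#     ip2=$(random_ip_in_subnet "10.12.34.0/24" 1)   # e.g. 10.12.34.83, never a dup while counter < 10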
function random_ip_in_subnet() {
    # first trim subnet
    local net_ip=${1%/*}
    local num=
    local add=$2
    # if ip has colon it is ipv6
    if [[ "$net_ip" == *":"* ]]; then
        num=$((RANDOM % 65533 ))
        # see below
        num=$((num - num % 10 + add + 2))
        num=$(printf "%x" $num)
    else
        # if ipv4 we have to trim the final 0
        net_ip=${net_ip%0}
        # make sure to not get 0, 1 or 255
        num=$((RANDOM % 252))
        # Avoid giving out duplicated ips if we are called more than once.
        # The caller needs to keep a counter because this is executed in a subshell so we cannot use a global var here.
        # Basically subtract mod 10 then add the counter so we can never get a dup ip assuming counter < 10 which
        # should always be the case here. Add 2 to avoid using .0 .1 which have special meaning.
        num=$((num - num % 10 + add + 2))
    fi
    printf "$net_ip%s" $num
}

#########################
#  gateway_from_subnet  # get the first ip from a given subnet
#########################
# the first arg must be a subnet created by random_subnet
# otherwise this function might return an invalid ip
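#
# Examples (illustrative):
#     gateway_from_subnet "10.12.34.0/24"    # -> 10.12.34.1
#     gateway_from_subnet "fd42:1:2:3::/64"  # -> fd42:1:2:3::1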
function gateway_from_subnet() {
    # first trim subnet
    local net_ip=${1%/*}
    # set first ip in network as gateway
    local num=1
    # if ip has a dot it is ipv4
    if [[ "$net_ip" == *"."* ]]; then
        # if ipv4 we have to trim the final 0
        net_ip=${net_ip%0}
    fi
    printf "$net_ip%s" $num
}

function create_netns() {
    # create a new netns and mountns and run a sleep process to keep it alive
    # we have to redirect stdout/err to /dev/null otherwise bats will hang
    unshare -mn sleep inf &>/dev/null &
    pid=$!

    # we have to wait for unshare and check that we have a new ns before returning
    local timeout=2
    while [[ $timeout -gt 0 ]]; do
        if [ "$(readlink /proc/self/ns/net)" != "$(readlink /proc/$pid/ns/net)" ]; then
            echo $pid
            return
        fi
        sleep 1
        let timeout=$timeout-1
    done

    die "Timed out waiting for unshare new netns"
}

function get_container_netns_path() {
    echo /proc/$1/ns/net
}

################
#  run_netavark  #  Invoke $NETAVARK, with timeout, using BATS 'run'
################
#
# This is the preferred mechanism for invoking netavark: it joins
# the test network namespace before it invokes $NETAVARK,
# which may be 'netavark' or '/some/path/netavark'.
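#
# Example (illustrative, as used by create_container_backend below):
#     run_netavark setup $(get_container_netns_path $pid) <<<"$config"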
function run_netavark() {
    run_in_host_netns $NETAVARK "--config" "$AARDVARK_TMPDIR" "-a" "$AARDVARK" "$@"
}

################
#  run_in_container_netns  #  Run args in container netns
################
#
# first arg must be the container pid
function run_in_container_netns() {
    con_pid=$1
    shift
    run_helper nsenter -n -t $con_pid "$@"
}

################
#  run_in_host_netns  #  Run args in host netns
################
#
function run_in_host_netns() {
    run_helper nsenter -m -n -t $HOST_NS_PID "$@"
}

################
#  create_config#  Creates a config netavark can use
################
#
# The following arguments are supported, the order does not matter:
#     network_name=$network_name
#     container_id=$container_id
#     container_name=$container_name
#     subnet=$subnet specifies the network subnet
#     custom_dns_server=$custom_dns_server
#     aliases=$aliases comma separated container aliases for dns resolution.
#     internal={true,false} default is false
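#
# Example (illustrative, mirrors the usage in the bats tests above;
# the result is stored in the global $config variable):
#     create_config network_name="podman1" container_id=$(random_string 64) \
#         container_name="aone" subnet="$(random_subnet 5)" aliases='"a1", "1a"'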
function create_config() {
    local network_name=""
    local container_id=""
    local container_name=""
    local subnet=""
    local custom_dns_server
    local aliases=""
    local internal=false

     # parse arguments
    while [[ "$#" -gt 0 ]]; do
        IFS='=' read -r arg value <<<"$1"
        case "$arg" in
        network_name)
            network_name="$value"
            ;;
        container_id)
            container_id="$value"
            ;;
        container_name)
            container_name="$value"
            ;;
        subnet)
            subnet="$value"
            ;;
        custom_dns_server)
            custom_dns_server="$value"
            ;;
        aliases)
            aliases="$value"
            ;;
        internal)
            internal="$value"
            ;;
        *) die "unknown argument for '$arg' create_config" ;;
        esac
        shift
    done

    container_ip=$(random_ip_in_subnet $subnet $IP_COUNT)
    IP_COUNT=$((IP_COUNT + 1))
    container_gw=$(gateway_from_subnet $subnet)
    subnets="{\"subnet\":\"$subnet\",\"gateway\":\"$container_gw\"}"

    create_network "$network_name" "$container_ip" "eth0" "$aliases"
    create_network_infos "$network_name" $(random_string 64) "$subnets" "$internal"

    read -r -d '\0' config <<EOF
{
  "container_id": "$container_id",
  "container_name": "$container_name",
  "networks": {
      $new_network
  },
  "network_info": {
      $new_network_info
  },
  "dns_servers": [$custom_dns_server]
}\0
EOF

}

################
#  create_network_infos#  Creates a network_info json blob for netavark
################
# arg1 is network name
# arg2 network_id
# arg3 is subnets
# arg4 is internal
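#
# Example (illustrative; the result is stored in $new_network_info):
#     create_network_infos "podman1" "$(random_string 64)" "$subnets" false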
function create_network_infos() {
    local net_name=$1
    local net_id=$2
    local subnets=$3
    local internal=${4:-false}
    local interface_name=${net_name:0:7}

    read -r -d '\0' new_network_info <<EOF
    "$net_name": {
      "name": "$net_name",
      "id": "$net_id",
      "driver": "bridge",
      "network_interface": "$interface_name",
      "subnets": [
        $subnets
      ],
      "ipv6_enabled": true,
      "internal": $internal,
      "dns_enabled": true,
      "ipam_options": {
        "driver": "host-local"
      }
    }\0
EOF

}

################
#  create_network#  Creates a network json blob for netavark
################
# arg1 is network name
# arg2 is ip address
# arg3 is interface (ethX)
# arg4 is aliases
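#
# Example (illustrative; the result is stored in $new_network):
#     create_network "podman1" "10.12.34.2" "eth0" '"a1", "1a"'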
function create_network() {
    local net_name=$1
    shift
    local ip_address=$1
    shift
    local interface_name=$1
    shift
    local aliases=$1

    read -r -d '\0' new_network <<EOF
    "$net_name": {
      "static_ips": [
        "$ip_address"
      ],
      "aliases": [
        $aliases
      ],
      "interface_name": "$interface_name"
    }\0
EOF

}

################
#  create_container#  Creates a netns that mimics a container
################
# arg1 is config
function create_container() {
    CONTAINER_NS_PID=$(create_netns)
    CONTAINER_NS_PIDS+=("$CONTAINER_NS_PID")
    CONTAINER_CONFIGS+=("$1")
    create_container_backend "$CONTAINER_NS_PID" "$1"
}

# arg1 is pid
# arg2 is config
function create_container_backend() {
    run_netavark setup $(get_container_netns_path $1) <<<"$2"
}

################
#  connect#  Connects netns to another network
################
# arg1 is pid
# arg2 is config
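#
# Example (illustrative, taken from the network connect test above):
#     connect "$a1_pid" "$b1_config"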
function connect() {
    create_container_backend "$1" "$2"
}

function basic_host_setup() {
    HOST_NS_PID=$(create_netns)
    # make sure to set DBUS_SYSTEM_BUS_ADDRESS to an empty value
    # netavark will try to use the firewalld connection when possible
    # because we run in a separate netns we cannot use firewalld
    # firewalld runs in the host netns and not our custom netns
    # thus the firewall rules would end up in the wrong netns
    # unsetting does not work, it would use the default address
    export DBUS_SYSTEM_BUS_ADDRESS=
    AARDVARK_TMPDIR=$(mktemp -d --tmpdir=${BATS_TMPDIR:-/tmp} aardvark_bats.XXXXXX)

    IP_COUNT=0
}

function setup_dnsmasq() {
    command -v dnsmasq || die "dnsmasq not installed"

    run_in_host_netns ip link set lo up
    run_in_host_netns dnsmasq --conf-file=$TESTSDIR/dnsmasq.conf --pid-file="$AARDVARK_TMPDIR/dnsmasq.pid"
    DNSMASQ_PID=$(cat $AARDVARK_TMPDIR/dnsmasq.pid)

    # create new resolv.conf with dnsmasq dns
    echo "nameserver 127.0.0.1" >"$AARDVARK_TMPDIR/resolv.conf"
    run_in_host_netns mount --bind "$AARDVARK_TMPDIR/resolv.conf" /etc/resolv.conf
}

function basic_teardown() {
    # Now call netavark with all the configs and then kill the netns associated with it
    for i in "${!CONTAINER_CONFIGS[@]}"; do
        netavark_teardown $(get_container_netns_path "${CONTAINER_NS_PIDS[$i]}") "${CONTAINER_CONFIGS[$i]}"
        kill -9 "${CONTAINER_NS_PIDS[$i]}"
    done

    if [[ -n "$DNSMASQ_PID" ]]; then
        kill -9 $DNSMASQ_PID
        DNSMASQ_PID=""
    fi

    # Finally kill the host netns
    if [ ! -z "$HOST_NS_PID" ]; then
        echo "$HOST_NS_PID"
        kill -9 "$HOST_NS_PID"
    fi

    rm -fr "$AARDVARK_TMPDIR"
}

################
#  netavark_teardown#  Tears down a network
################
function netavark_teardown() {
    run_netavark teardown $1 <<<"$2"
}

function teardown() {
    basic_teardown
}

function dig() {
    # first arg is container_netns_pid
    # second arg is name
    # third arg is server addr
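    # fourth arg is an optional record type (e.g. "AAAA")
    # Example (illustrative): dig "$a1_pid" "atwo" "$gw" "AAAA"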
    run_in_container_netns "$1" "dig" "+short" "$2" "@$3" $4
}

function dig_reverse() {
    # first arg is container_netns_pid
    # second arg is the IP address
    # third arg is server addr
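    # Example (illustrative): dig_reverse "$a1_pid" "$a2_ip" "$gw"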
    run_in_container_netns "$1" "dig" "-x" "$2" "@$3"
}

function setup() {
    basic_host_setup
}
07070100000054000041ED00000000000000000000000267AA032200000000000000000000000000000000000000000000001D00000000aardvark-dns-1.14.0/test/tmt07070100000055000081A400000000000000000000000167AA032200000293000000000000000000000000000000000000002600000000aardvark-dns-1.14.0/test/tmt/main.fmf# Only common dependencies that are NOT required to run netavark-tests.sh are
# specified here. Everything else is in netavark-tests.sh.
require:
    - bats
    - bind-utils
    - cargo
    - clippy
    - go-md2man
    - iptables
    - jq
    - make
    - netavark
    - nftables
    - nmap-ncat
    - rustfmt
    - dnsmasq

adjust:
    duration: 10m
    when: arch == aarch64

/validate:
    tag: upstream
    summary: Validate test
    test: make -C ../.. validate

/unit:
    tag: upstream
    summary: Unit test
    test: make -C ../.. unit

/integration:
    tag: [ upstream, downstream ]
    summary: Integration tests
    test: bash test_integration.sh
07070100000056000081A400000000000000000000000167AA0322000001FC000000000000000000000000000000000000003100000000aardvark-dns-1.14.0/test/tmt/test_integration.sh#!/usr/bin/env bash

set -exo pipefail

# Remove testing-farm repos if they exist because they interfere with the
# podman-next copr. The default distro repos will not be removed and can be
# used wherever relevant.
rm -f /etc/yum.repos.d/tag-repository.repo

# We want the netavark build from podman-next, so we update it after removing
# testing-farm repo.
dnf -y update netavark

rpm -q aardvark-dns cargo netavark nftables

# Run tests
make -C ../.. AARDVARK=/usr/libexec/podman/aardvark-dns integration
07070100000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000B00000000TRAILER!!!493 blocks