File python-zeroconf-0.146.0.obscpio of Package python-zeroconf

File: python-zeroconf-0.146.0/.coveragerc

[report]
exclude_lines =
    pragma: no cover
    if TYPE_CHECKING:
    if sys.version_info

File: python-zeroconf-0.146.0/.flake8

[flake8]
exclude = docs
max-line-length = 180
extend-ignore = E203

File: python-zeroconf-0.146.0/.github/dependabot.yml

# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file

version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "monthly"
    commit-message:
      prefix: "chore(ci): "
    groups:
      github-actions:
        patterns:
          - "*"
  - package-ecosystem: "pip" # See documentation for possible values
    directory: "/" # Location of package manifests
    schedule:
      interval: "weekly"

File: python-zeroconf-0.146.0/.github/workflows/ci.yml

name: CI

on:
  push:
    branches:
      - master
  pull_request:

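# Cancel superseded runs: pull requests share a group via head_ref, while
# pushes to master fall back to the unique run_id and are never cancelled.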
concurrency:
  group: ${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: "3.12"
      - uses: pre-commit/action@v3.0.1

  # Make sure commit messages follow the conventional commits convention:
  # https://www.conventionalcommits.org
  commitlint:
    name: Lint Commit Messages
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - uses: wagoid/commitlint-github-action@v6

  test:
    strategy:
      fail-fast: false
      matrix:
        python-version:
          - "3.9"
          - "3.10"
          - "3.11"
          - "3.12"
          - "3.13"
          - "pypy-3.9"
          - "pypy-3.10"
        os:
          - ubuntu-latest
          - macos-latest
          - windows-latest
        extension:
          - "skip_cython"
          - "use_cython"
        exclude:
          - os: macos-latest
            extension: use_cython
          - os: windows-latest
            extension: use_cython
          - os: windows-latest
            python-version: "pypy-3.9"
          - os: windows-latest
            python-version: "pypy-3.10"
          - os: macos-latest
            python-version: "pypy-3.9"
          - os: macos-latest
            python-version: "pypy-3.10"
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v4
      - name: Install poetry
        run: pipx install poetry
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
          cache: "poetry"
          allow-prereleases: true
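      # SKIP_CYTHON installs without compiling the Cython extensions;
      # REQUIRE_CYTHON makes a failed extension build fatal instead of
      # silently falling back to pure Python.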
      - name: Install Dependencies no cython
        if: ${{ matrix.extension == 'skip_cython' }}
        env:
          SKIP_CYTHON: 1
        run: poetry install --only=main,dev
      - name: Install Dependencies with cython
        if: ${{ matrix.extension != 'skip_cython' }}
        env:
          REQUIRE_CYTHON: 1
        run: poetry install --only=main,dev
      - name: Test with Pytest
        run: poetry run pytest --durations=20 --timeout=60 -v --cov=zeroconf --cov-branch --cov-report xml --cov-report html --cov-report term-missing tests
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}

  benchmark:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Setup Python 3.13
        uses: actions/setup-python@v5
        with:
          python-version: "3.13"
      - uses: snok/install-poetry@v1.4.1
      - name: Install Dependencies
        run: |
          REQUIRE_CYTHON=1 poetry install --only=main,dev
        shell: bash
      - name: Run benchmarks
        uses: CodSpeedHQ/action@v3
        with:
          token: ${{ secrets.CODSPEED_TOKEN }}
          run: poetry run pytest --no-cov -vvvvv --codspeed tests/benchmarks

  release:
    needs:
      - test
      - lint
      - commitlint
    if: ${{ github.repository_owner == 'python-zeroconf' }}

    runs-on: ubuntu-latest
    environment: release
    concurrency: release
    permissions:
      id-token: write
      contents: write
    outputs:
      released: ${{ steps.release.outputs.released }}
      newest_release_tag: ${{ steps.release.outputs.tag }}

    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
          ref: ${{ github.head_ref || github.ref_name }}

      # Do a dry run of PSR
      - name: Test release
        uses: python-semantic-release/python-semantic-release@v9.21.0
        if: github.ref_name != 'master'
        with:
          root_options: --noop

      # On main branch: actual PSR + upload to PyPI & GitHub
      - name: Release
        uses: python-semantic-release/python-semantic-release@v9.21.0
        id: release
        if: github.ref_name == 'master'
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}

      - name: Publish package distributions to PyPI
        uses: pypa/gh-action-pypi-publish@release/v1
        if: steps.release.outputs.released == 'true'

      - name: Publish package distributions to GitHub Releases
        uses: python-semantic-release/upload-to-gh-release@main
        if: steps.release.outputs.released == 'true'
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}

  build_wheels:
    needs: [release]
    if: needs.release.outputs.released == 'true'

    name: Wheels for ${{ matrix.os }} (${{ matrix.musl == 'musllinux' && 'musllinux' || 'manylinux' }}) ${{ matrix.qemu }} ${{ matrix.pyver }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os:
          [
            ubuntu-24.04-arm,
            ubuntu-latest,
            windows-2019,
            macos-13,
            macos-latest,
          ]
        qemu: [""]
        musl: [""]
        pyver: [""]
        include:
          - os: ubuntu-latest
            musl: "musllinux"
          - os: ubuntu-24.04-arm
            musl: "musllinux"
          # qemu is slow, make a single
          # runner per Python version
          - os: ubuntu-latest
            qemu: armv7l
            musl: "musllinux"
            pyver: cp39
          - os: ubuntu-latest
            qemu: armv7l
            musl: "musllinux"
            pyver: cp310
          - os: ubuntu-latest
            qemu: armv7l
            musl: "musllinux"
            pyver: cp311
          - os: ubuntu-latest
            qemu: armv7l
            musl: "musllinux"
            pyver: cp312
          - os: ubuntu-latest
            qemu: armv7l
            musl: "musllinux"
            pyver: cp313
          # qemu is slow, make a single
          # runner per Python version
          - os: ubuntu-latest
            qemu: armv7l
            musl: ""
            pyver: cp39
          - os: ubuntu-latest
            qemu: armv7l
            musl: ""
            pyver: cp310
          - os: ubuntu-latest
            qemu: armv7l
            musl: ""
            pyver: cp311
          - os: ubuntu-latest
            qemu: armv7l
            musl: ""
            pyver: cp312
          - os: ubuntu-latest
            qemu: armv7l
            musl: ""
            pyver: cp313
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          ref: "master"
      # Used to host cibuildwheel
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.12"
      - name: Set up QEMU
        if: ${{ matrix.qemu }}
        uses: docker/setup-qemu-action@v3
        with:
          platforms: all
          # This should be temporary
          # xref https://github.com/docker/setup-qemu-action/issues/188
          # xref https://github.com/tonistiigi/binfmt/issues/215
          image: tonistiigi/binfmt:qemu-v8.1.5
        id: qemu
      - name: Prepare emulation
        if: ${{ matrix.qemu }}
        run: |
          if [[ -n "${{ matrix.qemu }}" ]]; then
            # Build emulated architectures only if QEMU is set,
            # use default "auto" otherwise
            echo "CIBW_ARCHS_LINUX=${{ matrix.qemu }}" >> $GITHUB_ENV
          fi
      - name: Limit to a specific Python version on slow QEMU
        if: ${{ matrix.pyver }}
        run: |
          if [[ -n "${{ matrix.pyver }}" ]]; then
            echo "CIBW_BUILD=${{ matrix.pyver }}*" >> $GITHUB_ENV
          fi

      - uses: actions/checkout@v4
        with:
          ref: ${{ needs.release.outputs.newest_release_tag }}
          fetch-depth: 0

      - name: Build wheels ${{ matrix.musl }} (${{ matrix.qemu }})
        uses: pypa/cibuildwheel@v2.23.0
        # to supply options, put them in 'env', like:
        env:
          CIBW_SKIP: cp36-* cp37-* pp36-* pp37-* pp38-* cp38-* ${{ matrix.musl == 'musllinux' && '*manylinux*' || '*musllinux*' }}
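          # Build only the matching libc flavor on each runner: musllinux
          # jobs skip manylinux wheels and vice versa.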
          CIBW_BEFORE_ALL_LINUX: apt install -y gcc || yum install -y gcc || apk add gcc
          REQUIRE_CYTHON: 1

      - uses: actions/upload-artifact@v4
        with:
          path: ./wheelhouse/*.whl
          name: wheels-${{ matrix.os }}-${{ matrix.musl }}-${{ matrix.qemu }}-${{ matrix.pyver }}

  upload_pypi:
    needs: [build_wheels]
    runs-on: ubuntu-latest
    environment: release

    steps:
      - uses: actions/download-artifact@v4
        with:
          # unpacks default artifact into dist/
          # if `name: artifact` is omitted, the action will create extra parent dir
          pattern: wheels-*
          path: dist
          merge-multiple: true

      - uses: pypa/gh-action-pypi-publish@v1.12.4
        with:
          user: __token__
          password: ${{ secrets.PYPI_TOKEN }}

          # To test: repository_url: https://test.pypi.org/legacy/

File: python-zeroconf-0.146.0/.gitignore

build/
*.pyc
*.pyo
Thumbs.db
.DS_Store
.project
.pydevproject
.settings
.idea
.vslick
.cache
.mypy_cache/
docs/_build/
.vscode
/dist/
/zeroconf.egg-info/
/src/**/*.c

File: python-zeroconf-0.146.0/.pre-commit-config.yaml

# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
exclude: "CHANGELOG.md"
default_stages: [pre-commit]

ci:
  autofix_commit_msg: "chore(pre-commit.ci): auto fixes"
  autoupdate_commit_msg: "chore(pre-commit.ci): pre-commit autoupdate"

repos:
  - repo: https://github.com/commitizen-tools/commitizen
    rev: v4.4.1
    hooks:
      - id: commitizen
        stages: [commit-msg]
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      - id: check-builtin-literals
      - id: check-case-conflict
      - id: check-docstring-first
      - id: check-json
      - id: check-shebang-scripts-are-executable
      - id: check-toml
      - id: check-xml
      - id: check-yaml
      - id: debug-statements
      - id: detect-private-key
      - id: end-of-file-fixer
      - id: trailing-whitespace
  - repo: https://github.com/pre-commit/mirrors-prettier
    rev: v4.0.0-alpha.8
    hooks:
      - id: prettier
        args: ["--tab-width", "2"]
        files: "\\.(css|html|js|json|md|toml|yaml)$"
  - repo: https://github.com/asottile/pyupgrade
    rev: v3.19.1
    hooks:
      - id: pyupgrade
        args: [--py39-plus]
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.9.9
    hooks:
      - id: ruff
        args: [--fix, --exit-non-zero-on-fix]
      - id: ruff-format
  - repo: https://github.com/codespell-project/codespell
    rev: v2.4.1
    hooks:
      - id: codespell
  - repo: https://github.com/PyCQA/flake8
    rev: 7.1.2
    hooks:
      - id: flake8
  - repo: https://github.com/pre-commit/mirrors-mypy
    rev: v1.15.0
    hooks:
      - id: mypy
        additional_dependencies: [ifaddr]
  - repo: https://github.com/MarcoGorelli/cython-lint
    rev: v0.16.6
    hooks:
      - id: cython-lint
      - id: double-quote-cython-strings

File: python-zeroconf-0.146.0/.readthedocs.yaml

# Read the Docs configuration file for Sphinx projects
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
version: 2

build:
  os: ubuntu-24.04
  tools:
    python: "3.12"
  jobs:
    post_install:
      # https://docs.readthedocs.com/platform/stable/build-customization.html#install-dependencies-with-poetry
      - pip install poetry
      - SKIP_CYTHON=1 VIRTUAL_ENV=$READTHEDOCS_VIRTUALENV_PATH poetry install --with docs

sphinx:
  configuration: docs/conf.py

File: python-zeroconf-0.146.0/CHANGELOG.md

# CHANGELOG


## v0.146.0 (2025-03-05)

### Features

- Reduce size of wheels ([#1540](https://github.com/python-zeroconf/python-zeroconf/pull/1540),
  [`dea233c`](https://github.com/python-zeroconf/python-zeroconf/commit/dea233c1e0e80584263090727ce07648755964af))

feat: reduce size of binaries


## v0.145.1 (2025-02-18)

### Bug Fixes

- Hold a strong reference to the AsyncEngine setup task
  ([#1533](https://github.com/python-zeroconf/python-zeroconf/pull/1533),
  [`d4e6f25`](https://github.com/python-zeroconf/python-zeroconf/commit/d4e6f25754c15417b8bd9839dc8636b2cff717c8))
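
The pitfall behind this fix is generic asyncio behavior: the event loop holds
only a weak reference to tasks, so a task created with `create_task` and never
stored anywhere can be garbage collected before it finishes. A minimal sketch
of the pattern (illustrative names, not the library's internals):

```python
import asyncio
from typing import Optional


class Engine:
    """Sketch: hold a strong reference so the setup task cannot be GC'd."""

    def __init__(self) -> None:
        self._setup_task: Optional[asyncio.Task] = None

    def setup(self) -> None:
        # Storing the task on the instance keeps it alive; a bare
        # create_task(...) call could be collected mid-flight.
        self._setup_task = asyncio.get_running_loop().create_task(self._async_setup())

    async def _async_setup(self) -> None:
        await asyncio.sleep(0)  # stand-in for real socket/engine setup


async def main() -> None:
    engine = Engine()
    engine.setup()
    await engine._setup_task


asyncio.run(main())
```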


## v0.145.0 (2025-02-15)

### Features

- **docs**: Enable link to source code
  ([#1529](https://github.com/python-zeroconf/python-zeroconf/pull/1529),
  [`1c7f354`](https://github.com/python-zeroconf/python-zeroconf/commit/1c7f3548b6cbddf73dbb9d69cd8987c8ad32c705))


## v0.144.3 (2025-02-14)

### Bug Fixes

- Non-unique name during wheel upload
  ([#1527](https://github.com/python-zeroconf/python-zeroconf/pull/1527),
  [`43136fa`](https://github.com/python-zeroconf/python-zeroconf/commit/43136fa418d4d7826415e1d0f7761b198347ced7))


## v0.144.2 (2025-02-14)

### Bug Fixes

- Add a helpful hint for when EADDRINUSE happens during startup
  ([#1526](https://github.com/python-zeroconf/python-zeroconf/pull/1526),
  [`48dbb71`](https://github.com/python-zeroconf/python-zeroconf/commit/48dbb7190a4f5126e39dbcdb87e34380d4562cd0))


## v0.144.1 (2025-02-12)

### Bug Fixes

- Wheel builds failing after adding armv7l builds
  ([#1518](https://github.com/python-zeroconf/python-zeroconf/pull/1518),
  [`e7adac9`](https://github.com/python-zeroconf/python-zeroconf/commit/e7adac9c59fc4d0c4822c6097a4daee3d68eb4de))


## v0.144.0 (2025-02-12)

### Features

- Add armv7l wheel builds ([#1517](https://github.com/python-zeroconf/python-zeroconf/pull/1517),
  [`39887b8`](https://github.com/python-zeroconf/python-zeroconf/commit/39887b80328d616e8e6f6ca9d08aecc06f7b0711))


## v0.143.1 (2025-02-12)

### Bug Fixes

- Make "no buffer space available" when adding multicast memberships forgiving
  ([#1516](https://github.com/python-zeroconf/python-zeroconf/pull/1516),
  [`f377d5c`](https://github.com/python-zeroconf/python-zeroconf/commit/f377d5cd08d724282c8487785163b466f3971344))


## v0.143.0 (2025-01-31)

### Features

- Eliminate async_timeout dep on python less than 3.11
  ([#1500](https://github.com/python-zeroconf/python-zeroconf/pull/1500),
  [`44457be`](https://github.com/python-zeroconf/python-zeroconf/commit/44457be4571add2f851192db3b37a96d9d27b00e))
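
The usual shape of this kind of change is a conditional import, since
`asyncio.timeout` only exists on Python 3.11+; a hedged sketch of the pattern:

```python
import sys

if sys.version_info >= (3, 11):
    from asyncio import timeout as asyncio_timeout  # stdlib on 3.11+
else:
    from async_timeout import timeout as asyncio_timeout  # backport dependency


async def wait_for_response() -> None:
    # Call sites stay identical on every supported Python version.
    async with asyncio_timeout(1.0):
        ...
```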


## v0.142.0 (2025-01-30)

### Features

- Add simple address resolvers and examples
  ([#1499](https://github.com/python-zeroconf/python-zeroconf/pull/1499),
  [`ae3c352`](https://github.com/python-zeroconf/python-zeroconf/commit/ae3c3523e5f2896989d0b932d53ef1e24ef4aee8))


## v0.141.0 (2025-01-22)

### Features

- Speed up adding and expiring records in the DNSCache
  ([#1490](https://github.com/python-zeroconf/python-zeroconf/pull/1490),
  [`628b136`](https://github.com/python-zeroconf/python-zeroconf/commit/628b13670d04327dd8d4908842f31b476598c7e8))


## v0.140.1 (2025-01-17)

### Bug Fixes

- Wheel builds for aarch64 ([#1485](https://github.com/python-zeroconf/python-zeroconf/pull/1485),
  [`9d228e2`](https://github.com/python-zeroconf/python-zeroconf/commit/9d228e28eead1561deda696e8837d59896cbc98d))


## v0.140.0 (2025-01-17)

### Bug Fixes

- **docs**: Remove repetition of words
  ([#1479](https://github.com/python-zeroconf/python-zeroconf/pull/1479),
  [`dde26c6`](https://github.com/python-zeroconf/python-zeroconf/commit/dde26c655a49811c11071b0531e408a188687009))

Co-authored-by: J. Nick Koston <nick@koston.org>

### Features

- Migrate to native types ([#1472](https://github.com/python-zeroconf/python-zeroconf/pull/1472),
  [`22a0fb4`](https://github.com/python-zeroconf/python-zeroconf/commit/22a0fb487db27bc2c6448a9167742f3040e910ba))

Co-authored-by: J. Nick Koston <nick@koston.org>

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>

- Small performance improvement to writing outgoing packets
  ([#1482](https://github.com/python-zeroconf/python-zeroconf/pull/1482),
  [`d9be715`](https://github.com/python-zeroconf/python-zeroconf/commit/d9be7155a0ef1ac521e5bbedd3884ddeb9f0b99d))


## v0.139.0 (2025-01-09)

### Features

- Implement heapq for tracking cache expire times
  ([#1465](https://github.com/python-zeroconf/python-zeroconf/pull/1465),
  [`09db184`](https://github.com/python-zeroconf/python-zeroconf/commit/09db1848957b34415f364b7338e4adce99b57abc))
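
`heapq` keeps the soonest expiration at the front of the heap, so expiring
records means popping from the top instead of scanning the whole cache. A
generic sketch of the technique (not the library's exact code):

```python
import heapq
import time


class ExpiringCache:
    def __init__(self) -> None:
        self._data: dict[str, str] = {}
        self._heap: list[tuple[float, str]] = []  # (expire_time, key)

    def add(self, key: str, value: str, ttl: float) -> None:
        self._data[key] = value
        heapq.heappush(self._heap, (time.monotonic() + ttl, key))

    def expire(self) -> None:
        # Only the heap top needs checking; everything behind it expires
        # later. Stale entries for re-added keys are tolerated by pop().
        now = time.monotonic()
        while self._heap and self._heap[0][0] <= now:
            _, key = heapq.heappop(self._heap)
            self._data.pop(key, None)
```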


## v0.138.1 (2025-01-08)

### Bug Fixes

- Ensure cache does not return stale created and ttl values
  ([#1469](https://github.com/python-zeroconf/python-zeroconf/pull/1469),
  [`e05055c`](https://github.com/python-zeroconf/python-zeroconf/commit/e05055c584ca46080990437b2b385a187bc48458))


## v0.138.0 (2025-01-08)

### Features

- Improve performance of processing incoming records
  ([#1467](https://github.com/python-zeroconf/python-zeroconf/pull/1467),
  [`ebbb2af`](https://github.com/python-zeroconf/python-zeroconf/commit/ebbb2afccabd3841a3cb0a39824b49773cc6258a))

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>


## v0.137.2 (2025-01-06)

### Bug Fixes

- Split wheel builds to avoid timeout
  ([#1461](https://github.com/python-zeroconf/python-zeroconf/pull/1461),
  [`be05f0d`](https://github.com/python-zeroconf/python-zeroconf/commit/be05f0dc4f6b2431606031a7bb24585728d15f01))


## v0.137.1 (2025-01-06)

### Bug Fixes

- Move wheel builds to macos-13
  ([#1459](https://github.com/python-zeroconf/python-zeroconf/pull/1459),
  [`4ff48a0`](https://github.com/python-zeroconf/python-zeroconf/commit/4ff48a01bc76c82e5710aafaf6cf6e79c069cd85))


## v0.137.0 (2025-01-06)

### Features

- Speed up parsing incoming records
  ([#1458](https://github.com/python-zeroconf/python-zeroconf/pull/1458),
  [`783c1b3`](https://github.com/python-zeroconf/python-zeroconf/commit/783c1b37d1372c90dfce658c66d03aa753afbf49))


## v0.136.2 (2024-11-21)

### Bug Fixes

- Retrigger release from failed github workflow
  ([#1443](https://github.com/python-zeroconf/python-zeroconf/pull/1443),
  [`2ea705d`](https://github.com/python-zeroconf/python-zeroconf/commit/2ea705d850c1cb096c87372d5ec855f684603d01))


## v0.136.1 (2024-11-21)

### Bug Fixes

- **ci**: Run release workflow only on main repository
  ([#1441](https://github.com/python-zeroconf/python-zeroconf/pull/1441),
  [`f637c75`](https://github.com/python-zeroconf/python-zeroconf/commit/f637c75f638ba20c193e58ff63c073a4003430b9))

- **docs**: Update python to 3.8
  ([#1430](https://github.com/python-zeroconf/python-zeroconf/pull/1430),
  [`483d067`](https://github.com/python-zeroconf/python-zeroconf/commit/483d0673d4ae3eec37840452723fc1839a6cc95c))


## v0.136.0 (2024-10-26)

### Bug Fixes

- Add ignore for .c file for wheels
  ([#1424](https://github.com/python-zeroconf/python-zeroconf/pull/1424),
  [`6535963`](https://github.com/python-zeroconf/python-zeroconf/commit/6535963b5b789ce445e77bb728a5b7ee4263e582))

- Correct typos ([#1422](https://github.com/python-zeroconf/python-zeroconf/pull/1422),
  [`3991b42`](https://github.com/python-zeroconf/python-zeroconf/commit/3991b4256b8de5b37db7a6144e5112f711b2efef))

- Update python-semantic-release to fix release process
  ([#1426](https://github.com/python-zeroconf/python-zeroconf/pull/1426),
  [`2f20155`](https://github.com/python-zeroconf/python-zeroconf/commit/2f201558d0ab089cdfebb18d2d7bb5785b2cce16))

### Features

- Use SPDX license identifier
  ([#1425](https://github.com/python-zeroconf/python-zeroconf/pull/1425),
  [`1596145`](https://github.com/python-zeroconf/python-zeroconf/commit/1596145452721e0de4e2a724b055e8e290792d3e))


## v0.135.0 (2024-09-24)

### Features

- Improve performance of DNSCache backend
  ([#1415](https://github.com/python-zeroconf/python-zeroconf/pull/1415),
  [`1df2e69`](https://github.com/python-zeroconf/python-zeroconf/commit/1df2e691ff11c9592e1cdad5599fb6601eb1aa3f))


## v0.134.0 (2024-09-08)

### Bug Fixes

- Improve helpfulness of ServiceInfo.request assertions
  ([#1408](https://github.com/python-zeroconf/python-zeroconf/pull/1408),
  [`9262626`](https://github.com/python-zeroconf/python-zeroconf/commit/9262626895d354ed7376aa567043b793c37a985e))

### Features

- Improve performance when IP addresses change frequently
  ([#1407](https://github.com/python-zeroconf/python-zeroconf/pull/1407),
  [`111c91a`](https://github.com/python-zeroconf/python-zeroconf/commit/111c91ab395a7520e477eb0e75d5924fba3c64c7))


## v0.133.0 (2024-08-27)

### Features

- Add classifier for python 3.13
  ([#1393](https://github.com/python-zeroconf/python-zeroconf/pull/1393),
  [`7fb2bb2`](https://github.com/python-zeroconf/python-zeroconf/commit/7fb2bb21421c70db0eb288fa7e73d955f58b0f5d))

- Enable building of arm64 macOS builds
  ([#1384](https://github.com/python-zeroconf/python-zeroconf/pull/1384),
  [`0df2ce0`](https://github.com/python-zeroconf/python-zeroconf/commit/0df2ce0e6f7313831da6a63d477019982d5df55c))

Co-authored-by: Alex Ciobanu <alex@rogue-research.com>

Co-authored-by: J. Nick Koston <nick@koston.org>

- Improve performance of ip address caching
  ([#1392](https://github.com/python-zeroconf/python-zeroconf/pull/1392),
  [`f7c7708`](https://github.com/python-zeroconf/python-zeroconf/commit/f7c77081b2f8c70b1ed6a9b9751a86cf91f9aae2))

- Python 3.13 support ([#1390](https://github.com/python-zeroconf/python-zeroconf/pull/1390),
  [`98cfa83`](https://github.com/python-zeroconf/python-zeroconf/commit/98cfa83710e43880698353821bae61108b08cb2f))


## v0.132.2 (2024-04-13)

### Bug Fixes

- Bump cibuildwheel to fix wheel builds
  ([#1371](https://github.com/python-zeroconf/python-zeroconf/pull/1371),
  [`83e4ce3`](https://github.com/python-zeroconf/python-zeroconf/commit/83e4ce3e31ddd4ae9aec2f8c9d84d7a93f8be210))

- Update references to minimum-supported python version of 3.8
  ([#1369](https://github.com/python-zeroconf/python-zeroconf/pull/1369),
  [`599524a`](https://github.com/python-zeroconf/python-zeroconf/commit/599524a5ce1e4c1731519dd89377c2a852e59935))


## v0.132.1 (2024-04-12)

### Bug Fixes

- Set change during iteration when dispatching listeners
  ([#1370](https://github.com/python-zeroconf/python-zeroconf/pull/1370),
  [`e9f8aa5`](https://github.com/python-zeroconf/python-zeroconf/commit/e9f8aa5741ae2d490c33a562b459f0af1014dbb0))


## v0.132.0 (2024-04-01)

### Bug Fixes

- Avoid including scope_id in IPv6Address object if it's zero
  ([#1367](https://github.com/python-zeroconf/python-zeroconf/pull/1367),
  [`edc4a55`](https://github.com/python-zeroconf/python-zeroconf/commit/edc4a556819956c238a11332052000dcbcb07e3d))

### Features

- Drop python 3.7 support ([#1359](https://github.com/python-zeroconf/python-zeroconf/pull/1359),
  [`4877829`](https://github.com/python-zeroconf/python-zeroconf/commit/4877829e6442de5426db152d11827b1ba85dbf59))

- Make async_get_service_info available on the Zeroconf object
  ([#1366](https://github.com/python-zeroconf/python-zeroconf/pull/1366),
  [`c4c2dee`](https://github.com/python-zeroconf/python-zeroconf/commit/c4c2deeb05279ddbb0eba1330c7ae58795fea001))
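
With this change the coroutine can be awaited on a plain `Zeroconf` instance
instead of only through `AsyncZeroconf`. A usage sketch (the service name below
is hypothetical; cleanup is omitted for brevity):

```python
import asyncio

from zeroconf import Zeroconf


async def main() -> None:
    # Created inside a running event loop, Zeroconf attaches to that loop.
    zc = Zeroconf()
    info = await zc.async_get_service_info(
        "_http._tcp.local.", "My Printer._http._tcp.local."
    )
    print(info)  # None if nothing answered within the default timeout


asyncio.run(main())
```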


## v0.131.0 (2023-12-19)

### Features

- Small speed up to constructing outgoing packets
  ([#1354](https://github.com/python-zeroconf/python-zeroconf/pull/1354),
  [`517d7d0`](https://github.com/python-zeroconf/python-zeroconf/commit/517d7d00ca7738c770077738125aec0e4824c000))

- Speed up processing incoming packets
  ([#1352](https://github.com/python-zeroconf/python-zeroconf/pull/1352),
  [`6c15325`](https://github.com/python-zeroconf/python-zeroconf/commit/6c153258a995cf9459a6f23267b7e379b5e2550f))

- Speed up the query handler ([#1350](https://github.com/python-zeroconf/python-zeroconf/pull/1350),
  [`9eac0a1`](https://github.com/python-zeroconf/python-zeroconf/commit/9eac0a122f28a7a4fa76cbfdda21d9a3571d7abb))


## v0.130.0 (2023-12-16)

### Bug Fixes

- Ensure IPv6 scoped address construction uses the string cache
  ([#1336](https://github.com/python-zeroconf/python-zeroconf/pull/1336),
  [`f78a196`](https://github.com/python-zeroconf/python-zeroconf/commit/f78a196db632c4fe017a34f1af8a58903c15a575))

- Ensure question history suppresses duplicates
  ([#1338](https://github.com/python-zeroconf/python-zeroconf/pull/1338),
  [`6f23656`](https://github.com/python-zeroconf/python-zeroconf/commit/6f23656576daa04e3de44e100f3ddd60ee4c560d))

- Microsecond precision loss in the query handler
  ([#1339](https://github.com/python-zeroconf/python-zeroconf/pull/1339),
  [`6560fad`](https://github.com/python-zeroconf/python-zeroconf/commit/6560fad584e0d392962c9a9248759f17c416620e))

- Scheduling race with the QueryScheduler
  ([#1347](https://github.com/python-zeroconf/python-zeroconf/pull/1347),
  [`cf40470`](https://github.com/python-zeroconf/python-zeroconf/commit/cf40470b89f918d3c24d7889d3536f3ffa44846c))

### Features

- Make ServiceInfo aware of question history
  ([#1348](https://github.com/python-zeroconf/python-zeroconf/pull/1348),
  [`b9aae1d`](https://github.com/python-zeroconf/python-zeroconf/commit/b9aae1de07bf1491e873bc314f8a1d7996127ad3))

- Significantly improve efficiency of the ServiceBrowser scheduler
  ([#1335](https://github.com/python-zeroconf/python-zeroconf/pull/1335),
  [`c65d869`](https://github.com/python-zeroconf/python-zeroconf/commit/c65d869aec731b803484871e9d242a984f9f5848))

- Small performance improvement constructing outgoing questions
  ([#1340](https://github.com/python-zeroconf/python-zeroconf/pull/1340),
  [`157185f`](https://github.com/python-zeroconf/python-zeroconf/commit/157185f28bf1e83e6811e2a5cd1fa9b38966f780))

- Small performance improvement for converting time
  ([#1342](https://github.com/python-zeroconf/python-zeroconf/pull/1342),
  [`73d3ab9`](https://github.com/python-zeroconf/python-zeroconf/commit/73d3ab90dd3b59caab771235dd6dbedf05bfe0b3))

- Small performance improvement for ServiceInfo asking questions
  ([#1341](https://github.com/python-zeroconf/python-zeroconf/pull/1341),
  [`810a309`](https://github.com/python-zeroconf/python-zeroconf/commit/810a3093c5a9411ee97740b468bd706bdf4a95de))

- Small speed up to processing incoming records
  ([#1345](https://github.com/python-zeroconf/python-zeroconf/pull/1345),
  [`7de655b`](https://github.com/python-zeroconf/python-zeroconf/commit/7de655b6f05012f20a3671e0bcdd44a1913d7b52))

- Small speed up to ServiceInfo construction
  ([#1346](https://github.com/python-zeroconf/python-zeroconf/pull/1346),
  [`b329d99`](https://github.com/python-zeroconf/python-zeroconf/commit/b329d99917bb731b4c70bf20c7c010eeb85ad9fd))


## v0.129.0 (2023-12-13)

### Features

- Add decoded_properties method to ServiceInfo
  ([#1332](https://github.com/python-zeroconf/python-zeroconf/pull/1332),
  [`9b595a1`](https://github.com/python-zeroconf/python-zeroconf/commit/9b595a1dcacf109c699953219d70fe36296c7318))

- Cache is_unspecified for zeroconf ip address objects
  ([#1331](https://github.com/python-zeroconf/python-zeroconf/pull/1331),
  [`a1c84dc`](https://github.com/python-zeroconf/python-zeroconf/commit/a1c84dc6adeebd155faec1a647c0f70d70de2945))

- Ensure ServiceInfo.properties always returns bytes
  ([#1333](https://github.com/python-zeroconf/python-zeroconf/pull/1333),
  [`d29553a`](https://github.com/python-zeroconf/python-zeroconf/commit/d29553ab7de6b7af70769ddb804fe2aaf492f320))
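
The first and last entries above pin down the `ServiceInfo` property API:
`properties` is the raw bytes-to-bytes mapping from the TXT record, and
`decoded_properties` is the same data decoded to strings. A hedged sketch with
a hypothetical TXT payload:

```python
from zeroconf import ServiceInfo

info = ServiceInfo(
    "_http._tcp.local.",
    "demo._http._tcp.local.",
    addresses=[b"\x7f\x00\x00\x01"],  # 127.0.0.1, packed
    port=8080,
    properties={"path": "/admin"},  # encoded to bytes internally
)

print(info.properties)          # {b'path': b'/admin'} -- always bytes
print(info.decoded_properties)  # {'path': '/admin'} -- decoded view
```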


## v0.128.5 (2023-12-13)

### Bug Fixes

- Performance regression with ServiceInfo IPv6Addresses
  ([#1330](https://github.com/python-zeroconf/python-zeroconf/pull/1330),
  [`e2f9f81`](https://github.com/python-zeroconf/python-zeroconf/commit/e2f9f81dbc54c3dd527eeb3298897d63f99d33f4))


## v0.128.4 (2023-12-10)

### Bug Fixes

- Re-expose ServiceInfo._set_properties for backwards compat
  ([#1327](https://github.com/python-zeroconf/python-zeroconf/pull/1327),
  [`39c4005`](https://github.com/python-zeroconf/python-zeroconf/commit/39c40051d7a63bdc63a3e2dfa20bd944fee4e761))


## v0.128.3 (2023-12-10)

### Bug Fixes

- Correct nsec record writing
  ([#1326](https://github.com/python-zeroconf/python-zeroconf/pull/1326),
  [`cd7a16a`](https://github.com/python-zeroconf/python-zeroconf/commit/cd7a16a32c37b2f7a2e90d3c749525a5393bad57))


## v0.128.2 (2023-12-10)

### Bug Fixes

- Match cython version for dev deps to build deps
  ([#1325](https://github.com/python-zeroconf/python-zeroconf/pull/1325),
  [`a0dac46`](https://github.com/python-zeroconf/python-zeroconf/commit/a0dac46c01202b3d5a0823ac1928fc1d75332522))

- Timestamps missing double precision
  ([#1324](https://github.com/python-zeroconf/python-zeroconf/pull/1324),
  [`ecea4e4`](https://github.com/python-zeroconf/python-zeroconf/commit/ecea4e4217892ca8cf763074ac3e5d1b898acd21))


## v0.128.1 (2023-12-10)

### Bug Fixes

- Correct handling of IPv6 addresses with scope_id in ServiceInfo
  ([#1322](https://github.com/python-zeroconf/python-zeroconf/pull/1322),
  [`1682991`](https://github.com/python-zeroconf/python-zeroconf/commit/1682991b985b1f7b2bf0cff1a7eb7793070e7cb1))


## v0.128.0 (2023-12-02)

### Features

- Speed up unpacking TXT record data in ServiceInfo
  ([#1318](https://github.com/python-zeroconf/python-zeroconf/pull/1318),
  [`a200842`](https://github.com/python-zeroconf/python-zeroconf/commit/a20084281e66bdb9c37183a5eb992435f5b866ac))


## v0.127.0 (2023-11-15)

### Features

- Small speed up to processing incoming dns records
  ([#1315](https://github.com/python-zeroconf/python-zeroconf/pull/1315),
  [`bfe4c24`](https://github.com/python-zeroconf/python-zeroconf/commit/bfe4c24881a7259713425df5ab00ffe487518841))

- Small speed up to writing outgoing packets
  ([#1316](https://github.com/python-zeroconf/python-zeroconf/pull/1316),
  [`cd28476`](https://github.com/python-zeroconf/python-zeroconf/commit/cd28476f6b0a6c2c733273fb24ddaac6c7bbdf65))

- Speed up incoming packet reader
  ([#1314](https://github.com/python-zeroconf/python-zeroconf/pull/1314),
  [`0d60b61`](https://github.com/python-zeroconf/python-zeroconf/commit/0d60b61538a5d4b6f44b2369333b6e916a0a55b4))


## v0.126.0 (2023-11-13)

### Features

- Speed up outgoing packet writer
  ([#1313](https://github.com/python-zeroconf/python-zeroconf/pull/1313),
  [`55cf4cc`](https://github.com/python-zeroconf/python-zeroconf/commit/55cf4ccdff886a136db4e2133d3e6cdd001a8bd6))

- Speed up writing name compression for outgoing packets
  ([#1312](https://github.com/python-zeroconf/python-zeroconf/pull/1312),
  [`9caeabb`](https://github.com/python-zeroconf/python-zeroconf/commit/9caeabb6d4659a25ea1251c1ee7bb824e05f3d8b))


## v0.125.0 (2023-11-12)

### Features

- Speed up service browser queries when browsing many types
  ([#1311](https://github.com/python-zeroconf/python-zeroconf/pull/1311),
  [`d192d33`](https://github.com/python-zeroconf/python-zeroconf/commit/d192d33b1f05aa95a89965e86210aec086673a17))


## v0.124.0 (2023-11-12)

### Features

- Avoid decoding known answers if we have no answers to give
  ([#1308](https://github.com/python-zeroconf/python-zeroconf/pull/1308),
  [`605dc9c`](https://github.com/python-zeroconf/python-zeroconf/commit/605dc9ccd843a535802031f051b3d93310186ad1))

- Small speed up to process incoming packets
  ([#1309](https://github.com/python-zeroconf/python-zeroconf/pull/1309),
  [`56ef908`](https://github.com/python-zeroconf/python-zeroconf/commit/56ef90865189c01d2207abcc5e2efe3a7a022fa1))


## v0.123.0 (2023-11-12)

### Features

- Speed up instances only used to lookup answers
  ([#1307](https://github.com/python-zeroconf/python-zeroconf/pull/1307),
  [`0701b8a`](https://github.com/python-zeroconf/python-zeroconf/commit/0701b8ab6009891cbaddaa1d17116d31fd1b2f78))


## v0.122.3 (2023-11-09)

### Bug Fixes

- Do not build musllinux aarch64 wheels to reduce release time
  ([#1306](https://github.com/python-zeroconf/python-zeroconf/pull/1306),
  [`79aafb0`](https://github.com/python-zeroconf/python-zeroconf/commit/79aafb0acf7ca6b17976be7ede748008deada27b))


## v0.122.2 (2023-11-09)

### Bug Fixes

- Do not build aarch64 wheels for PyPy
  ([#1305](https://github.com/python-zeroconf/python-zeroconf/pull/1305),
  [`7e884db`](https://github.com/python-zeroconf/python-zeroconf/commit/7e884db4d958459e64257aba860dba2450db0687))


## v0.122.1 (2023-11-09)

### Bug Fixes

- Skip wheel builds for eol python and older python with aarch64
  ([#1304](https://github.com/python-zeroconf/python-zeroconf/pull/1304),
  [`6c8f5a5`](https://github.com/python-zeroconf/python-zeroconf/commit/6c8f5a5dec2072aa6a8f889c5d8a4623ab392234))


## v0.122.0 (2023-11-08)

### Features

- Build aarch64 wheels ([#1302](https://github.com/python-zeroconf/python-zeroconf/pull/1302),
  [`4fe58e2`](https://github.com/python-zeroconf/python-zeroconf/commit/4fe58e2edc6da64a8ece0e2b16ec9ebfc5b3cd83))


## v0.121.0 (2023-11-08)

### Features

- Speed up record updates ([#1301](https://github.com/python-zeroconf/python-zeroconf/pull/1301),
  [`d2af6a0`](https://github.com/python-zeroconf/python-zeroconf/commit/d2af6a0978f5abe4f8bb70d3e29d9836d0fd77c4))


## v0.120.0 (2023-11-05)

### Features

- Speed up decoding labels from incoming data
  ([#1291](https://github.com/python-zeroconf/python-zeroconf/pull/1291),
  [`c37ead4`](https://github.com/python-zeroconf/python-zeroconf/commit/c37ead4d7000607e81706a97b4cdffd80cf8cf99))

- Speed up incoming packet processing with a memory view
  ([#1290](https://github.com/python-zeroconf/python-zeroconf/pull/1290),
  [`f1f0a25`](https://github.com/python-zeroconf/python-zeroconf/commit/f1f0a2504afd4d29bc6b7cf715cd3cb81b9049f7))

- Speed up ServiceBrowsers with a pxd for the signal interface
  ([#1289](https://github.com/python-zeroconf/python-zeroconf/pull/1289),
  [`8a17f20`](https://github.com/python-zeroconf/python-zeroconf/commit/8a17f2053a89db4beca9e8c1de4640faf27726b4))
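
Slicing a `memoryview` yields a view instead of a copy of the underlying
buffer, which is the heart of the memory view change above. A generic sketch of
zero-copy header parsing (hypothetical packet, not the library's parser):

```python
import struct

packet = bytes.fromhex("002a840000010002")  # hypothetical 8-byte header
view = memoryview(packet)

# unpack_from reads straight out of the buffer; no intermediate bytes
# objects are created for the fields being parsed.
transaction_id, flags = struct.unpack_from("!HH", view, 0)
questions, answers = struct.unpack_from("!HH", view, 4)

print(transaction_id, hex(flags), questions, answers)
```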


## v0.119.0 (2023-10-18)

### Features

- Update cibuildwheel to build wheels on latest cython final release
  ([#1285](https://github.com/python-zeroconf/python-zeroconf/pull/1285),
  [`e8c9083`](https://github.com/python-zeroconf/python-zeroconf/commit/e8c9083bb118764a85b12fac9055152a2f62a212))


## v0.118.1 (2023-10-18)

### Bug Fixes

- Reduce size of wheels by excluding generated .c files
  ([#1284](https://github.com/python-zeroconf/python-zeroconf/pull/1284),
  [`b6afa4b`](https://github.com/python-zeroconf/python-zeroconf/commit/b6afa4b2775a1fdb090145eccdc5711c98e7147a))


## v0.118.0 (2023-10-14)

### Features

- Small improvements to ServiceBrowser performance
  ([#1283](https://github.com/python-zeroconf/python-zeroconf/pull/1283),
  [`0fc031b`](https://github.com/python-zeroconf/python-zeroconf/commit/0fc031b1e7bf1766d5a1d39d70d300b86e36715e))


## v0.117.0 (2023-10-14)

### Features

- Small cleanups to incoming data handlers
  ([#1282](https://github.com/python-zeroconf/python-zeroconf/pull/1282),
  [`4f4bd9f`](https://github.com/python-zeroconf/python-zeroconf/commit/4f4bd9ff7c1e575046e5ea213d9b8c91ac7a24a9))


## v0.116.0 (2023-10-13)

### Features

- Reduce type checking overhead at run time
  ([#1281](https://github.com/python-zeroconf/python-zeroconf/pull/1281),
  [`8f30099`](https://github.com/python-zeroconf/python-zeroconf/commit/8f300996e5bd4316b2237f0502791dd0d6a855fe))


## v0.115.2 (2023-10-05)

### Bug Fixes

- Ensure ServiceInfo cache is cleared when adding to the registry
  ([#1279](https://github.com/python-zeroconf/python-zeroconf/pull/1279),
  [`2060eb2`](https://github.com/python-zeroconf/python-zeroconf/commit/2060eb2cc43489c34bea08924c3f40b875d5a498))

* There were production use cases that mutated the service info and re-registered it, which need to
  be accounted for


## v0.115.1 (2023-10-01)

### Bug Fixes

- Add missing python definition for addresses_by_version
  ([#1278](https://github.com/python-zeroconf/python-zeroconf/pull/1278),
  [`52ee02b`](https://github.com/python-zeroconf/python-zeroconf/commit/52ee02b16860e344c402124f4b2e2869536ec839))


## v0.115.0 (2023-09-26)

### Features

- Speed up outgoing multicast queue
  ([#1277](https://github.com/python-zeroconf/python-zeroconf/pull/1277),
  [`a13fd49`](https://github.com/python-zeroconf/python-zeroconf/commit/a13fd49d77474fd5858de809e48cbab1ccf89173))


## v0.114.0 (2023-09-25)

### Features

- Speed up responding to queries
  ([#1275](https://github.com/python-zeroconf/python-zeroconf/pull/1275),
  [`3c6b18c`](https://github.com/python-zeroconf/python-zeroconf/commit/3c6b18cdf4c94773ad6f4497df98feb337939ee9))


## v0.113.0 (2023-09-24)

### Features

- Improve performance of loading records from cache in ServiceInfo
  ([#1274](https://github.com/python-zeroconf/python-zeroconf/pull/1274),
  [`6257d49`](https://github.com/python-zeroconf/python-zeroconf/commit/6257d49952e02107f800f4ad4894716508edfcda))


## v0.112.0 (2023-09-14)

### Features

- Improve AsyncServiceBrowser performance
  ([#1273](https://github.com/python-zeroconf/python-zeroconf/pull/1273),
  [`0c88ecf`](https://github.com/python-zeroconf/python-zeroconf/commit/0c88ecf5ef6b9b256f991e7a630048de640999a6))


## v0.111.0 (2023-09-14)

### Features

- Speed up question and answer internals
  ([#1272](https://github.com/python-zeroconf/python-zeroconf/pull/1272),
  [`d24722b`](https://github.com/python-zeroconf/python-zeroconf/commit/d24722bfa4201d48ab482d35b0ef004f070ada80))


## v0.110.0 (2023-09-14)

### Features

- Small speed ups to ServiceBrowser
  ([#1271](https://github.com/python-zeroconf/python-zeroconf/pull/1271),
  [`22c433d`](https://github.com/python-zeroconf/python-zeroconf/commit/22c433ddaea3049ac49933325ba938fd87a529c0))


## v0.109.0 (2023-09-14)

### Features

- Speed up ServiceBrowsers with a cython pxd
  ([#1270](https://github.com/python-zeroconf/python-zeroconf/pull/1270),
  [`4837876`](https://github.com/python-zeroconf/python-zeroconf/commit/48378769c3887b5746ca00de30067a4c0851765c))


## v0.108.0 (2023-09-11)

### Features

- Improve performance of constructing outgoing queries
  ([#1267](https://github.com/python-zeroconf/python-zeroconf/pull/1267),
  [`00c439a`](https://github.com/python-zeroconf/python-zeroconf/commit/00c439a6400b7850ef9fdd75bc8d82d4e64b1da0))


## v0.107.0 (2023-09-11)

### Features

- Speed up responding to queries
  ([#1266](https://github.com/python-zeroconf/python-zeroconf/pull/1266),
  [`24a0a00`](https://github.com/python-zeroconf/python-zeroconf/commit/24a0a00b3e457979e279a2eeadc8fad2ab09e125))


## v0.106.0 (2023-09-11)

### Features

- Speed up answering questions
  ([#1265](https://github.com/python-zeroconf/python-zeroconf/pull/1265),
  [`37bfaf2`](https://github.com/python-zeroconf/python-zeroconf/commit/37bfaf2f630358e8c68652f3b3120931a6f94910))


## v0.105.0 (2023-09-10)

### Features

- Speed up ServiceInfo with a cython pxd
  ([#1264](https://github.com/python-zeroconf/python-zeroconf/pull/1264),
  [`7ca690a`](https://github.com/python-zeroconf/python-zeroconf/commit/7ca690ac3fa75e7474d3412944bbd5056cb313dd))


## v0.104.0 (2023-09-10)

### Features

- Speed up generating answers
  ([#1262](https://github.com/python-zeroconf/python-zeroconf/pull/1262),
  [`50a8f06`](https://github.com/python-zeroconf/python-zeroconf/commit/50a8f066b6ab90bc9e3300f81cf9332550b720df))


## v0.103.0 (2023-09-09)

### Features

- Avoid calling get_running_loop when resolving ServiceInfo
  ([#1261](https://github.com/python-zeroconf/python-zeroconf/pull/1261),
  [`33a2714`](https://github.com/python-zeroconf/python-zeroconf/commit/33a2714cadff96edf016b869cc63b0661d16ef2c))
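
`asyncio.get_running_loop()` is cheap but not free, so hot paths benefit from
capturing the loop once up front. A generic illustration of the pattern, not
the library's exact code:

```python
import asyncio
from typing import Callable


class Resolver:
    def __init__(self) -> None:
        # The object is constructed on the loop it will use, so the loop
        # can be captured once instead of looked up on every call.
        self.loop = asyncio.get_running_loop()

    def schedule(self, callback: Callable[[], None]) -> None:
        self.loop.call_soon(callback)


async def main() -> None:
    Resolver().schedule(lambda: print("resolved"))
    await asyncio.sleep(0)


asyncio.run(main())
```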


## v0.102.0 (2023-09-07)

### Features

- Significantly speed up writing outgoing dns records
  ([#1260](https://github.com/python-zeroconf/python-zeroconf/pull/1260),
  [`bf2f366`](https://github.com/python-zeroconf/python-zeroconf/commit/bf2f3660a1f341e50ab0ae586dfbacbc5ddcc077))


## v0.101.0 (2023-09-07)

### Features

- Speed up writing outgoing dns records
  ([#1259](https://github.com/python-zeroconf/python-zeroconf/pull/1259),
  [`248655f`](https://github.com/python-zeroconf/python-zeroconf/commit/248655f0276223b089373c70ec13a0385dfaa4d6))


## v0.100.0 (2023-09-07)

### Features

- Small speed up to writing outgoing dns records
  ([#1258](https://github.com/python-zeroconf/python-zeroconf/pull/1258),
  [`1ed6bd2`](https://github.com/python-zeroconf/python-zeroconf/commit/1ed6bd2ec4db0612b71384f923ffff1efd3ce878))


## v0.99.0 (2023-09-06)

### Features

- Reduce IP Address parsing overhead in ServiceInfo
  ([#1257](https://github.com/python-zeroconf/python-zeroconf/pull/1257),
  [`83d0b7f`](https://github.com/python-zeroconf/python-zeroconf/commit/83d0b7fda2eb09c9c6e18b85f329d1ddc701e3fb))


## v0.98.0 (2023-09-06)

### Features

- Speed up decoding incoming packets
  ([#1256](https://github.com/python-zeroconf/python-zeroconf/pull/1256),
  [`ac081cf`](https://github.com/python-zeroconf/python-zeroconf/commit/ac081cf00addde1ceea2c076f73905fdb293de3a))


## v0.97.0 (2023-09-03)

### Features

- Speed up answering queries ([#1255](https://github.com/python-zeroconf/python-zeroconf/pull/1255),
  [`2d3aed3`](https://github.com/python-zeroconf/python-zeroconf/commit/2d3aed36e24c73013fcf4acc90803fc1737d0917))


## v0.96.0 (2023-09-03)

### Features

- Optimize DNSCache.get_by_details
  ([#1254](https://github.com/python-zeroconf/python-zeroconf/pull/1254),
  [`ce59787`](https://github.com/python-zeroconf/python-zeroconf/commit/ce59787a170781ffdaa22425018d288b395ac081))

* feat: optimize DNSCache.get_by_details

This is one of the most called functions since ServiceInfo.load_from_cache calls it

* fix: make get_all_by_details thread-safe

* fix: remove unneeded key checks


## v0.95.0 (2023-09-03)

### Features

- Speed up adding and removing RecordUpdateListeners
  ([#1253](https://github.com/python-zeroconf/python-zeroconf/pull/1253),
  [`22e4a29`](https://github.com/python-zeroconf/python-zeroconf/commit/22e4a296d440b3038c0ff5ed6fc8878304ec4937))


## v0.94.0 (2023-09-03)

### Features

- Optimize cache implementation
  ([#1252](https://github.com/python-zeroconf/python-zeroconf/pull/1252),
  [`8d3ec79`](https://github.com/python-zeroconf/python-zeroconf/commit/8d3ec792277aaf7ef790318b5b35ab00839ca3b3))


## v0.93.1 (2023-09-03)

### Bug Fixes

- No change re-release due to unrecoverable failed CI run
  ([#1251](https://github.com/python-zeroconf/python-zeroconf/pull/1251),
  [`730921b`](https://github.com/python-zeroconf/python-zeroconf/commit/730921b155dfb9c62251c8c643b1302e807aff3b))


## v0.93.0 (2023-09-02)

### Features

- Reduce overhead to answer questions
  ([#1250](https://github.com/python-zeroconf/python-zeroconf/pull/1250),
  [`7cb8da0`](https://github.com/python-zeroconf/python-zeroconf/commit/7cb8da0c6c5c944588009fe36012c1197c422668))


## v0.92.0 (2023-09-02)

### Features

- Cache construction of records used to answer queries from the service registry
  ([#1243](https://github.com/python-zeroconf/python-zeroconf/pull/1243),
  [`0890f62`](https://github.com/python-zeroconf/python-zeroconf/commit/0890f628dbbd577fb77d3e6f2e267052b2b2b515))


## v0.91.1 (2023-09-02)

### Bug Fixes

- Remove useless calls in ServiceInfo
  ([#1248](https://github.com/python-zeroconf/python-zeroconf/pull/1248),
  [`4e40fae`](https://github.com/python-zeroconf/python-zeroconf/commit/4e40fae20bf50b4608e28fad4a360c4ed48ac86b))


## v0.91.0 (2023-09-02)

### Features

- Reduce overhead to process incoming updates by avoiding the handle_response shim
  ([#1247](https://github.com/python-zeroconf/python-zeroconf/pull/1247),
  [`5e31f0a`](https://github.com/python-zeroconf/python-zeroconf/commit/5e31f0afe4c341fbdbbbe50348a829ea553cbda0))


## v0.90.0 (2023-09-02)

### Features

- Avoid python float conversion in listener hot path
  ([#1245](https://github.com/python-zeroconf/python-zeroconf/pull/1245),
  [`816ad4d`](https://github.com/python-zeroconf/python-zeroconf/commit/816ad4dceb3859bad4bb136bdb1d1ee2daa0bf5a))

### Refactoring

- Reduce duplicate code in engine.py
  ([#1246](https://github.com/python-zeroconf/python-zeroconf/pull/1246),
  [`36ae505`](https://github.com/python-zeroconf/python-zeroconf/commit/36ae505dc9f95b59fdfb632960845a45ba8575b8))


## v0.89.0 (2023-09-02)

### Features

- Reduce overhead to process incoming questions
  ([#1244](https://github.com/python-zeroconf/python-zeroconf/pull/1244),
  [`18b65d1`](https://github.com/python-zeroconf/python-zeroconf/commit/18b65d1c75622869b0c29258215d3db3ae520d6c))


## v0.88.0 (2023-08-29)

### Features

- Speed up RecordManager with additional cython defs
  ([#1242](https://github.com/python-zeroconf/python-zeroconf/pull/1242),
  [`5a76fc5`](https://github.com/python-zeroconf/python-zeroconf/commit/5a76fc5ff74f2941ffbf7570e45390f35e0b7e01))


## v0.87.0 (2023-08-29)

### Features

- Improve performance by adding cython pxd for RecordManager
  ([#1241](https://github.com/python-zeroconf/python-zeroconf/pull/1241),
  [`a7dad3d`](https://github.com/python-zeroconf/python-zeroconf/commit/a7dad3d9743586f352e21eea1e129c6875f9a713))


## v0.86.0 (2023-08-28)

### Features

- Build wheels for cpython 3.12
  ([#1239](https://github.com/python-zeroconf/python-zeroconf/pull/1239),
  [`58bc154`](https://github.com/python-zeroconf/python-zeroconf/commit/58bc154f55b06b4ddfc4a141592488abe76f062a))

- Use server_key when processing DNSService records
  ([#1238](https://github.com/python-zeroconf/python-zeroconf/pull/1238),
  [`cc8feb1`](https://github.com/python-zeroconf/python-zeroconf/commit/cc8feb110fefc3fb714fd482a52f16e2b620e8c4))


## v0.85.0 (2023-08-27)

### Features

- Simplify code to unpack properties
  ([#1237](https://github.com/python-zeroconf/python-zeroconf/pull/1237),
  [`68d9998`](https://github.com/python-zeroconf/python-zeroconf/commit/68d99985a0e9d2c72ff670b2e2af92271a6fe934))


## v0.84.0 (2023-08-27)

### Features

- Context managers in ServiceBrowser and AsyncServiceBrowser
  ([#1233](https://github.com/python-zeroconf/python-zeroconf/pull/1233),
  [`bd8d846`](https://github.com/python-zeroconf/python-zeroconf/commit/bd8d8467dec2a39a0b525043ea1051259100fded))

Co-authored-by: J. Nick Koston <nick@koston.org>
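
With this change a browser can be scoped to a `with` block so it is always
cancelled on exit. A usage sketch (the listener is a minimal stand-in):

```python
from zeroconf import ServiceBrowser, ServiceListener, Zeroconf


class Handler(ServiceListener):
    def add_service(self, zc: Zeroconf, type_: str, name: str) -> None:
        print(f"found {name}")

    def remove_service(self, zc: Zeroconf, type_: str, name: str) -> None:
        print(f"lost {name}")

    def update_service(self, zc: Zeroconf, type_: str, name: str) -> None:
        print(f"updated {name}")


zc = Zeroconf()
with ServiceBrowser(zc, "_http._tcp.local.", Handler()):
    input("Browsing _http._tcp.local.; press enter to stop\n")
zc.close()
```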


## v0.83.1 (2023-08-27)

### Bug Fixes

- Rebuild wheels with cython 3.0.2
  ([#1236](https://github.com/python-zeroconf/python-zeroconf/pull/1236),
  [`dd637fb`](https://github.com/python-zeroconf/python-zeroconf/commit/dd637fb2e5a87ba283750e69d116e124bef54e7c))


## v0.83.0 (2023-08-26)

### Features

- Speed up question and answer history with a cython pxd
  ([#1234](https://github.com/python-zeroconf/python-zeroconf/pull/1234),
  [`703ecb2`](https://github.com/python-zeroconf/python-zeroconf/commit/703ecb2901b2150fb72fac3deed61d7302561298))


## v0.82.1 (2023-08-22)

### Bug Fixes

- Build failures with older cython 0.29 series
  ([#1232](https://github.com/python-zeroconf/python-zeroconf/pull/1232),
  [`30c3ad9`](https://github.com/python-zeroconf/python-zeroconf/commit/30c3ad9d1bc6b589e1ca6675fea21907ebcd1ced))


## v0.82.0 (2023-08-22)

### Features

- Optimize processing of records in RecordUpdateListener subclasses
  ([#1231](https://github.com/python-zeroconf/python-zeroconf/pull/1231),
  [`3e89294`](https://github.com/python-zeroconf/python-zeroconf/commit/3e89294ea0ecee1122e1c1ffdc78925add8ca40e))


## v0.81.0 (2023-08-22)

### Features

- Optimize sending answers to questions
  ([#1227](https://github.com/python-zeroconf/python-zeroconf/pull/1227),
  [`cd7b56b`](https://github.com/python-zeroconf/python-zeroconf/commit/cd7b56b2aa0c8ee429da430e9a36abd515512011))

- Speed up the service registry with a cython pxd
  ([#1226](https://github.com/python-zeroconf/python-zeroconf/pull/1226),
  [`47d3c7a`](https://github.com/python-zeroconf/python-zeroconf/commit/47d3c7ad4bc5f2247631c3ad5e6b6156d45a0a4e))


## v0.80.0 (2023-08-15)

### Features

- Optimize unpacking properties in ServiceInfo
  ([#1225](https://github.com/python-zeroconf/python-zeroconf/pull/1225),
  [`1492e41`](https://github.com/python-zeroconf/python-zeroconf/commit/1492e41b3d5cba5598cc9dd6bd2bc7d238f13555))


## v0.79.0 (2023-08-14)

### Features

- Refactor notify implementation to reduce overhead of adding and removing listeners
  ([#1224](https://github.com/python-zeroconf/python-zeroconf/pull/1224),
  [`ceb92cf`](https://github.com/python-zeroconf/python-zeroconf/commit/ceb92cfe42d885dbb38cee7aaeebf685d97627a9))


## v0.78.0 (2023-08-14)

### Features

- Add cython pxd file for _listener.py to improve incoming message processing performance
  ([#1221](https://github.com/python-zeroconf/python-zeroconf/pull/1221),
  [`f459856`](https://github.com/python-zeroconf/python-zeroconf/commit/f459856a0a61b8afa8a541926d7e15d51f8e4aea))


## v0.77.0 (2023-08-14)

### Features

- Cythonize _listener.py to improve incoming message processing performance
  ([#1220](https://github.com/python-zeroconf/python-zeroconf/pull/1220),
  [`9efde8c`](https://github.com/python-zeroconf/python-zeroconf/commit/9efde8c8c1ed14c5d3c162f185b49212fcfcb5c9))


## v0.76.0 (2023-08-14)

### Features

- Improve performance responding to queries
  ([#1217](https://github.com/python-zeroconf/python-zeroconf/pull/1217),
  [`69b33be`](https://github.com/python-zeroconf/python-zeroconf/commit/69b33be3b2f9d4a27ef5154cae94afca048efffa))


## v0.75.0 (2023-08-13)

### Features

- Expose flag to disable strict name checking in service registration
  ([#1215](https://github.com/python-zeroconf/python-zeroconf/pull/1215),
  [`5df8a57`](https://github.com/python-zeroconf/python-zeroconf/commit/5df8a57a14d59687a3c22ea8ee063e265031e278))

- Speed up processing incoming records
  ([#1216](https://github.com/python-zeroconf/python-zeroconf/pull/1216),
  [`aff625d`](https://github.com/python-zeroconf/python-zeroconf/commit/aff625dc6a5e816dad519644c4adac4f96980c04))
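
The strict-checking entry above relaxes service-name validation at
registration time. A hedged sketch, assuming the keyword exposed by this
release is `strict` on `register_service`:

```python
import socket

from zeroconf import ServiceInfo, Zeroconf

zc = Zeroconf()
info = ServiceInfo(
    "_http._tcp.local.",
    "Demo Service._http._tcp.local.",
    addresses=[socket.inet_aton("127.0.0.1")],
    port=8080,
)
# strict=False skips the RFC-style name checks that would otherwise
# reject nonconforming instance names.
zc.register_service(info, strict=False)
zc.unregister_service(info)
zc.close()
```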


## v0.74.0 (2023-08-04)

### Bug Fixes

- Remove typing on reset_ttl for cython compat
  ([#1213](https://github.com/python-zeroconf/python-zeroconf/pull/1213),
  [`0094e26`](https://github.com/python-zeroconf/python-zeroconf/commit/0094e2684344c6b7edd7948924f093f1b4c19901))

### Features

- Speed up unpacking text records in ServiceInfo
  ([#1212](https://github.com/python-zeroconf/python-zeroconf/pull/1212),
  [`99a6f98`](https://github.com/python-zeroconf/python-zeroconf/commit/99a6f98e44a1287ba537eabb852b1b69923402f0))


## v0.73.0 (2023-08-03)

### Features

- Add a cache to service_type_name
  ([#1211](https://github.com/python-zeroconf/python-zeroconf/pull/1211),
  [`53a694f`](https://github.com/python-zeroconf/python-zeroconf/commit/53a694f60e675ae0560e727be6b721b401c2b68f))


## v0.72.3 (2023-08-03)

### Bug Fixes

- Revert adding typing to DNSRecord.suppressed_by
  ([#1210](https://github.com/python-zeroconf/python-zeroconf/pull/1210),
  [`3dba5ae`](https://github.com/python-zeroconf/python-zeroconf/commit/3dba5ae0c0e9473b7b20fd6fc79fa1a3b298dc5a))


## v0.72.2 (2023-08-03)

### Bug Fixes

- Revert DNSIncoming cimport in _dns.pxd
  ([#1209](https://github.com/python-zeroconf/python-zeroconf/pull/1209),
  [`5f14b6d`](https://github.com/python-zeroconf/python-zeroconf/commit/5f14b6dc687b3a0716d0ca7f61ccf1e93dfe5fa1))


## v0.72.1 (2023-08-03)

### Bug Fixes

- Race with InvalidStateError when async_request times out
  ([#1208](https://github.com/python-zeroconf/python-zeroconf/pull/1208),
  [`2233b6b`](https://github.com/python-zeroconf/python-zeroconf/commit/2233b6bc4ceeee5524d2ee88ecae8234173feb5f))


## v0.72.0 (2023-08-02)

### Features

- Speed up processing incoming records
  ([#1206](https://github.com/python-zeroconf/python-zeroconf/pull/1206),
  [`126849c`](https://github.com/python-zeroconf/python-zeroconf/commit/126849c92be8cec9253fba9faa591029d992fcc3))


## v0.71.5 (2023-08-02)

### Bug Fixes

- Improve performance of ServiceInfo.async_request
  ([#1205](https://github.com/python-zeroconf/python-zeroconf/pull/1205),
  [`8019a73`](https://github.com/python-zeroconf/python-zeroconf/commit/8019a73c952f2fc4c88d849aab970fafedb316d8))


## v0.71.4 (2023-07-24)

### Bug Fixes

- Cleanup naming from previous refactoring in ServiceInfo
  ([#1202](https://github.com/python-zeroconf/python-zeroconf/pull/1202),
  [`b272d75`](https://github.com/python-zeroconf/python-zeroconf/commit/b272d75abd982f3be1f4b20f683cac38011cc6f4))


## v0.71.3 (2023-07-23)

### Bug Fixes

- Pin python-semantic-release to fix release process
  ([#1200](https://github.com/python-zeroconf/python-zeroconf/pull/1200),
  [`c145a23`](https://github.com/python-zeroconf/python-zeroconf/commit/c145a238d768aa17c3aebe120c20a46bfbec6b99))


## v0.71.2 (2023-07-23)

### Bug Fixes

- No change re-release to fix wheel builds
  ([#1199](https://github.com/python-zeroconf/python-zeroconf/pull/1199),
  [`8c3a4c8`](https://github.com/python-zeroconf/python-zeroconf/commit/8c3a4c80c221bea7401c12e1c6a525e75b7ffea2))


## v0.71.1 (2023-07-23)

### Bug Fixes

- Add missing if TYPE_CHECKING guard to generate_service_query
  ([#1198](https://github.com/python-zeroconf/python-zeroconf/pull/1198),
  [`ac53adf`](https://github.com/python-zeroconf/python-zeroconf/commit/ac53adf7e71db14c1a0f9adbfd1d74033df36898))


## v0.71.0 (2023-07-08)

### Features

- Improve incoming data processing performance
  ([#1194](https://github.com/python-zeroconf/python-zeroconf/pull/1194),
  [`a56c776`](https://github.com/python-zeroconf/python-zeroconf/commit/a56c776008ef86f99db78f5997e45a57551be725))


## v0.70.0 (2023-07-02)

### Features

- Add support for sending to a specific `addr` and `port` with `ServiceInfo.async_request` and
  `ServiceInfo.request` ([#1192](https://github.com/python-zeroconf/python-zeroconf/pull/1192),
  [`405f547`](https://github.com/python-zeroconf/python-zeroconf/commit/405f54762d3f61e97de9c1787e837e953de31412))
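
This lets a lookup be directed at a single responder instead of the multicast
group. A usage sketch with documentation-range addresses (hypothetical values):

```python
import asyncio

from zeroconf import Zeroconf
from zeroconf.asyncio import AsyncServiceInfo


async def main() -> None:
    zc = Zeroconf()
    info = AsyncServiceInfo("_http._tcp.local.", "demo._http._tcp.local.")
    # Send the queries unicast to 192.0.2.10:5353 instead of multicasting.
    if await info.async_request(zc, timeout=3000, addr="192.0.2.10", port=5353):
        print(info.parsed_addresses())


asyncio.run(main())
```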


## v0.69.0 (2023-06-18)

### Features

- Cython3 support ([#1190](https://github.com/python-zeroconf/python-zeroconf/pull/1190),
  [`8ae8ba1`](https://github.com/python-zeroconf/python-zeroconf/commit/8ae8ba1af324b0c8c2da3bd12c264a5c0f3dcc3d))

- Reorder incoming data handler to reduce overhead
  ([#1189](https://github.com/python-zeroconf/python-zeroconf/pull/1189),
  [`32756ff`](https://github.com/python-zeroconf/python-zeroconf/commit/32756ff113f675b7a9cf16d3c0ab840ba733e5e4))


## v0.68.1 (2023-06-18)

### Bug Fixes

- Reduce debug logging overhead by adding missing checks to datagram_received
  ([#1188](https://github.com/python-zeroconf/python-zeroconf/pull/1188),
  [`ac5c50a`](https://github.com/python-zeroconf/python-zeroconf/commit/ac5c50afc70aaa33fcd20bf02222ff4f0c596fa3))


## v0.68.0 (2023-06-17)

### Features

- Reduce overhead to handle queries and responses
  ([#1184](https://github.com/python-zeroconf/python-zeroconf/pull/1184),
  [`81126b7`](https://github.com/python-zeroconf/python-zeroconf/commit/81126b7600f94848ef8c58b70bac0c6ab993c6ae))

- adds slots to handler classes

- avoid any expression overhead and inline instead


## v0.67.0 (2023-06-17)

### Features

- Speed up answering incoming questions
  ([#1186](https://github.com/python-zeroconf/python-zeroconf/pull/1186),
  [`8f37665`](https://github.com/python-zeroconf/python-zeroconf/commit/8f376658d2a3bef0353646e6fddfda15626b73a9))


## v0.66.0 (2023-06-13)

### Features

- Optimize construction of outgoing dns records
  ([#1182](https://github.com/python-zeroconf/python-zeroconf/pull/1182),
  [`fc0341f`](https://github.com/python-zeroconf/python-zeroconf/commit/fc0341f281cdb71428c0f1cf90c12d34cbb4acae))


## v0.65.0 (2023-06-13)

### Features

- Reduce overhead to enumerate ip addresses in ServiceInfo
  ([#1181](https://github.com/python-zeroconf/python-zeroconf/pull/1181),
  [`6a85cbf`](https://github.com/python-zeroconf/python-zeroconf/commit/6a85cbf2b872cb0abd184c2dd728d9ae3eb8115c))


## v0.64.1 (2023-06-05)

### Bug Fixes

- Small internal typing cleanups
  ([#1180](https://github.com/python-zeroconf/python-zeroconf/pull/1180),
  [`f03e511`](https://github.com/python-zeroconf/python-zeroconf/commit/f03e511f7aae72c5ccd4f7514d89e168847bd7a2))


## v0.64.0 (2023-06-05)

### Bug Fixes

- Always answer QU questions when the exact same packet is received from different sources in
  sequence ([#1178](https://github.com/python-zeroconf/python-zeroconf/pull/1178),
  [`74d7ba1`](https://github.com/python-zeroconf/python-zeroconf/commit/74d7ba1aeeae56be087ee8142ee6ca1219744baa))

If the exact same packet with a QU question is received from two different sources within a 1s
  window, we end up ignoring the second one as a duplicate. We should still respond in this case
  because the client wants a unicast response, and the question may not have been answered by the
  previous packet since that response may not have been multicast.

fix: include NSEC records in initial broadcast when registering a new service

This also revealed that we do not send NSEC records in the initial broadcast. This needed to be
  fixed in this PR as well for everything to work as expected since all the tests would fail with 2
  updates otherwise.

### Features

- Speed up processing incoming records
  ([#1179](https://github.com/python-zeroconf/python-zeroconf/pull/1179),
  [`d919316`](https://github.com/python-zeroconf/python-zeroconf/commit/d9193160b05beeca3755e19fd377ba13fe37b071))


## v0.63.0 (2023-05-25)

### Features

- Improve dns cache performance
  ([#1172](https://github.com/python-zeroconf/python-zeroconf/pull/1172),
  [`bb496a1`](https://github.com/python-zeroconf/python-zeroconf/commit/bb496a1dd5fa3562c0412cb064d14639a542592e))

- Small speed up to fetch dns addresses from ServiceInfo
  ([#1176](https://github.com/python-zeroconf/python-zeroconf/pull/1176),
  [`4deaa6e`](https://github.com/python-zeroconf/python-zeroconf/commit/4deaa6ed7c9161db55bf16ec068ab7260bbd4976))

- Speed up the service registry
  ([#1174](https://github.com/python-zeroconf/python-zeroconf/pull/1174),
  [`360ceb2`](https://github.com/python-zeroconf/python-zeroconf/commit/360ceb2548c4c4974ff798aac43a6fff9803ea0e))


## v0.62.0 (2023-05-04)

### Features

- Improve performance of ServiceBrowser outgoing query scheduler
  ([#1170](https://github.com/python-zeroconf/python-zeroconf/pull/1170),
  [`963d022`](https://github.com/python-zeroconf/python-zeroconf/commit/963d022ef82b615540fa7521d164a98a6c6f5209))


## v0.61.0 (2023-05-03)

### Features

- Speed up parsing NSEC records
  ([#1169](https://github.com/python-zeroconf/python-zeroconf/pull/1169),
  [`06fa94d`](https://github.com/python-zeroconf/python-zeroconf/commit/06fa94d87b4f0451cb475a921ce1d8e9562e0f26))


## v0.60.0 (2023-05-01)

### Features

- Speed up processing incoming data
  ([#1167](https://github.com/python-zeroconf/python-zeroconf/pull/1167),
  [`fbaaf7b`](https://github.com/python-zeroconf/python-zeroconf/commit/fbaaf7bb6ff985bdabb85feb6cba144f12d4f1d6))


## v0.59.0 (2023-05-01)

### Features

- Speed up decoding dns questions when processing incoming data
  ([#1168](https://github.com/python-zeroconf/python-zeroconf/pull/1168),
  [`f927190`](https://github.com/python-zeroconf/python-zeroconf/commit/f927190cb24f70fd7c825c6e12151fcc0daf3973))


## v0.58.2 (2023-04-26)

### Bug Fixes

- Re-release to rebuild failed wheels
  ([#1165](https://github.com/python-zeroconf/python-zeroconf/pull/1165),
  [`4986271`](https://github.com/python-zeroconf/python-zeroconf/commit/498627166a4976f1d9d8cd1f3654b0d50272d266))


## v0.58.1 (2023-04-26)

### Bug Fixes

- Reduce cast calls in service browser
  ([#1164](https://github.com/python-zeroconf/python-zeroconf/pull/1164),
  [`c0d65ae`](https://github.com/python-zeroconf/python-zeroconf/commit/c0d65aeae7037a18ed1149336f5e7bdb8b2dd8cf))


## v0.58.0 (2023-04-23)

### Features

- Speed up incoming parser ([#1163](https://github.com/python-zeroconf/python-zeroconf/pull/1163),
  [`4626399`](https://github.com/python-zeroconf/python-zeroconf/commit/46263999c0c7ea5176885f1eadd2c8498834b70e))


## v0.57.0 (2023-04-23)

### Features

- Speed up incoming data parser
  ([#1161](https://github.com/python-zeroconf/python-zeroconf/pull/1161),
  [`cb4c3b2`](https://github.com/python-zeroconf/python-zeroconf/commit/cb4c3b2b80ca3b88b8de6e87062a45e03e8805a6))


## v0.56.0 (2023-04-07)

### Features

- Reduce denial of service protection overhead
  ([#1157](https://github.com/python-zeroconf/python-zeroconf/pull/1157),
  [`2c2f26a`](https://github.com/python-zeroconf/python-zeroconf/commit/2c2f26a87d0aac81a77205b06bc9ba499caa2321))


## v0.55.0 (2023-04-07)

### Features

- Improve performance of processing incoming records
  ([#1155](https://github.com/python-zeroconf/python-zeroconf/pull/1155),
  [`b65e279`](https://github.com/python-zeroconf/python-zeroconf/commit/b65e2792751c44e0fafe9ad3a55dadc5d8ee9d46))


## v0.54.0 (2023-04-03)

### Features

- Avoid waking async_request when record updates are not relevant
  ([#1153](https://github.com/python-zeroconf/python-zeroconf/pull/1153),
  [`a3f970c`](https://github.com/python-zeroconf/python-zeroconf/commit/a3f970c7f66067cf2c302c49ed6ad8286f19b679))


## v0.53.1 (2023-04-03)

### Bug Fixes

- Addresses were incorrect after server name change
  ([#1154](https://github.com/python-zeroconf/python-zeroconf/pull/1154),
  [`41ea06a`](https://github.com/python-zeroconf/python-zeroconf/commit/41ea06a0192c0d186e678009285759eb37d880d5))


## v0.53.0 (2023-04-02)

### Bug Fixes

- Make parsed_scoped_addresses return addresses in the same order as all other methods
  ([#1150](https://github.com/python-zeroconf/python-zeroconf/pull/1150),
  [`9b6adcf`](https://github.com/python-zeroconf/python-zeroconf/commit/9b6adcf5c04a469632ee866c32f5898c5cbf810a))

### Features

- Improve ServiceBrowser performance by removing OrderedDict
  ([#1148](https://github.com/python-zeroconf/python-zeroconf/pull/1148),
  [`9a16be5`](https://github.com/python-zeroconf/python-zeroconf/commit/9a16be56a9f69a5d0f7cde13dc1337b6d93c1433))


## v0.52.0 (2023-04-02)

### Features

- Add ip_addresses_by_version to ServiceInfo
  ([#1145](https://github.com/python-zeroconf/python-zeroconf/pull/1145),
  [`524494e`](https://github.com/python-zeroconf/python-zeroconf/commit/524494edd49bd049726b19ae8ac8f6eea69a3943))

- Include tests and docs in sdist archives
  ([#1142](https://github.com/python-zeroconf/python-zeroconf/pull/1142),
  [`da10a3b`](https://github.com/python-zeroconf/python-zeroconf/commit/da10a3b2827cee0719d3bb9152ae897f061c6e2e))

Include documentation and test files in source distributions, in order to make them more useful
  for packagers (Linux distributions, Conda). Testing is an important part of the packaging
  process, and at least Gentoo users have requested offline documentation for Python packages.
  Furthermore, the COPYING file was missing from the sdist, even though it was referenced in the
  README.

- Small cleanups to cache cleanup interval
  ([#1146](https://github.com/python-zeroconf/python-zeroconf/pull/1146),
  [`b434b60`](https://github.com/python-zeroconf/python-zeroconf/commit/b434b60f14ebe8f114b7b19bb4f54081c8ae0173))

- Speed up matching types in the ServiceBrowser
  ([#1144](https://github.com/python-zeroconf/python-zeroconf/pull/1144),
  [`68871c3`](https://github.com/python-zeroconf/python-zeroconf/commit/68871c3b5569e41740a66b7d3d7fa5cc41514ea5))

- Speed up processing records in the ServiceBrowser
  ([#1143](https://github.com/python-zeroconf/python-zeroconf/pull/1143),
  [`6a327d0`](https://github.com/python-zeroconf/python-zeroconf/commit/6a327d00ffb81de55b7c5b599893c789996680c1))


## v0.51.0 (2023-04-01)

### Features

- Improve performance of constructing ServiceInfo
  ([#1141](https://github.com/python-zeroconf/python-zeroconf/pull/1141),
  [`36d5b45`](https://github.com/python-zeroconf/python-zeroconf/commit/36d5b45a4ece1dca902e9c3c79b5a63b8d9ae41f))


## v0.50.0 (2023-04-01)

### Features

- Small speed up to handler dispatch
  ([#1140](https://github.com/python-zeroconf/python-zeroconf/pull/1140),
  [`5bd1b6e`](https://github.com/python-zeroconf/python-zeroconf/commit/5bd1b6e7b4dd796069461c737ded956305096307))


## v0.49.0 (2023-04-01)

### Features

- Speed up processing incoming records
  ([#1139](https://github.com/python-zeroconf/python-zeroconf/pull/1139),
  [`7246a34`](https://github.com/python-zeroconf/python-zeroconf/commit/7246a344b6c0543871b40715c95c9435db4c7f81))


## v0.48.0 (2023-04-01)

### Features

- Reduce overhead to send responses
  ([#1135](https://github.com/python-zeroconf/python-zeroconf/pull/1135),
  [`c4077dd`](https://github.com/python-zeroconf/python-zeroconf/commit/c4077dde6dfde9e2598eb63daa03c36063a3e7b0))


## v0.47.4 (2023-03-20)

### Bug Fixes

- Correct duplicate record entries in windows wheels by updating poetry-core
  ([#1134](https://github.com/python-zeroconf/python-zeroconf/pull/1134),
  [`a43055d`](https://github.com/python-zeroconf/python-zeroconf/commit/a43055d3fa258cd762c3e9394b01f8bdcb24f97e))


## v0.47.3 (2023-02-14)

### Bug Fixes

- Hold a strong reference to the query sender start task
  ([#1128](https://github.com/python-zeroconf/python-zeroconf/pull/1128),
  [`808c3b2`](https://github.com/python-zeroconf/python-zeroconf/commit/808c3b2194a7f499a469a9893102d328ccee83db))


## v0.47.2 (2023-02-14)

### Bug Fixes

- Missing c extensions with newer poetry
  ([#1129](https://github.com/python-zeroconf/python-zeroconf/pull/1129),
  [`44d7fc6`](https://github.com/python-zeroconf/python-zeroconf/commit/44d7fc6483485102f60c91d591d0d697872f8865))


## v0.47.1 (2022-12-24)

### Bug Fixes

- The equality checks for DNSPointer and DNSService should be case insensitive
  ([#1122](https://github.com/python-zeroconf/python-zeroconf/pull/1122),
  [`48ae77f`](https://github.com/python-zeroconf/python-zeroconf/commit/48ae77f026a96e2ca475b0ff80cb6d22207ce52f))


## v0.47.0 (2022-12-22)

### Features

- Optimize equality checks for DNS records
  ([#1120](https://github.com/python-zeroconf/python-zeroconf/pull/1120),
  [`3a25ff7`](https://github.com/python-zeroconf/python-zeroconf/commit/3a25ff74bea83cd7d50888ce1ebfd7650d704bfa))


## v0.46.0 (2022-12-21)

### Features

- Optimize the dns cache ([#1119](https://github.com/python-zeroconf/python-zeroconf/pull/1119),
  [`e80fcef`](https://github.com/python-zeroconf/python-zeroconf/commit/e80fcef967024f8e846e44b464a82a25f5550edf))


## v0.45.0 (2022-12-20)

### Features

- Optimize construction of outgoing packets
  ([#1118](https://github.com/python-zeroconf/python-zeroconf/pull/1118),
  [`81e186d`](https://github.com/python-zeroconf/python-zeroconf/commit/81e186d365c018381f9b486a4dbe4e2e4b8bacbf))


## v0.44.0 (2022-12-18)

### Features

- Optimize dns objects by adding pxd files
  ([#1113](https://github.com/python-zeroconf/python-zeroconf/pull/1113),
  [`919d4d8`](https://github.com/python-zeroconf/python-zeroconf/commit/919d4d875747b4fa68e25bccd5aae7f304d8a36d))


## v0.43.0 (2022-12-18)

### Features

- Optimize incoming parser by reducing call stack
  ([#1116](https://github.com/python-zeroconf/python-zeroconf/pull/1116),
  [`11f3f0e`](https://github.com/python-zeroconf/python-zeroconf/commit/11f3f0e699e00c1ee3d6d8ab5e30f62525510589))


## v0.42.0 (2022-12-18)

### Features

- Optimize incoming parser by using unpack_from
  ([#1115](https://github.com/python-zeroconf/python-zeroconf/pull/1115),
  [`a7d50ba`](https://github.com/python-zeroconf/python-zeroconf/commit/a7d50baab362eadd2d292df08a39de6836b41ea7))


## v0.41.0 (2022-12-18)

### Features

- Optimize incoming parser by adding pxd files
  ([#1111](https://github.com/python-zeroconf/python-zeroconf/pull/1111),
  [`26efeb0`](https://github.com/python-zeroconf/python-zeroconf/commit/26efeb09783050266242542228f34eb4dd83e30c))


## v0.40.1 (2022-12-18)

### Bug Fixes

- Fix project name in pyproject.toml
  ([#1112](https://github.com/python-zeroconf/python-zeroconf/pull/1112),
  [`a330f62`](https://github.com/python-zeroconf/python-zeroconf/commit/a330f62040475257c4a983044e1675aeb95e030a))


## v0.40.0 (2022-12-17)

### Features

- Drop async_timeout requirement for python 3.11+
  ([#1107](https://github.com/python-zeroconf/python-zeroconf/pull/1107),
  [`1f4224e`](https://github.com/python-zeroconf/python-zeroconf/commit/1f4224ef122299235013cb81b501f8ff9a30dea1))


## v0.39.5 (2022-12-17)


## v0.39.4 (2022-10-31)


## v0.39.3 (2022-10-26)


## v0.39.2 (2022-10-20)


## v0.39.1 (2022-09-05)


## v0.39.0 (2022-08-05)


## v0.38.7 (2022-06-14)


## v0.38.6 (2022-05-06)


## v0.38.5 (2022-05-01)


## v0.38.4 (2022-02-28)


## v0.38.3 (2022-01-31)


## v0.38.2 (2022-01-31)


## v0.38.1 (2021-12-23)


## v0.38.0 (2021-12-23)


## v0.37.0 (2021-11-18)


## v0.36.13 (2021-11-13)


## v0.36.12 (2021-11-05)


## v0.36.11 (2021-10-30)


## v0.36.10 (2021-10-30)


## v0.36.9 (2021-10-22)


## v0.36.8 (2021-10-10)


## v0.36.7 (2021-09-22)


## v0.36.6 (2021-09-19)


## v0.36.5 (2021-09-18)


## v0.36.4 (2021-09-16)


## v0.36.3 (2021-09-14)


## v0.36.2 (2021-08-30)


## v0.36.1 (2021-08-29)


## v0.36.0 (2021-08-16)


## v0.35.1 (2021-08-15)


## v0.35.0 (2021-08-13)


## v0.34.3 (2021-08-09)


## v0.34.2 (2021-08-09)


## v0.34.1 (2021-08-08)


## v0.34.0 (2021-08-08)


## v0.33.4 (2021-08-06)


## v0.33.3 (2021-08-05)


## v0.33.2 (2021-07-28)


## v0.33.1 (2021-07-18)


## v0.33.0 (2021-07-18)


## v0.32.1 (2021-07-05)


## v0.32.0 (2021-06-30)


## v0.29.0 (2021-03-25)


## v0.28.8 (2021-01-04)


## v0.28.7 (2020-12-13)


## v0.28.6 (2020-10-13)


## v0.28.5 (2020-09-11)


## v0.28.4 (2020-09-06)


## v0.28.3 (2020-08-31)


## v0.28.2 (2020-08-27)


## v0.28.1 (2020-08-17)


## v0.28.0 (2020-07-07)


## v0.27.1 (2020-06-05)


## v0.27.0 (2020-05-27)


## v0.26.3 (2020-05-26)


## v0.26.1 (2020-05-06)


## v0.26.0 (2020-04-26)


## v0.25.1 (2020-04-14)


## v0.25.0 (2020-04-03)


## v0.24.5 (2020-03-08)


## v0.24.4 (2019-12-30)


## v0.24.3 (2019-12-23)


## v0.24.2 (2019-12-17)


## v0.24.1 (2019-12-16)


## v0.24.0 (2019-11-19)


## v0.23.0 (2019-06-04)


## v0.22.0 (2019-04-27)


## v0.21.3 (2018-09-21)


## v0.21.2 (2018-09-20)


## v0.21.1 (2018-09-17)


## v0.21.0 (2018-09-16)


## v0.20.0 (2018-02-21)


## v0.19.1 (2017-06-13)


## v0.19.0 (2017-03-21)


## v0.18.0 (2017-02-03)


## v0.17.7 (2017-02-01)


## v0.17.6 (2016-07-08)

### Testing

- Added test for DNS-SD subtype discovery
  ([`914241b`](https://github.com/python-zeroconf/python-zeroconf/commit/914241b92c3097669e1e8c1a380f6c2f23a14cf8))


## v0.17.5 (2016-03-14)


## v0.17.4 (2015-09-22)


## v0.17.3 (2015-08-19)


## v0.17.2 (2015-07-12)


## v0.17.1 (2015-04-10)


## v0.17.0 (2015-04-10)


## v0.15.1 (2014-07-10)
0707010000000A000081A400000000000000000000000167C7AD1600005F3C000000000000000000000000000000000000002000000000python-zeroconf-0.146.0/COPYING		  GNU LESSER GENERAL PUBLIC LICENSE
		       Version 2.1, February 1999

 Copyright (C) 1991, 1999 Free Software Foundation, Inc.
     51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

[This is the first released version of the Lesser GPL.  It also counts
 as the successor of the GNU Library Public License, version 2, hence
 the version number 2.1.]

			    Preamble

  The licenses for most software are designed to take away your
freedom to share and change it.  By contrast, the GNU General Public
Licenses are intended to guarantee your freedom to share and change
free software--to make sure the software is free for all its users.

  This license, the Lesser General Public License, applies to some
specially designated software packages--typically libraries--of the
Free Software Foundation and other authors who decide to use it.  You
can use it too, but we suggest you first think carefully about whether
this license or the ordinary General Public License is the better
strategy to use in any particular case, based on the explanations below.

  When we speak of free software, we are referring to freedom of use,
not price.  Our General Public Licenses are designed to make sure that
you have the freedom to distribute copies of free software (and charge
for this service if you wish); that you receive source code or can get
it if you want it; that you can change the software and use pieces of
it in new free programs; and that you are informed that you can do
these things.

  To protect your rights, we need to make restrictions that forbid
distributors to deny you these rights or to ask you to surrender these
rights.  These restrictions translate to certain responsibilities for
you if you distribute copies of the library or if you modify it.

  For example, if you distribute copies of the library, whether gratis
or for a fee, you must give the recipients all the rights that we gave
you.  You must make sure that they, too, receive or can get the source
code.  If you link other code with the library, you must provide
complete object files to the recipients, so that they can relink them
with the library after making changes to the library and recompiling
it.  And you must show them these terms so they know their rights.

  We protect your rights with a two-step method: (1) we copyright the
library, and (2) we offer you this license, which gives you legal
permission to copy, distribute and/or modify the library.

  To protect each distributor, we want to make it very clear that
there is no warranty for the free library.  Also, if the library is
modified by someone else and passed on, the recipients should know
that what they have is not the original version, so that the original
author's reputation will not be affected by problems that might be
introduced by others.

  Finally, software patents pose a constant threat to the existence of
any free program.  We wish to make sure that a company cannot
effectively restrict the users of a free program by obtaining a
restrictive license from a patent holder.  Therefore, we insist that
any patent license obtained for a version of the library must be
consistent with the full freedom of use specified in this license.

  Most GNU software, including some libraries, is covered by the
ordinary GNU General Public License.  This license, the GNU Lesser
General Public License, applies to certain designated libraries, and
is quite different from the ordinary General Public License.  We use
this license for certain libraries in order to permit linking those
libraries into non-free programs.

  When a program is linked with a library, whether statically or using
a shared library, the combination of the two is legally speaking a
combined work, a derivative of the original library.  The ordinary
General Public License therefore permits such linking only if the
entire combination fits its criteria of freedom.  The Lesser General
Public License permits more lax criteria for linking other code with
the library.

  We call this license the "Lesser" General Public License because it
does Less to protect the user's freedom than the ordinary General
Public License.  It also provides other free software developers Less
of an advantage over competing non-free programs.  These disadvantages
are the reason we use the ordinary General Public License for many
libraries.  However, the Lesser license provides advantages in certain
special circumstances.

  For example, on rare occasions, there may be a special need to
encourage the widest possible use of a certain library, so that it becomes
a de-facto standard.  To achieve this, non-free programs must be
allowed to use the library.  A more frequent case is that a free
library does the same job as widely used non-free libraries.  In this
case, there is little to gain by limiting the free library to free
software only, so we use the Lesser General Public License.

  In other cases, permission to use a particular library in non-free
programs enables a greater number of people to use a large body of
free software.  For example, permission to use the GNU C Library in
non-free programs enables many more people to use the whole GNU
operating system, as well as its variant, the GNU/Linux operating
system.

  Although the Lesser General Public License is Less protective of the
users' freedom, it does ensure that the user of a program that is
linked with the Library has the freedom and the wherewithal to run
that program using a modified version of the Library.

  The precise terms and conditions for copying, distribution and
modification follow.  Pay close attention to the difference between a
"work based on the library" and a "work that uses the library".  The
former contains code derived from the library, whereas the latter must
be combined with the library in order to run.

		  GNU LESSER GENERAL PUBLIC LICENSE
   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION

  0. This License Agreement applies to any software library or other
program which contains a notice placed by the copyright holder or
other authorized party saying it may be distributed under the terms of
this Lesser General Public License (also called "this License").
Each licensee is addressed as "you".

  A "library" means a collection of software functions and/or data
prepared so as to be conveniently linked with application programs
(which use some of those functions and data) to form executables.

  The "Library", below, refers to any such software library or work
which has been distributed under these terms.  A "work based on the
Library" means either the Library or any derivative work under
copyright law: that is to say, a work containing the Library or a
portion of it, either verbatim or with modifications and/or translated
straightforwardly into another language.  (Hereinafter, translation is
included without limitation in the term "modification".)

  "Source code" for a work means the preferred form of the work for
making modifications to it.  For a library, complete source code means
all the source code for all modules it contains, plus any associated
interface definition files, plus the scripts used to control compilation
and installation of the library.

  Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope.  The act of
running a program using the Library is not restricted, and output from
such a program is covered only if its contents constitute a work based
on the Library (independent of the use of the Library in a tool for
writing it).  Whether that is true depends on what the Library does
and what the program that uses the Library does.

  1. You may copy and distribute verbatim copies of the Library's
complete source code as you receive it, in any medium, provided that
you conspicuously and appropriately publish on each copy an
appropriate copyright notice and disclaimer of warranty; keep intact
all the notices that refer to this License and to the absence of any
warranty; and distribute a copy of this License along with the
Library.

  You may charge a fee for the physical act of transferring a copy,
and you may at your option offer warranty protection in exchange for a
fee.

  2. You may modify your copy or copies of the Library or any portion
of it, thus forming a work based on the Library, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:

    a) The modified work must itself be a software library.

    b) You must cause the files modified to carry prominent notices
    stating that you changed the files and the date of any change.

    c) You must cause the whole of the work to be licensed at no
    charge to all third parties under the terms of this License.

    d) If a facility in the modified Library refers to a function or a
    table of data to be supplied by an application program that uses
    the facility, other than as an argument passed when the facility
    is invoked, then you must make a good faith effort to ensure that,
    in the event an application does not supply such function or
    table, the facility still operates, and performs whatever part of
    its purpose remains meaningful.

    (For example, a function in a library to compute square roots has
    a purpose that is entirely well-defined independent of the
    application.  Therefore, Subsection 2d requires that any
    application-supplied function or table used by this function must
    be optional: if the application does not supply it, the square
    root function must still compute square roots.)

These requirements apply to the modified work as a whole.  If
identifiable sections of that work are not derived from the Library,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works.  But when you
distribute the same sections as part of a whole which is a work based
on the Library, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote
it.

Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Library.

In addition, mere aggregation of another work not based on the Library
with the Library (or with a work based on the Library) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.

  3. You may opt to apply the terms of the ordinary GNU General Public
License instead of this License to a given copy of the Library.  To do
this, you must alter all the notices that refer to this License, so
that they refer to the ordinary GNU General Public License, version 2,
instead of to this License.  (If a newer version than version 2 of the
ordinary GNU General Public License has appeared, then you can specify
that version instead if you wish.)  Do not make any other change in
these notices.

  Once this change is made in a given copy, it is irreversible for
that copy, so the ordinary GNU General Public License applies to all
subsequent copies and derivative works made from that copy.

  This option is useful when you wish to copy part of the code of
the Library into a program that is not a library.

  4. You may copy and distribute the Library (or a portion or
derivative of it, under Section 2) in object code or executable form
under the terms of Sections 1 and 2 above provided that you accompany
it with the complete corresponding machine-readable source code, which
must be distributed under the terms of Sections 1 and 2 above on a
medium customarily used for software interchange.

  If distribution of object code is made by offering access to copy
from a designated place, then offering equivalent access to copy the
source code from the same place satisfies the requirement to
distribute the source code, even though third parties are not
compelled to copy the source along with the object code.

  5. A program that contains no derivative of any portion of the
Library, but is designed to work with the Library by being compiled or
linked with it, is called a "work that uses the Library".  Such a
work, in isolation, is not a derivative work of the Library, and
therefore falls outside the scope of this License.

  However, linking a "work that uses the Library" with the Library
creates an executable that is a derivative of the Library (because it
contains portions of the Library), rather than a "work that uses the
library".  The executable is therefore covered by this License.
Section 6 states terms for distribution of such executables.

  When a "work that uses the Library" uses material from a header file
that is part of the Library, the object code for the work may be a
derivative work of the Library even though the source code is not.
Whether this is true is especially significant if the work can be
linked without the Library, or if the work is itself a library.  The
threshold for this to be true is not precisely defined by law.

  If such an object file uses only numerical parameters, data
structure layouts and accessors, and small macros and small inline
functions (ten lines or less in length), then the use of the object
file is unrestricted, regardless of whether it is legally a derivative
work.  (Executables containing this object code plus portions of the
Library will still fall under Section 6.)

  Otherwise, if the work is a derivative of the Library, you may
distribute the object code for the work under the terms of Section 6.
Any executables containing that work also fall under Section 6,
whether or not they are linked directly with the Library itself.

  6. As an exception to the Sections above, you may also combine or
link a "work that uses the Library" with the Library to produce a
work containing portions of the Library, and distribute that work
under terms of your choice, provided that the terms permit
modification of the work for the customer's own use and reverse
engineering for debugging such modifications.

  You must give prominent notice with each copy of the work that the
Library is used in it and that the Library and its use are covered by
this License.  You must supply a copy of this License.  If the work
during execution displays copyright notices, you must include the
copyright notice for the Library among them, as well as a reference
directing the user to the copy of this License.  Also, you must do one
of these things:

    a) Accompany the work with the complete corresponding
    machine-readable source code for the Library including whatever
    changes were used in the work (which must be distributed under
    Sections 1 and 2 above); and, if the work is an executable linked
    with the Library, with the complete machine-readable "work that
    uses the Library", as object code and/or source code, so that the
    user can modify the Library and then relink to produce a modified
    executable containing the modified Library.  (It is understood
    that the user who changes the contents of definitions files in the
    Library will not necessarily be able to recompile the application
    to use the modified definitions.)

    b) Use a suitable shared library mechanism for linking with the
    Library.  A suitable mechanism is one that (1) uses at run time a
    copy of the library already present on the user's computer system,
    rather than copying library functions into the executable, and (2)
    will operate properly with a modified version of the library, if
    the user installs one, as long as the modified version is
    interface-compatible with the version that the work was made with.

    c) Accompany the work with a written offer, valid for at
    least three years, to give the same user the materials
    specified in Subsection 6a, above, for a charge no more
    than the cost of performing this distribution.

    d) If distribution of the work is made by offering access to copy
    from a designated place, offer equivalent access to copy the above
    specified materials from the same place.

    e) Verify that the user has already received a copy of these
    materials or that you have already sent this user a copy.

  For an executable, the required form of the "work that uses the
Library" must include any data and utility programs needed for
reproducing the executable from it.  However, as a special exception,
the materials to be distributed need not include anything that is
normally distributed (in either source or binary form) with the major
components (compiler, kernel, and so on) of the operating system on
which the executable runs, unless that component itself accompanies
the executable.

  It may happen that this requirement contradicts the license
restrictions of other proprietary libraries that do not normally
accompany the operating system.  Such a contradiction means you cannot
use both them and the Library together in an executable that you
distribute.

  7. You may place library facilities that are a work based on the
Library side-by-side in a single library together with other library
facilities not covered by this License, and distribute such a combined
library, provided that the separate distribution of the work based on
the Library and of the other library facilities is otherwise
permitted, and provided that you do these two things:

    a) Accompany the combined library with a copy of the same work
    based on the Library, uncombined with any other library
    facilities.  This must be distributed under the terms of the
    Sections above.

    b) Give prominent notice with the combined library of the fact
    that part of it is a work based on the Library, and explaining
    where to find the accompanying uncombined form of the same work.

  8. You may not copy, modify, sublicense, link with, or distribute
the Library except as expressly provided under this License.  Any
attempt otherwise to copy, modify, sublicense, link with, or
distribute the Library is void, and will automatically terminate your
rights under this License.  However, parties who have received copies,
or rights, from you under this License will not have their licenses
terminated so long as such parties remain in full compliance.

  9. You are not required to accept this License, since you have not
signed it.  However, nothing else grants you permission to modify or
distribute the Library or its derivative works.  These actions are
prohibited by law if you do not accept this License.  Therefore, by
modifying or distributing the Library (or any work based on the
Library), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Library or works based on it.

  10. Each time you redistribute the Library (or any work based on the
Library), the recipient automatically receives a license from the
original licensor to copy, distribute, link with or modify the Library
subject to these terms and conditions.  You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties with
this License.

  11. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License.  If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Library at all.  For example, if a patent
license would not permit royalty-free redistribution of the Library by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Library.

If any portion of this section is held invalid or unenforceable under any
particular circumstance, the balance of the section is intended to apply,
and the section as a whole is intended to apply in other circumstances.

It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system which is
implemented by public license practices.  Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.

This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.

  12. If the distribution and/or use of the Library is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Library under this License may add
an explicit geographical distribution limitation excluding those countries,
so that distribution is permitted only in or among countries not thus
excluded.  In such case, this License incorporates the limitation as if
written in the body of this License.

  13. The Free Software Foundation may publish revised and/or new
versions of the Lesser General Public License from time to time.
Such new versions will be similar in spirit to the present version,
but may differ in detail to address new problems or concerns.

Each version is given a distinguishing version number.  If the Library
specifies a version number of this License which applies to it and
"any later version", you have the option of following the terms and
conditions either of that version or of any later version published by
the Free Software Foundation.  If the Library does not specify a
license version number, you may choose any version ever published by
the Free Software Foundation.

  14. If you wish to incorporate parts of the Library into other free
programs whose distribution conditions are incompatible with these,
write to the author to ask for permission.  For software which is
copyrighted by the Free Software Foundation, write to the Free
Software Foundation; we sometimes make exceptions for this.  Our
decision will be guided by the two goals of preserving the free status
of all derivatives of our free software and of promoting the sharing
and reuse of software generally.

			    NO WARRANTY

  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGES.

		     END OF TERMS AND CONDITIONS
0707010000000B000081A400000000000000000000000167C7AD1600000036000000000000000000000000000000000000002400000000python-zeroconf-0.146.0/MANIFEST.ininclude README.rst
include COPYING
global-exclude *.c
0707010000000C000081A400000000000000000000000167C7AD160000004F000000000000000000000000000000000000002100000000python-zeroconf-0.146.0/Makefile# version: 1.3


test:
	poetry run pytest --durations=20 --timeout=60 -v tests
0707010000000D000081A400000000000000000000000167C7AD16000012B1000000000000000000000000000000000000002300000000python-zeroconf-0.146.0/README.rstpython-zeroconf
===============

.. image:: https://github.com/python-zeroconf/python-zeroconf/workflows/CI/badge.svg
   :target: https://github.com/python-zeroconf/python-zeroconf?query=workflow%3ACI+branch%3Amaster

.. image:: https://img.shields.io/pypi/v/zeroconf.svg
    :target: https://pypi.python.org/pypi/zeroconf

.. image:: https://codecov.io/gh/python-zeroconf/python-zeroconf/branch/master/graph/badge.svg
   :target: https://codecov.io/gh/python-zeroconf/python-zeroconf

.. image:: https://img.shields.io/endpoint?url=https://codspeed.io/badge.json
   :target: https://codspeed.io/python-zeroconf/python-zeroconf
   :alt: Codspeed.io status for python-zeroconf

.. image:: https://readthedocs.org/projects/python-zeroconf/badge/?version=latest
    :target: https://python-zeroconf.readthedocs.io/en/latest/?badge=latest
    :alt: Documentation Status

`Documentation <https://python-zeroconf.readthedocs.io/en/latest/>`_.

This is a fork of pyzeroconf, Multicast DNS Service Discovery for Python,
originally by Paul Scott-Murphy (https://github.com/paulsm/pyzeroconf),
modified by William McBrine (https://github.com/wmcbrine/pyzeroconf).

William McBrine's original fork note::

    This fork is used in all of my TiVo-related projects: HME for Python
    (and therefore HME/VLC), Network Remote, Remote Proxy, and pyTivo.
    Before this, I was tracking the changes for zeroconf.py in three
    separate repos. I figured I should have an authoritative source.

    Although I make changes based on my experience with TiVos, I expect that
    they're generally applicable. This version also includes patches found
    on the now-defunct (?) Launchpad repo of pyzeroconf, and elsewhere
    around the net -- not always well-documented, sorry.

Compatible with:

* Bonjour
* Avahi

Compared to some other Zeroconf/Bonjour/Avahi Python packages, python-zeroconf:

* isn't tied to Bonjour or Avahi
* doesn't use D-Bus
* doesn't force you to use a particular event loop or Twisted (asyncio is used under the hood but not required)
* is pip-installable
* has PyPI distribution
* has an optional cython extension for performance (pure python is supported as well)

Python compatibility
--------------------

* CPython 3.9+
* PyPy 3.9+

Versioning
----------

This project uses semantic versioning.

Status
------

This project is actively maintained.

Traffic Reduction
-----------------

Before version 0.32, most of the traffic reduction techniques described in https://datatracker.ietf.org/doc/html/rfc6762#section-7
were not implemented, which could lead to excessive network traffic.  It is highly recommended that version 0.32 or later
be used if this is a concern.

IPv6 support
------------

IPv6 support is relatively new and currently limited, specifically:

* `InterfaceChoice.All` is an alias for `InterfaceChoice.Default` on non-POSIX
  systems.
* Dual-stack IPv6 sockets are used, which may not be supported everywhere (some
  BSD variants do not have them).
* Listening on localhost (`::1`) does not work. Help with understanding why is
  appreciated.
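
For example, the IP stack can be selected with the ``ip_version`` argument. A minimal sketch,
assuming a dual-stack host:

.. code-block:: python

    from zeroconf import IPVersion, Zeroconf

    # Dual-stack (IPv4 + IPv6) operation; IPVersion.V4Only and
    # IPVersion.V6Only restrict operation to a single stack.
    zeroconf = Zeroconf(ip_version=IPVersion.All)
    zeroconf.close()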

How to get python-zeroconf?
===========================

* PyPI page https://pypi.org/project/zeroconf/
* GitHub project https://github.com/python-zeroconf/python-zeroconf

The easiest way to install python-zeroconf is using pip::

    pip install zeroconf



How do I use it?
================

Here's an example of browsing for a service:

.. code-block:: python

    from zeroconf import ServiceBrowser, ServiceListener, Zeroconf


    class MyListener(ServiceListener):

        def update_service(self, zc: Zeroconf, type_: str, name: str) -> None:
            print(f"Service {name} updated")

        def remove_service(self, zc: Zeroconf, type_: str, name: str) -> None:
            print(f"Service {name} removed")

        def add_service(self, zc: Zeroconf, type_: str, name: str) -> None:
            info = zc.get_service_info(type_, name)
            print(f"Service {name} added, service info: {info}")


    zeroconf = Zeroconf()
    listener = MyListener()
    browser = ServiceBrowser(zeroconf, "_http._tcp.local.", listener)
    try:
        input("Press enter to exit...\n\n")
    finally:
        zeroconf.close()

.. note::

    Discovery and service registration use *all* available network interfaces by default.
    If you want to customize that, you need to specify the ``interfaces`` argument when
    constructing the ``Zeroconf`` object (see the code for details).
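
For example, a minimal sketch of customizing the interfaces (the addresses below are
placeholders):

.. code-block:: python

    from zeroconf import InterfaceChoice, Zeroconf

    # Explicitly use every available interface (the default behavior) ...
    zc = Zeroconf(interfaces=InterfaceChoice.All)
    zc.close()

    # ... or restrict to specific local addresses (placeholder values).
    zc = Zeroconf(interfaces=["192.168.1.12", "10.0.0.5"])
    zc.close()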

If you don't know the name of the service you need to browse for, try:

.. code-block:: python

    from zeroconf import ZeroconfServiceTypes
    print('\n'.join(ZeroconfServiceTypes.find()))

See the examples directory for more.
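
Registering a service follows a similar pattern. A minimal sketch (the type, name, address, and
port below are placeholders):

.. code-block:: python

    import socket

    from zeroconf import ServiceInfo, Zeroconf

    # Placeholder service description; adjust the type, name, address and port.
    info = ServiceInfo(
        "_http._tcp.local.",
        "My Example Service._http._tcp.local.",
        addresses=[socket.inet_aton("192.168.1.12")],
        port=8080,
        properties={"path": "/"},
        server="example-host.local.",
    )

    zeroconf = Zeroconf()
    zeroconf.register_service(info)
    try:
        input("Press enter to unregister...\n\n")
    finally:
        zeroconf.unregister_service(info)
        zeroconf.close()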

Changelog
=========

`Changelog <CHANGELOG.md>`_

License
=======

LGPL, see COPYING file for details.
0707010000000E000041ED00000000000000000000000267C7AD1600000000000000000000000000000000000000000000001E00000000python-zeroconf-0.146.0/bench0707010000000F000081A400000000000000000000000167C7AD160000022B000000000000000000000000000000000000003000000000python-zeroconf-0.146.0/bench/create_destory.py"""Benchmark for AsyncZeroconf."""

import asyncio
import time

from zeroconf.asyncio import AsyncZeroconf

iterations = 10000


async def _create_destroy(count: int) -> None:
    for _ in range(count):
        async with AsyncZeroconf() as zc:
            await zc.zeroconf.async_wait_for_start()


async def _run() -> None:
    start = time.perf_counter()
    await _create_destroy(iterations)
    duration = time.perf_counter() - start
    print(f"Creating and destroying {iterations} Zeroconf instances took {duration} seconds")


asyncio.run(_run())
07070100000010000081A400000000000000000000000167C7AD160000184E000000000000000000000000000000000000002A00000000python-zeroconf-0.146.0/bench/incoming.py"""Benchmark for DNSIncoming."""

import socket
import timeit

from zeroconf import (
    DNSAddress,
    DNSIncoming,
    DNSNsec,
    DNSOutgoing,
    DNSService,
    DNSText,
    const,
)


def generate_packets() -> list[bytes]:
    out = DNSOutgoing(const._FLAGS_QR_RESPONSE | const._FLAGS_AA)
    address = socket.inet_pton(socket.AF_INET, "192.168.208.5")

    additionals = [
        {
            "name": "HASS Bridge ZJWH FF5137._hap._tcp.local.",
            "address": address,
            "port": 51832,
            "text": b"\x13md=HASS Bridge"
            b" ZJWH\x06pv=1.0\x14id=01:6B:30:FF:51:37\x05c#=12\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=L0m/aQ==",
        },
        {
            "name": "HASS Bridge 3K9A C2582A._hap._tcp.local.",
            "address": address,
            "port": 51834,
            "text": b"\x13md=HASS Bridge"
            b" 3K9A\x06pv=1.0\x14id=E2:AA:5B:C2:58:2A\x05c#=12\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=b2CnzQ==",
        },
        {
            "name": "Master Bed TV CEDB27._hap._tcp.local.",
            "address": address,
            "port": 51830,
            "text": b"\x10md=Master Bed"
            b" TV\x06pv=1.0\x14id=9E:B7:44:CE:DB:27\x05c#=18\x04s#=1\x04ff=0\x05"
            b"ci=31\x04sf=0\x0bsh=CVj1kw==",
        },
        {
            "name": "Living Room TV 921B77._hap._tcp.local.",
            "address": address,
            "port": 51833,
            "text": b"\x11md=Living Room"
            b" TV\x06pv=1.0\x14id=11:61:E7:92:1B:77\x05c#=17\x04s#=1\x04ff=0\x05"
            b"ci=31\x04sf=0\x0bsh=qU77SQ==",
        },
        {
            "name": "HASS Bridge ZC8X FF413D._hap._tcp.local.",
            "address": address,
            "port": 51829,
            "text": b"\x13md=HASS Bridge"
            b" ZC8X\x06pv=1.0\x14id=96:14:45:FF:41:3D\x05c#=12\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=b0QZlg==",
        },
        {
            "name": "HASS Bridge WLTF 4BE61F._hap._tcp.local.",
            "address": address,
            "port": 51837,
            "text": b"\x13md=HASS Bridge"
            b" WLTF\x06pv=1.0\x14id=E0:E7:98:4B:E6:1F\x04c#=2\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=ahAISA==",
        },
        {
            "name": "FrontdoorCamera 8941D1._hap._tcp.local.",
            "address": address,
            "port": 54898,
            "text": b"\x12md=FrontdoorCamera\x06pv=1.0\x14id=9F:B7:DC:89:41:D1\x04c#=2\x04"
            b"s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=0+MXmA==",
        },
        {
            "name": "HASS Bridge W9DN 5B5CC5._hap._tcp.local.",
            "address": address,
            "port": 51836,
            "text": b"\x13md=HASS Bridge"
            b" W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=6fLM5A==",
        },
        {
            "name": "HASS Bridge Y9OO EFF0A7._hap._tcp.local.",
            "address": address,
            "port": 51838,
            "text": b"\x13md=HASS Bridge"
            b" Y9OO\x06pv=1.0\x14id=D3:FE:98:EF:F0:A7\x04c#=2\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=u3bdfw==",
        },
        {
            "name": "Snooze Room TV 6B89B0._hap._tcp.local.",
            "address": address,
            "port": 51835,
            "text": b"\x11md=Snooze Room"
            b" TV\x06pv=1.0\x14id=5F:D5:70:6B:89:B0\x05c#=17\x04s#=1\x04ff=0\x05"
            b"ci=31\x04sf=0\x0bsh=xNTqsg==",
        },
        {
            "name": "AlexanderHomeAssistant 74651D._hap._tcp.local.",
            "address": address,
            "port": 54811,
            "text": b"\x19md=AlexanderHomeAssistant\x06pv=1.0\x14id=59:8A:0B:74:65:1D\x05"
            b"c#=14\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=ccZLPA==",
        },
        {
            "name": "HASS Bridge OS95 39C053._hap._tcp.local.",
            "address": address,
            "port": 51831,
            "text": b"\x13md=HASS Bridge"
            b" OS95\x06pv=1.0\x14id=7E:8C:E6:39:C0:53\x05c#=12\x04s#=1\x04ff=0\x04ci=2"
            b"\x04sf=0\x0bsh=Xfe5LQ==",
        },
    ]

    out.add_answer_at_time(
        DNSText(
            "HASS Bridge W9DN 5B5CC5._hap._tcp.local.",
            const._TYPE_TXT,
            const._CLASS_IN | const._CLASS_UNIQUE,
            const._DNS_OTHER_TTL,
            b"\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1"
            b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==",
        ),
        0,
    )

    for record in additionals:
        out.add_additional_answer(
            DNSService(
                record["name"],  # type: ignore
                const._TYPE_SRV,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_HOST_TTL,
                0,
                0,
                record["port"],  # type: ignore
                record["name"],  # type: ignore
            )
        )
        out.add_additional_answer(
            DNSText(
                record["name"],  # type: ignore
                const._TYPE_TXT,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_OTHER_TTL,
                record["text"],  # type: ignore
            )
        )
        out.add_additional_answer(
            DNSAddress(
                record["name"],  # type: ignore
                const._TYPE_A,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_HOST_TTL,
                record["address"],  # type: ignore
            )
        )
        out.add_additional_answer(
            DNSNsec(
                record["name"],  # type: ignore
                const._TYPE_NSEC,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_OTHER_TTL,
                record["name"],  # type: ignore
                [const._TYPE_TXT, const._TYPE_SRV],
            )
        )

    return out.packets()


packets = generate_packets()


def parse_incoming_message() -> None:
    for packet in packets:
        DNSIncoming(packet).answers  # noqa: B018
        break  # only the first generated packet is parsed per call


count = 100000
time = timeit.Timer(parse_incoming_message).timeit(count)
print(f"Parsing {count} incoming messages took {time} seconds")
07070100000011000081A400000000000000000000000167C7AD16000016E9000000000000000000000000000000000000002A00000000python-zeroconf-0.146.0/bench/outgoing.py"""Benchmark for DNSOutgoing."""

import socket
import timeit

from zeroconf import DNSAddress, DNSOutgoing, DNSService, DNSText, const
from zeroconf._protocol.outgoing import State


def generate_packets() -> DNSOutgoing:
    out = DNSOutgoing(const._FLAGS_QR_RESPONSE | const._FLAGS_AA)
    address = socket.inet_pton(socket.AF_INET, "192.168.208.5")

    additionals = [
        {
            "name": "HASS Bridge ZJWH FF5137._hap._tcp.local.",
            "address": address,
            "port": 51832,
            "text": b"\x13md=HASS Bridge"
            b" ZJWH\x06pv=1.0\x14id=01:6B:30:FF:51:37\x05c#=12\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=L0m/aQ==",
        },
        {
            "name": "HASS Bridge 3K9A C2582A._hap._tcp.local.",
            "address": address,
            "port": 51834,
            "text": b"\x13md=HASS Bridge"
            b" 3K9A\x06pv=1.0\x14id=E2:AA:5B:C2:58:2A\x05c#=12\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=b2CnzQ==",
        },
        {
            "name": "Master Bed TV CEDB27._hap._tcp.local.",
            "address": address,
            "port": 51830,
            "text": b"\x10md=Master Bed"
            b" TV\x06pv=1.0\x14id=9E:B7:44:CE:DB:27\x05c#=18\x04s#=1\x04ff=0\x05"
            b"ci=31\x04sf=0\x0bsh=CVj1kw==",
        },
        {
            "name": "Living Room TV 921B77._hap._tcp.local.",
            "address": address,
            "port": 51833,
            "text": b"\x11md=Living Room"
            b" TV\x06pv=1.0\x14id=11:61:E7:92:1B:77\x05c#=17\x04s#=1\x04ff=0\x05"
            b"ci=31\x04sf=0\x0bsh=qU77SQ==",
        },
        {
            "name": "HASS Bridge ZC8X FF413D._hap._tcp.local.",
            "address": address,
            "port": 51829,
            "text": b"\x13md=HASS Bridge"
            b" ZC8X\x06pv=1.0\x14id=96:14:45:FF:41:3D\x05c#=12\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=b0QZlg==",
        },
        {
            "name": "HASS Bridge WLTF 4BE61F._hap._tcp.local.",
            "address": address,
            "port": 51837,
            "text": b"\x13md=HASS Bridge"
            b" WLTF\x06pv=1.0\x14id=E0:E7:98:4B:E6:1F\x04c#=2\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=ahAISA==",
        },
        {
            "name": "FrontdoorCamera 8941D1._hap._tcp.local.",
            "address": address,
            "port": 54898,
            "text": b"\x12md=FrontdoorCamera\x06pv=1.0\x14id=9F:B7:DC:89:41:D1\x04c#=2\x04"
            b"s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=0+MXmA==",
        },
        {
            "name": "HASS Bridge W9DN 5B5CC5._hap._tcp.local.",
            "address": address,
            "port": 51836,
            "text": b"\x13md=HASS Bridge"
            b" W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=6fLM5A==",
        },
        {
            "name": "HASS Bridge Y9OO EFF0A7._hap._tcp.local.",
            "address": address,
            "port": 51838,
            "text": b"\x13md=HASS Bridge"
            b" Y9OO\x06pv=1.0\x14id=D3:FE:98:EF:F0:A7\x04c#=2\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=u3bdfw==",
        },
        {
            "name": "Snooze Room TV 6B89B0._hap._tcp.local.",
            "address": address,
            "port": 51835,
            "text": b"\x11md=Snooze Room"
            b" TV\x06pv=1.0\x14id=5F:D5:70:6B:89:B0\x05c#=17\x04s#=1\x04ff=0\x05"
            b"ci=31\x04sf=0\x0bsh=xNTqsg==",
        },
        {
            "name": "AlexanderHomeAssistant 74651D._hap._tcp.local.",
            "address": address,
            "port": 54811,
            "text": b"\x19md=AlexanderHomeAssistant\x06pv=1.0\x14id=59:8A:0B:74:65:1D\x05"
            b"c#=14\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=ccZLPA==",
        },
        {
            "name": "HASS Bridge OS95 39C053._hap._tcp.local.",
            "address": address,
            "port": 51831,
            "text": b"\x13md=HASS Bridge"
            b" OS95\x06pv=1.0\x14id=7E:8C:E6:39:C0:53\x05c#=12\x04s#=1\x04ff=0\x04ci=2"
            b"\x04sf=0\x0bsh=Xfe5LQ==",
        },
    ]

    out.add_answer_at_time(
        DNSText(
            "HASS Bridge W9DN 5B5CC5._hap._tcp.local.",
            const._TYPE_TXT,
            const._CLASS_IN | const._CLASS_UNIQUE,
            const._DNS_OTHER_TTL,
            b"\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1"
            b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==",
        ),
        0,
    )

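    # Attach the SRV/TXT/A records that a responder would normally include as
    # additionals for each fake service.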
    for record in additionals:
        out.add_additional_answer(
            DNSService(
                record["name"],  # type: ignore
                const._TYPE_SRV,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_HOST_TTL,
                0,
                0,
                record["port"],  # type: ignore
                record["name"],  # type: ignore
            )
        )
        out.add_additional_answer(
            DNSText(
                record["name"],  # type: ignore
                const._TYPE_TXT,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_OTHER_TTL,
                record["text"],  # type: ignore
            )
        )
        out.add_additional_answer(
            DNSAddress(
                record["name"],  # type: ignore
                const._TYPE_A,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_HOST_TTL,
                record["address"],  # type: ignore
            )
        )

    return out


out = generate_packets()


def make_outgoing_message() -> None:
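    # Render the message into wire-format packets, then reset the internal
    # state so the same outgoing message can be rendered again on the next
    # timeit iteration.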
    out.packets()
    out.state = State.init.value
    out.finished = False
    out._reset_for_next_packet()


count = 100000
time = timeit.Timer(make_outgoing_message).timeit(count)
print(f"Construction {count} outgoing messages took {time} seconds")
07070100000012000081A400000000000000000000000167C7AD160000020A000000000000000000000000000000000000003000000000python-zeroconf-0.146.0/bench/txt_properties.pyimport timeit

from zeroconf import ServiceInfo

info = ServiceInfo(
    "_test._tcp.local.",
    "test._test._tcp.local.",
    properties=(
        b"\x19md=AlexanderHomeAssistant\x06pv=1.0\x14id=59:8A:0B:74:65:1D\x05"
        b"c#=14\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=ccZLPA=="
    ),
)


def process_properties() -> None:
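    # Drop the cached parsed properties so that accessing info.properties
    # re-decodes the raw TXT payload on every iteration.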
    info._properties = None
    info.properties  # noqa: B018


count = 100000
time = timeit.Timer(process_properties).timeit(count)
print(f"Processing {count} properties took {time} seconds")
07070100000013000081A400000000000000000000000167C7AD160000080E000000000000000000000000000000000000002500000000python-zeroconf-0.146.0/build_ext.py"""Build optional cython modules."""

import logging
import os
from distutils.command.build_ext import build_ext
from typing import Any

try:
    from setuptools import Extension
except ImportError:
    from distutils.core import Extension

_LOGGER = logging.getLogger(__name__)

TO_CYTHONIZE = [
    "src/zeroconf/_dns.py",
    "src/zeroconf/_cache.py",
    "src/zeroconf/_history.py",
    "src/zeroconf/_record_update.py",
    "src/zeroconf/_listener.py",
    "src/zeroconf/_protocol/incoming.py",
    "src/zeroconf/_protocol/outgoing.py",
    "src/zeroconf/_handlers/answers.py",
    "src/zeroconf/_handlers/record_manager.py",
    "src/zeroconf/_handlers/multicast_outgoing_queue.py",
    "src/zeroconf/_handlers/query_handler.py",
    "src/zeroconf/_services/__init__.py",
    "src/zeroconf/_services/browser.py",
    "src/zeroconf/_services/info.py",
    "src/zeroconf/_services/registry.py",
    "src/zeroconf/_updates.py",
    "src/zeroconf/_utils/ipaddress.py",
    "src/zeroconf/_utils/time.py",
]

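# Map each source path to its dotted module name,
# e.g. "src/zeroconf/_dns.py" -> "zeroconf._dns".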
EXTENSIONS = [
    Extension(
        ext.removeprefix("src/").removesuffix(".py").replace("/", "."),
        [ext],
        language="c",
        extra_compile_args=["-O3", "-g0"],
    )
    for ext in TO_CYTHONIZE
]


class BuildExt(build_ext):
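    # Compilation failures are deliberately non-fatal: zeroconf falls back to
    # its pure-Python implementation when the C extensions cannot be built.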
    def build_extensions(self) -> None:
        try:
            super().build_extensions()
        except Exception:
            _LOGGER.info("Failed to build cython extensions")


def build(setup_kwargs: Any) -> None:
    if os.environ.get("SKIP_CYTHON", False):
        return
    try:
        from Cython.Build import cythonize

        setup_kwargs.update(
            {
                "ext_modules": cythonize(
                    EXTENSIONS,
                    compiler_directives={"language_level": "3"},  # Python 3
                ),
                "cmdclass": {"build_ext": BuildExt},
            }
        )
        setup_kwargs["exclude_package_data"] = {pkg: ["*.c"] for pkg in setup_kwargs["packages"]}
    except Exception:
        if os.environ.get("REQUIRE_CYTHON"):
            raise
07070100000014000081A400000000000000000000000167C7AD16000000F2000000000000000000000000000000000000002E00000000python-zeroconf-0.146.0/commitlint.config.mjsexport default {
  extends: ["@commitlint/config-conventional"],
  rules: {
    "header-max-length": [0, "always", Infinity],
    "body-max-line-length": [0, "always", Infinity],
    "footer-max-line-length": [0, "always", Infinity],
  },
};
07070100000015000041ED00000000000000000000000267C7AD1600000000000000000000000000000000000000000000001D00000000python-zeroconf-0.146.0/docs07070100000016000081A400000000000000000000000167C7AD160000027A000000000000000000000000000000000000002600000000python-zeroconf-0.146.0/docs/Makefile# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS    ?=
SPHINXBUILD   ?= sphinx-build
SOURCEDIR     = .
BUILDDIR      = _build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
07070100000017000041ED00000000000000000000000267C7AD1600000000000000000000000000000000000000000000002200000000python-zeroconf-0.146.0/docs/_ext07070100000018000081A400000000000000000000000167C7AD1600000233000000000000000000000000000000000000003800000000python-zeroconf-0.146.0/docs/_ext/zeroconfautodocfix.py"""
Must be included after 'sphinx.ext.autodoc'. Fixes unwanted 'alias of' behavior.
"""

# pylint: disable=import-error
from sphinx.application import Sphinx


def skip_member(app, what, name, obj, skip: bool, options) -> bool:  # type: ignore[no-untyped-def]
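    # Skip a member when autodoc already skips it, when it has no docstring,
    # or when it (or its underlying __func__) is marked __private__.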
    return (
        skip
        or getattr(obj, "__doc__", None) is None
        or getattr(obj, "__private__", False) is True
        or getattr(getattr(obj, "__func__", None), "__private__", False) is True
    )


def setup(app: Sphinx) -> None:
    app.connect("autodoc-skip-member", skip_member)
07070100000019000081A400000000000000000000000167C7AD16000000EA000000000000000000000000000000000000002500000000python-zeroconf-0.146.0/docs/api.rstpython-zeroconf API reference
=============================

.. automodule:: zeroconf
    :members:
    :undoc-members:
    :show-inheritance:

.. automodule:: zeroconf.asyncio
    :members:
    :undoc-members:
    :show-inheritance:
0707010000001A000081A400000000000000000000000167C7AD1600000B03000000000000000000000000000000000000002500000000python-zeroconf-0.146.0/docs/conf.py# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
import sys
from collections.abc import Sequence
from pathlib import Path

# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use Path.absolute to make it absolute.
sys.path.append(str(Path(__file__).parent / "_ext"))
sys.path.insert(0, str(Path(__file__).parent.parent))

# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information

project = "python-zeroconf"
project_copyright = "python-zeroconf authors"
author = "python-zeroconf authors"

try:
    import zeroconf

    # The short X.Y version.
    version = zeroconf.__version__
    # The full version, including alpha/beta/rc tags.
    release = version
except ImportError:
    version = ""
    release = ""

# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration

extensions = [
    "sphinx.ext.todo",  # Allow todo comments.
    "sphinx.ext.viewcode",  # Link to source code.
    "sphinx.ext.autodoc",
    "zeroconfautodocfix",  # Must be after "sphinx.ext.autodoc"
    "sphinx.ext.intersphinx",
    "sphinx.ext.coverage",  # Enable the overage report.
    "sphinx.ext.duration",  # Show build duration at the end.
    "sphinx_rtd_theme",  # Required for theme.
]

templates_path = ["_templates"]
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]

# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output

html_theme = "sphinx_rtd_theme"
html_static_path = ["_static"]

# Custom sidebar templates, maps document names to template names.
html_sidebars: dict[str, Sequence[str]] = {
    "index": ("sidebar.html", "sourcelink.html", "searchbox.html"),
    "**": ("localtoc.html", "relations.html", "sourcelink.html", "searchbox.html"),
}

# -- Options for RTD theme ---------------------------------------------------
# https://sphinx-rtd-theme.readthedocs.io/en/stable/configuring.html

# html_theme_options = {}

# -- Options for HTML help output --------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-help-output

htmlhelp_basename = "zeroconfdoc"

# -- Options for intersphinx extension ---------------------------------------
# https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html#configuration

intersphinx_mapping = {
    "python": ("https://docs.python.org/3", None),
}
0707010000001B000081A400000000000000000000000167C7AD16000003DF000000000000000000000000000000000000002700000000python-zeroconf-0.146.0/docs/index.rstWelcome to python-zeroconf documentation!
=========================================

.. image:: https://github.com/jstasiak/python-zeroconf/workflows/CI/badge.svg
   :target: https://github.com/jstasiak/python-zeroconf?query=workflow%3ACI+branch%3Amaster

.. image:: https://img.shields.io/pypi/v/zeroconf.svg
    :target: https://pypi.python.org/pypi/zeroconf

.. image:: https://codecov.io/gh/jstasiak/python-zeroconf/branch/master/graph/badge.svg
   :target: https://codecov.io/gh/jstasiak/python-zeroconf

GitHub (code repository, issues): https://github.com/jstasiak/python-zeroconf

PyPI (installable, stable distributions): https://pypi.org/project/zeroconf. You can install python-zeroconf using pip::

    pip install zeroconf

python-zeroconf works with CPython 3.9+ and PyPy 3 implementing Python 3.9+.
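
A minimal sketch of browsing for HTTP services (the repository's ``examples``
directory contains complete scripts)::

    from zeroconf import ServiceBrowser, ServiceListener, Zeroconf


    class MyListener(ServiceListener):
        def add_service(self, zc: Zeroconf, type_: str, name: str) -> None:
            print(f"Service {name} added")

        def remove_service(self, zc: Zeroconf, type_: str, name: str) -> None:
            print(f"Service {name} removed")

        def update_service(self, zc: Zeroconf, type_: str, name: str) -> None:
            print(f"Service {name} updated")


    zeroconf = Zeroconf()
    browser = ServiceBrowser(zeroconf, "_http._tcp.local.", MyListener())
    try:
        input("Press enter to exit...\n")
    finally:
        zeroconf.close()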

Contents
--------

.. toctree::
   :maxdepth: 1

   api

See `the project's README <https://github.com/jstasiak/python-zeroconf/blob/master/README.rst>`_ for more information.
0707010000001C000041ED00000000000000000000000267C7AD1600000000000000000000000000000000000000000000002100000000python-zeroconf-0.146.0/examples0707010000001D000081ED00000000000000000000000167C7AD1600001205000000000000000000000000000000000000003800000000python-zeroconf-0.146.0/examples/async_apple_scanner.py#!/usr/bin/env python

"""Scan for apple devices."""

from __future__ import annotations

import argparse
import asyncio
import logging
from typing import Any, cast

from zeroconf import DNSQuestionType, IPVersion, ServiceStateChange, Zeroconf
from zeroconf.asyncio import AsyncServiceBrowser, AsyncServiceInfo, AsyncZeroconf

HOMESHARING_SERVICE: str = "_appletv-v2._tcp.local."
DEVICE_SERVICE: str = "_touch-able._tcp.local."
MEDIAREMOTE_SERVICE: str = "_mediaremotetv._tcp.local."
AIRPLAY_SERVICE: str = "_airplay._tcp.local."
COMPANION_SERVICE: str = "_companion-link._tcp.local."
RAOP_SERVICE: str = "_raop._tcp.local."
AIRPORT_ADMIN_SERVICE: str = "_airport._tcp.local."
DEVICE_INFO_SERVICE: str = "_device-info._tcp.local."

ALL_SERVICES = [
    HOMESHARING_SERVICE,
    DEVICE_SERVICE,
    MEDIAREMOTE_SERVICE,
    AIRPLAY_SERVICE,
    COMPANION_SERVICE,
    RAOP_SERVICE,
    AIRPORT_ADMIN_SERVICE,
    DEVICE_INFO_SERVICE,
]

log = logging.getLogger(__name__)

_PENDING_TASKS: set[asyncio.Task] = set()


def async_on_service_state_change(
    zeroconf: Zeroconf, service_type: str, name: str, state_change: ServiceStateChange
) -> None:
    print(f"Service {name} of type {service_type} state changed: {state_change}")
    if state_change is not ServiceStateChange.Added:
        return
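    # Strip the trailing "." + service type to recover the instance name, then
    # build the matching _device-info._tcp.local. name for the same device.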
    base_name = name[: -len(service_type) - 1]
    device_name = f"{base_name}.{DEVICE_INFO_SERVICE}"
    task = asyncio.ensure_future(_async_show_service_info(zeroconf, service_type, name))
    _PENDING_TASKS.add(task)
    task.add_done_callback(_PENDING_TASKS.discard)
    # Also probe for device info
    task = asyncio.ensure_future(_async_show_service_info(zeroconf, DEVICE_INFO_SERVICE, device_name))
    _PENDING_TASKS.add(task)
    task.add_done_callback(_PENDING_TASKS.discard)


async def _async_show_service_info(zeroconf: Zeroconf, service_type: str, name: str) -> None:
    info = AsyncServiceInfo(service_type, name)
    await info.async_request(zeroconf, 3000, question_type=DNSQuestionType.QU)
    print(f"Info from zeroconf.get_service_info: {info!r}")
    if info:
        addresses = [f"{addr}:{cast(int, info.port)}" for addr in info.parsed_addresses()]
        print(f"  Name: {name}")
        print(f"  Addresses: {', '.join(addresses)}")
        print(f"  Weight: {info.weight}, priority: {info.priority}")
        print(f"  Server: {info.server}")
        if info.properties:
            print("  Properties are:")
            for key, value in info.properties.items():
                print(f"    {key!r}: {value!r}")
        else:
            print("  No properties")
    else:
        print("  No info")
    print("\n")


class AsyncAppleScanner:
    def __init__(self, args: Any) -> None:
        self.args = args
        self.aiobrowser: AsyncServiceBrowser | None = None
        self.aiozc: AsyncZeroconf | None = None

    async def async_run(self) -> None:
        self.aiozc = AsyncZeroconf(ip_version=ip_version)
        await self.aiozc.zeroconf.async_wait_for_start()
        print(f"\nBrowsing {ALL_SERVICES} service(s), press Ctrl-C to exit...\n")
        kwargs = {
            "handlers": [async_on_service_state_change],
            "question_type": DNSQuestionType.QU,
        }
        if self.args.target:
            kwargs["addr"] = self.args.target
        self.aiobrowser = AsyncServiceBrowser(
            self.aiozc.zeroconf,
            ALL_SERVICES,
            **kwargs,  # type: ignore[arg-type]
        )
        await asyncio.Event().wait()

    async def async_close(self) -> None:
        assert self.aiozc is not None
        assert self.aiobrowser is not None
        await self.aiobrowser.async_cancel()
        await self.aiozc.async_close()


if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)

    parser = argparse.ArgumentParser()
    parser.add_argument("--debug", action="store_true")
    version_group = parser.add_mutually_exclusive_group()
    version_group.add_argument("--target", help="Unicast target")
    version_group.add_argument("--v6", action="store_true")
    version_group.add_argument("--v6-only", action="store_true")
    args = parser.parse_args()

    if args.debug:
        logging.getLogger("zeroconf").setLevel(logging.DEBUG)
    if args.v6:
        ip_version = IPVersion.All
    elif args.v6_only:
        ip_version = IPVersion.V6Only
    else:
        ip_version = IPVersion.V4Only

    loop = asyncio.get_event_loop()
    runner = AsyncAppleScanner(args)
    try:
        loop.run_until_complete(runner.async_run())
    except KeyboardInterrupt:
        loop.run_until_complete(runner.async_close())
0707010000001E000081ED00000000000000000000000167C7AD1600000E86000000000000000000000000000000000000003200000000python-zeroconf-0.146.0/examples/async_browser.py#!/usr/bin/env python

"""Example of browsing for a service.

The default is HTTP and HAP; use --find to search for all available services on the network.
"""

from __future__ import annotations

import argparse
import asyncio
import logging
from typing import Any, cast

from zeroconf import IPVersion, ServiceStateChange, Zeroconf
from zeroconf.asyncio import (
    AsyncServiceBrowser,
    AsyncServiceInfo,
    AsyncZeroconf,
    AsyncZeroconfServiceTypes,
)

_PENDING_TASKS: set[asyncio.Task] = set()


def async_on_service_state_change(
    zeroconf: Zeroconf, service_type: str, name: str, state_change: ServiceStateChange
) -> None:
    print(f"Service {name} of type {service_type} state changed: {state_change}")
    if state_change is not ServiceStateChange.Added:
        return
    task = asyncio.ensure_future(async_display_service_info(zeroconf, service_type, name))
    _PENDING_TASKS.add(task)
    task.add_done_callback(_PENDING_TASKS.discard)


async def async_display_service_info(zeroconf: Zeroconf, service_type: str, name: str) -> None:
    info = AsyncServiceInfo(service_type, name)
    await info.async_request(zeroconf, 3000)
    print(f"Info from zeroconf.get_service_info: {info!r}")
    if info:
        addresses = [f"{addr}:{cast(int, info.port)}" for addr in info.parsed_scoped_addresses()]
        print(f"  Name: {name}")
        print(f"  Addresses: {', '.join(addresses)}")
        print(f"  Weight: {info.weight}, priority: {info.priority}")
        print(f"  Server: {info.server}")
        if info.properties:
            print("  Properties are:")
            for key, value in info.properties.items():
                print(f"    {key!r}: {value!r}")
        else:
            print("  No properties")
    else:
        print("  No info")
    print("\n")


class AsyncRunner:
    def __init__(self, args: Any) -> None:
        self.args = args
        self.aiobrowser: AsyncServiceBrowser | None = None
        self.aiozc: AsyncZeroconf | None = None

    async def async_run(self) -> None:
        self.aiozc = AsyncZeroconf(ip_version=ip_version)

        services = ["_http._tcp.local.", "_hap._tcp.local."]
        if self.args.find:
            services = list(
                await AsyncZeroconfServiceTypes.async_find(aiozc=self.aiozc, ip_version=ip_version)
            )

        print(f"\nBrowsing {services} service(s), press Ctrl-C to exit...\n")
        self.aiobrowser = AsyncServiceBrowser(
            self.aiozc.zeroconf, services, handlers=[async_on_service_state_change]
        )
        await asyncio.Event().wait()

    async def async_close(self) -> None:
        assert self.aiozc is not None
        assert self.aiobrowser is not None
        await self.aiobrowser.async_cancel()
        await self.aiozc.async_close()


if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)

    parser = argparse.ArgumentParser()
    parser.add_argument("--debug", action="store_true")
    parser.add_argument("--find", action="store_true", help="Browse all available services")
    version_group = parser.add_mutually_exclusive_group()
    version_group.add_argument("--v6", action="store_true")
    version_group.add_argument("--v6-only", action="store_true")
    args = parser.parse_args()

    if args.debug:
        logging.getLogger("zeroconf").setLevel(logging.DEBUG)
    if args.v6:
        ip_version = IPVersion.All
    elif args.v6_only:
        ip_version = IPVersion.V6Only
    else:
        ip_version = IPVersion.V4Only

    loop = asyncio.get_event_loop()
    runner = AsyncRunner(args)
    try:
        loop.run_until_complete(runner.async_run())
    except KeyboardInterrupt:
        loop.run_until_complete(runner.async_close())
0707010000001F000081ED00000000000000000000000167C7AD16000009E2000000000000000000000000000000000000003700000000python-zeroconf-0.146.0/examples/async_registration.py#!/usr/bin/env python

"""Example of announcing 250 services (in this case, a fake HTTP server)."""

from __future__ import annotations

import argparse
import asyncio
import logging
import socket

from zeroconf import IPVersion
from zeroconf.asyncio import AsyncServiceInfo, AsyncZeroconf


class AsyncRunner:
    def __init__(self, ip_version: IPVersion) -> None:
        self.ip_version = ip_version
        self.aiozc: AsyncZeroconf | None = None

    async def register_services(self, infos: list[AsyncServiceInfo]) -> None:
        self.aiozc = AsyncZeroconf(ip_version=self.ip_version)
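        # async_register_service returns an awaitable that resolves to the
        # background task performing the announcements, so gather twice: once
        # to start every registration and once to wait for them to finish.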
        tasks = [self.aiozc.async_register_service(info) for info in infos]
        background_tasks = await asyncio.gather(*tasks)
        await asyncio.gather(*background_tasks)
        print("Finished registration, press Ctrl-C to exit...")
        await asyncio.Event().wait()

    async def unregister_services(self, infos: list[AsyncServiceInfo]) -> None:
        assert self.aiozc is not None
        tasks = [self.aiozc.async_unregister_service(info) for info in infos]
        background_tasks = await asyncio.gather(*tasks)
        await asyncio.gather(*background_tasks)
        await self.aiozc.async_close()


if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)

    parser = argparse.ArgumentParser()
    parser.add_argument("--debug", action="store_true")
    version_group = parser.add_mutually_exclusive_group()
    version_group.add_argument("--v6", action="store_true")
    version_group.add_argument("--v6-only", action="store_true")
    args = parser.parse_args()

    if args.debug:
        logging.getLogger("zeroconf").setLevel(logging.DEBUG)
    if args.v6:
        ip_version = IPVersion.All
    elif args.v6_only:
        ip_version = IPVersion.V6Only
    else:
        ip_version = IPVersion.V4Only

    infos = []
    for i in range(250):
        infos.append(
            AsyncServiceInfo(
                "_http._tcp.local.",
                f"Paul's Test Web Site {i}._http._tcp.local.",
                addresses=[socket.inet_aton("127.0.0.1")],
                port=80,
                properties={"path": "/~paulsm/"},
                server=f"zcdemohost-{i}.local.",
            )
        )

    print("Registration of 250 services...")
    loop = asyncio.get_event_loop()
    runner = AsyncRunner(ip_version)
    try:
        loop.run_until_complete(runner.register_services(infos))
    except KeyboardInterrupt:
        loop.run_until_complete(runner.unregister_services(infos))
07070100000020000081ED00000000000000000000000167C7AD1600000DC2000000000000000000000000000000000000003F00000000python-zeroconf-0.146.0/examples/async_service_info_request.py#!/usr/bin/env python

"""Example of perodic dump of homekit services.

This example is useful when a user wants an ondemand
list of HomeKit devices on the network.

"""

from __future__ import annotations

import argparse
import asyncio
import logging
from typing import Any, cast

from zeroconf import IPVersion, ServiceBrowser, ServiceStateChange, Zeroconf
from zeroconf.asyncio import AsyncServiceInfo, AsyncZeroconf

HAP_TYPE = "_hap._tcp.local."


async def async_watch_services(aiozc: AsyncZeroconf) -> None:
    zeroconf = aiozc.zeroconf
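    # Every 5 seconds, scan the shared record cache for HAP service names and
    # refresh each matching service's info concurrently.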
    while True:
        await asyncio.sleep(5)
        infos: list[AsyncServiceInfo] = []
        for name in zeroconf.cache.names():
            if not name.endswith(HAP_TYPE):
                continue
            infos.append(AsyncServiceInfo(HAP_TYPE, name))
        tasks = [info.async_request(aiozc.zeroconf, 3000) for info in infos]
        await asyncio.gather(*tasks)
        for info in infos:
            print(f"Info for {info.name}")
            if info:
                addresses = [f"{addr}:{cast(int, info.port)}" for addr in info.parsed_addresses()]
                print(f"  Addresses: {', '.join(addresses)}")
                print(f"  Weight: {info.weight}, priority: {info.priority}")
                print(f"  Server: {info.server}")
                if info.properties:
                    print("  Properties are:")
                    for key, value in info.properties.items():
                        print(f"    {key!r}: {value!r}")
                else:
                    print("  No properties")
            else:
                print("  No info")
            print("\n")


class AsyncRunner:
    def __init__(self, args: Any) -> None:
        self.args = args
        self.threaded_browser: ServiceBrowser | None = None
        self.aiozc: AsyncZeroconf | None = None

    async def async_run(self) -> None:
        self.aiozc = AsyncZeroconf(ip_version=ip_version)
        assert self.aiozc is not None

        def on_service_state_change(
            zeroconf: Zeroconf,
            service_type: str,
            state_change: ServiceStateChange,
            name: str,
        ) -> None:
            """Dummy handler."""

        self.threaded_browser = ServiceBrowser(
            self.aiozc.zeroconf, [HAP_TYPE], handlers=[on_service_state_change]
        )
        await async_watch_services(self.aiozc)

    async def async_close(self) -> None:
        assert self.aiozc is not None
        assert self.threaded_browser is not None
        self.threaded_browser.cancel()
        await self.aiozc.async_close()


if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)

    parser = argparse.ArgumentParser()
    parser.add_argument("--debug", action="store_true")
    version_group = parser.add_mutually_exclusive_group()
    version_group.add_argument("--v6", action="store_true")
    version_group.add_argument("--v6-only", action="store_true")
    args = parser.parse_args()

    if args.debug:
        logging.getLogger("zeroconf").setLevel(logging.DEBUG)
    if args.v6:
        ip_version = IPVersion.All
    elif args.v6_only:
        ip_version = IPVersion.V6Only
    else:
        ip_version = IPVersion.V4Only

    print(f"Services with {HAP_TYPE} will be shown every 5s, press Ctrl-C to exit...")
    loop = asyncio.get_event_loop()
    runner = AsyncRunner(args)
    try:
        loop.run_until_complete(runner.async_run())
    except KeyboardInterrupt:
        loop.run_until_complete(runner.async_close())
07070100000021000081ED00000000000000000000000167C7AD1600000A8A000000000000000000000000000000000000002C00000000python-zeroconf-0.146.0/examples/browser.py#!/usr/bin/env python

"""Example of browsing for a service.

The default is HTTP and HAP; use --find to search for all available services on the network.
"""

from __future__ import annotations

import argparse
import logging
from time import sleep
from typing import cast

from zeroconf import (
    IPVersion,
    ServiceBrowser,
    ServiceStateChange,
    Zeroconf,
    ZeroconfServiceTypes,
)


def on_service_state_change(
    zeroconf: Zeroconf, service_type: str, name: str, state_change: ServiceStateChange
) -> None:
    print(f"Service {name} of type {service_type} state changed: {state_change}")

    if state_change is ServiceStateChange.Added:
        info = zeroconf.get_service_info(service_type, name)
        print(f"Info from zeroconf.get_service_info: {info!r}")

        if info:
            addresses = [f"{addr}:{cast(int, info.port)}" for addr in info.parsed_scoped_addresses()]
            print(f"  Addresses: {', '.join(addresses)}")
            print(f"  Weight: {info.weight}, priority: {info.priority}")
            print(f"  Server: {info.server}")
            if info.properties:
                print("  Properties are:")
                for key, value in info.properties.items():
                    print(f"    {key!r}: {value!r}")
            else:
                print("  No properties")
        else:
            print("  No info")
        print("\n")


if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)

    parser = argparse.ArgumentParser()
    parser.add_argument("--debug", action="store_true")
    parser.add_argument("--find", action="store_true", help="Browse all available services")
    version_group = parser.add_mutually_exclusive_group()
    version_group.add_argument("--v6-only", action="store_true")
    version_group.add_argument("--v4-only", action="store_true")
    args = parser.parse_args()

    if args.debug:
        logging.getLogger("zeroconf").setLevel(logging.DEBUG)
    if args.v6_only:
        ip_version = IPVersion.V6Only
    elif args.v4_only:
        ip_version = IPVersion.V4Only
    else:
        ip_version = IPVersion.All

    zeroconf = Zeroconf(ip_version=ip_version)

    services = [
        "_http._tcp.local.",
        "_hap._tcp.local.",
        "_esphomelib._tcp.local.",
        "_airplay._tcp.local.",
    ]
    if args.find:
        services = list(ZeroconfServiceTypes.find(zc=zeroconf))

    print(f"\nBrowsing {len(services)} service(s), press Ctrl-C to exit...\n")
    browser = ServiceBrowser(zeroconf, services, handlers=[on_service_state_change])

    try:
        while True:
            sleep(0.1)
    except KeyboardInterrupt:
        pass
    finally:
        zeroconf.close()
07070100000022000081ED00000000000000000000000167C7AD16000005E2000000000000000000000000000000000000003100000000python-zeroconf-0.146.0/examples/registration.py#!/usr/bin/env python

"""Example of announcing a service (in this case, a fake HTTP server)"""

from __future__ import annotations

import argparse
import logging
import socket
from time import sleep

from zeroconf import IPVersion, ServiceInfo, Zeroconf

if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)

    parser = argparse.ArgumentParser()
    parser.add_argument("--debug", action="store_true")
    version_group = parser.add_mutually_exclusive_group()
    version_group.add_argument("--v6", action="store_true")
    version_group.add_argument("--v6-only", action="store_true")
    args = parser.parse_args()

    if args.debug:
        logging.getLogger("zeroconf").setLevel(logging.DEBUG)
    if args.v6:
        ip_version = IPVersion.All
    elif args.v6_only:
        ip_version = IPVersion.V6Only
    else:
        ip_version = IPVersion.V4Only

    desc = {"path": "/~paulsm/"}

    info = ServiceInfo(
        "_http._tcp.local.",
        "Paul's Test Web Site._http._tcp.local.",
        addresses=[socket.inet_aton("127.0.0.1")],
        port=80,
        properties=desc,
        server="ash-2.local.",
    )

    zeroconf = Zeroconf(ip_version=ip_version)
    print("Registration of a service, press Ctrl-C to exit...")
    zeroconf.register_service(info)
    try:
        while True:
            sleep(0.1)
    except KeyboardInterrupt:
        pass
    finally:
        print("Unregistering...")
        zeroconf.unregister_service(info)
        zeroconf.close()
07070100000023000081ED00000000000000000000000167C7AD160000042E000000000000000000000000000000000000003400000000python-zeroconf-0.146.0/examples/resolve_address.py#!/usr/bin/env python

"""Example of resolving a name to an IP address."""

from __future__ import annotations

import asyncio
import logging
import sys

from zeroconf import AddressResolver, IPVersion
from zeroconf.asyncio import AsyncZeroconf


async def resolve_name(name: str) -> None:
    aiozc = AsyncZeroconf()
    await aiozc.zeroconf.async_wait_for_start()
    resolver = AddressResolver(name)
    if await resolver.async_request(aiozc.zeroconf, 3000):
        print(f"{name} IP addresses:", resolver.ip_addresses_by_version(IPVersion.All))
    else:
        print(f"Name {name} not resolved")
    await aiozc.async_close()


if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    argv = sys.argv.copy()
    if "--debug" in argv:
        logging.getLogger("zeroconf").setLevel(logging.DEBUG)
        argv.remove("--debug")

    if len(argv) < 2 or not argv[1]:
        raise ValueError("Usage: resolve_address.py [--debug] <name>")

    name = argv[1]
    if not name.endswith("."):
        name += "."

    asyncio.run(resolve_name(name))
07070100000024000081ED00000000000000000000000167C7AD160000023A000000000000000000000000000000000000002D00000000python-zeroconf-0.146.0/examples/resolver.py#!/usr/bin/env python

"""Example of resolving a service with a known name"""

from __future__ import annotations

import logging
import sys

from zeroconf import Zeroconf

TYPE = "_test._tcp.local."
NAME = "My Service Name"

if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    if len(sys.argv) > 1:
        assert sys.argv[1:] == ["--debug"]
        logging.getLogger("zeroconf").setLevel(logging.DEBUG)

    zeroconf = Zeroconf()

    try:
        print(zeroconf.get_service_info(TYPE, NAME + "." + TYPE))
    finally:
        zeroconf.close()
07070100000025000081ED00000000000000000000000167C7AD1600000717000000000000000000000000000000000000002E00000000python-zeroconf-0.146.0/examples/self_test.py#!/usr/bin/env python
from __future__ import annotations

import logging
import socket
import sys

from zeroconf import ServiceInfo, Zeroconf, __version__

if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    if len(sys.argv) > 1:
        assert sys.argv[1:] == ["--debug"]
        logging.getLogger("zeroconf").setLevel(logging.DEBUG)

    # Test a few module features, including service registration, service
    # query (for ZOE), and service unregistration.
    print(f"Multicast DNS Service Discovery for Python, version {__version__}")
    r = Zeroconf()
    print("1. Testing registration of a service...")
    desc = {"version": "0.10", "a": "test value", "b": "another value"}
    addresses = [socket.inet_aton("127.0.0.1")]
    expected = {"127.0.0.1"}
    if socket.has_ipv6:
        addresses.append(socket.inet_pton(socket.AF_INET6, "::1"))
        expected.add("::1")
    info = ServiceInfo(
        "_http._tcp.local.",
        "My Service Name._http._tcp.local.",
        addresses=addresses,
        port=1234,
        properties=desc,
    )
    print("   Registering service...")
    r.register_service(info)
    print("   Registration done.")
    print("2. Testing query of service information...")
    print(f"   Getting ZOE service: {r.get_service_info('_http._tcp.local.', 'ZOE._http._tcp.local.')}")
    print("   Query done.")
    print("3. Testing query of own service...")
    queried_info = r.get_service_info("_http._tcp.local.", "My Service Name._http._tcp.local.")
    assert queried_info
    assert set(queried_info.parsed_addresses()) == expected
    print(f"   Getting self: {queried_info}")
    print("   Query done.")
    print("4. Testing unregister of service information...")
    r.unregister_service(info)
    print("   Unregister done.")
    r.close()
07070100000026000081A400000000000000000000000167C7AD160001577D000000000000000000000000000000000000002400000000python-zeroconf-0.146.0/poetry.lock# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand.

[[package]]
name = "alabaster"
version = "0.7.16"
description = "A light, configurable Sphinx theme"
optional = false
python-versions = ">=3.9"
files = [
    {file = "alabaster-0.7.16-py3-none-any.whl", hash = "sha256:b46733c07dce03ae4e150330b975c75737fa60f0a7c591b6c8bf4928a28e2c92"},
    {file = "alabaster-0.7.16.tar.gz", hash = "sha256:75a8b99c28a5dad50dd7f8ccdd447a121ddb3892da9e53d1ca5cca3106d58d65"},
]

[[package]]
name = "babel"
version = "2.16.0"
description = "Internationalization utilities"
optional = false
python-versions = ">=3.8"
files = [
    {file = "babel-2.16.0-py3-none-any.whl", hash = "sha256:368b5b98b37c06b7daf6696391c3240c938b37767d4584413e8438c5c435fa8b"},
    {file = "babel-2.16.0.tar.gz", hash = "sha256:d1f3554ca26605fe173f3de0c65f750f5a42f924499bf134de6423582298e316"},
]

[package.extras]
dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"]

[[package]]
name = "certifi"
version = "2024.12.14"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
files = [
    {file = "certifi-2024.12.14-py3-none-any.whl", hash = "sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56"},
    {file = "certifi-2024.12.14.tar.gz", hash = "sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db"},
]

[[package]]
name = "cffi"
version = "1.17.1"
description = "Foreign Function Interface for Python calling C code."
optional = false
python-versions = ">=3.8"
files = [
    {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"},
    {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"},
    {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"},
    {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"},
    {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"},
    {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"},
    {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"},
    {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"},
    {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"},
    {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"},
    {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"},
    {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"},
    {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"},
    {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"},
    {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"},
    {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"},
    {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"},
    {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"},
    {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"},
    {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"},
    {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"},
    {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"},
    {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"},
    {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"},
    {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"},
    {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"},
    {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"},
    {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"},
    {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"},
    {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"},
    {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"},
    {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"},
    {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"},
    {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"},
    {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"},
    {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"},
    {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"},
    {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"},
    {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"},
    {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"},
    {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"},
    {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"},
    {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"},
    {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"},
    {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"},
    {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"},
    {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"},
    {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"},
    {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"},
    {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"},
    {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"},
    {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"},
    {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"},
    {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"},
    {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"},
    {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"},
    {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"},
    {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"},
    {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"},
    {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"},
    {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"},
    {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"},
    {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"},
    {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"},
    {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"},
    {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"},
    {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"},
]

[package.dependencies]
pycparser = "*"

[[package]]
name = "charset-normalizer"
version = "3.4.1"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false
python-versions = ">=3.7"
files = [
    {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"},
    {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"},
    {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037"},
    {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f"},
    {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a"},
    {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a"},
    {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247"},
    {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408"},
    {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb"},
    {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d"},
    {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807"},
    {file = "charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f"},
    {file = "charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f"},
    {file = "charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125"},
    {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1"},
    {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3"},
    {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd"},
    {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00"},
    {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12"},
    {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77"},
    {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146"},
    {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd"},
    {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6"},
    {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8"},
    {file = "charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b"},
    {file = "charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76"},
    {file = "charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545"},
    {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7"},
    {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757"},
    {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa"},
    {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d"},
    {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616"},
    {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b"},
    {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d"},
    {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a"},
    {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9"},
    {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1"},
    {file = "charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35"},
    {file = "charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f"},
    {file = "charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda"},
    {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313"},
    {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9"},
    {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b"},
    {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11"},
    {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f"},
    {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd"},
    {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2"},
    {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886"},
    {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601"},
    {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd"},
    {file = "charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407"},
    {file = "charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971"},
    {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089"},
    {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d"},
    {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf"},
    {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e"},
    {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a"},
    {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd"},
    {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534"},
    {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e"},
    {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e"},
    {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa"},
    {file = "charset_normalizer-3.4.1-cp37-cp37m-win32.whl", hash = "sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487"},
    {file = "charset_normalizer-3.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d"},
    {file = "charset_normalizer-3.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c"},
    {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9"},
    {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8"},
    {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6"},
    {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c"},
    {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a"},
    {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd"},
    {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd"},
    {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824"},
    {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca"},
    {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b"},
    {file = "charset_normalizer-3.4.1-cp38-cp38-win32.whl", hash = "sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e"},
    {file = "charset_normalizer-3.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4"},
    {file = "charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41"},
    {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f"},
    {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2"},
    {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770"},
    {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4"},
    {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537"},
    {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496"},
    {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78"},
    {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7"},
    {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6"},
    {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294"},
    {file = "charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5"},
    {file = "charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765"},
    {file = "charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85"},
    {file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"},
]

[[package]]
name = "colorama"
version = "0.4.6"
description = "Cross-platform colored terminal text."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
files = [
    {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
    {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]

[[package]]
name = "coverage"
version = "7.6.10"
description = "Code coverage measurement for Python"
optional = false
python-versions = ">=3.9"
files = [
    {file = "coverage-7.6.10-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5c912978f7fbf47ef99cec50c4401340436d200d41d714c7a4766f377c5b7b78"},
    {file = "coverage-7.6.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a01ec4af7dfeb96ff0078ad9a48810bb0cc8abcb0115180c6013a6b26237626c"},
    {file = "coverage-7.6.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3b204c11e2b2d883946fe1d97f89403aa1811df28ce0447439178cc7463448a"},
    {file = "coverage-7.6.10-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32ee6d8491fcfc82652a37109f69dee9a830e9379166cb73c16d8dc5c2915165"},
    {file = "coverage-7.6.10-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675cefc4c06e3b4c876b85bfb7c59c5e2218167bbd4da5075cbe3b5790a28988"},
    {file = "coverage-7.6.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f4f620668dbc6f5e909a0946a877310fb3d57aea8198bde792aae369ee1c23b5"},
    {file = "coverage-7.6.10-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:4eea95ef275de7abaef630c9b2c002ffbc01918b726a39f5a4353916ec72d2f3"},
    {file = "coverage-7.6.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e2f0280519e42b0a17550072861e0bc8a80a0870de260f9796157d3fca2733c5"},
    {file = "coverage-7.6.10-cp310-cp310-win32.whl", hash = "sha256:bc67deb76bc3717f22e765ab3e07ee9c7a5e26b9019ca19a3b063d9f4b874244"},
    {file = "coverage-7.6.10-cp310-cp310-win_amd64.whl", hash = "sha256:0f460286cb94036455e703c66988851d970fdfd8acc2a1122ab7f4f904e4029e"},
    {file = "coverage-7.6.10-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ea3c8f04b3e4af80e17bab607c386a830ffc2fb88a5484e1df756478cf70d1d3"},
    {file = "coverage-7.6.10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:507a20fc863cae1d5720797761b42d2d87a04b3e5aeb682ef3b7332e90598f43"},
    {file = "coverage-7.6.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d37a84878285b903c0fe21ac8794c6dab58150e9359f1aaebbeddd6412d53132"},
    {file = "coverage-7.6.10-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a534738b47b0de1995f85f582d983d94031dffb48ab86c95bdf88dc62212142f"},
    {file = "coverage-7.6.10-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d7a2bf79378d8fb8afaa994f91bfd8215134f8631d27eba3e0e2c13546ce994"},
    {file = "coverage-7.6.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6713ba4b4ebc330f3def51df1d5d38fad60b66720948112f114968feb52d3f99"},
    {file = "coverage-7.6.10-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ab32947f481f7e8c763fa2c92fd9f44eeb143e7610c4ca9ecd6a36adab4081bd"},
    {file = "coverage-7.6.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7bbd8c8f1b115b892e34ba66a097b915d3871db7ce0e6b9901f462ff3a975377"},
    {file = "coverage-7.6.10-cp311-cp311-win32.whl", hash = "sha256:299e91b274c5c9cdb64cbdf1b3e4a8fe538a7a86acdd08fae52301b28ba297f8"},
    {file = "coverage-7.6.10-cp311-cp311-win_amd64.whl", hash = "sha256:489a01f94aa581dbd961f306e37d75d4ba16104bbfa2b0edb21d29b73be83609"},
    {file = "coverage-7.6.10-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:27c6e64726b307782fa5cbe531e7647aee385a29b2107cd87ba7c0105a5d3853"},
    {file = "coverage-7.6.10-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c56e097019e72c373bae32d946ecf9858fda841e48d82df7e81c63ac25554078"},
    {file = "coverage-7.6.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7827a5bc7bdb197b9e066cdf650b2887597ad124dd99777332776f7b7c7d0d0"},
    {file = "coverage-7.6.10-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:204a8238afe787323a8b47d8be4df89772d5c1e4651b9ffa808552bdf20e1d50"},
    {file = "coverage-7.6.10-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e67926f51821b8e9deb6426ff3164870976fe414d033ad90ea75e7ed0c2e5022"},
    {file = "coverage-7.6.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e78b270eadb5702938c3dbe9367f878249b5ef9a2fcc5360ac7bff694310d17b"},
    {file = "coverage-7.6.10-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:714f942b9c15c3a7a5fe6876ce30af831c2ad4ce902410b7466b662358c852c0"},
    {file = "coverage-7.6.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:abb02e2f5a3187b2ac4cd46b8ced85a0858230b577ccb2c62c81482ca7d18852"},
    {file = "coverage-7.6.10-cp312-cp312-win32.whl", hash = "sha256:55b201b97286cf61f5e76063f9e2a1d8d2972fc2fcfd2c1272530172fd28c359"},
    {file = "coverage-7.6.10-cp312-cp312-win_amd64.whl", hash = "sha256:e4ae5ac5e0d1e4edfc9b4b57b4cbecd5bc266a6915c500f358817a8496739247"},
    {file = "coverage-7.6.10-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:05fca8ba6a87aabdd2d30d0b6c838b50510b56cdcfc604d40760dae7153b73d9"},
    {file = "coverage-7.6.10-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9e80eba8801c386f72e0712a0453431259c45c3249f0009aff537a517b52942b"},
    {file = "coverage-7.6.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a372c89c939d57abe09e08c0578c1d212e7a678135d53aa16eec4430adc5e690"},
    {file = "coverage-7.6.10-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ec22b5e7fe7a0fa8509181c4aac1db48f3dd4d3a566131b313d1efc102892c18"},
    {file = "coverage-7.6.10-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26bcf5c4df41cad1b19c84af71c22cbc9ea9a547fc973f1f2cc9a290002c8b3c"},
    {file = "coverage-7.6.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4e4630c26b6084c9b3cb53b15bd488f30ceb50b73c35c5ad7871b869cb7365fd"},
    {file = "coverage-7.6.10-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2396e8116db77789f819d2bc8a7e200232b7a282c66e0ae2d2cd84581a89757e"},
    {file = "coverage-7.6.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:79109c70cc0882e4d2d002fe69a24aa504dec0cc17169b3c7f41a1d341a73694"},
    {file = "coverage-7.6.10-cp313-cp313-win32.whl", hash = "sha256:9e1747bab246d6ff2c4f28b4d186b205adced9f7bd9dc362051cc37c4a0c7bd6"},
    {file = "coverage-7.6.10-cp313-cp313-win_amd64.whl", hash = "sha256:254f1a3b1eef5f7ed23ef265eaa89c65c8c5b6b257327c149db1ca9d4a35f25e"},
    {file = "coverage-7.6.10-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:2ccf240eb719789cedbb9fd1338055de2761088202a9a0b73032857e53f612fe"},
    {file = "coverage-7.6.10-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:0c807ca74d5a5e64427c8805de15b9ca140bba13572d6d74e262f46f50b13273"},
    {file = "coverage-7.6.10-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2bcfa46d7709b5a7ffe089075799b902020b62e7ee56ebaed2f4bdac04c508d8"},
    {file = "coverage-7.6.10-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4e0de1e902669dccbf80b0415fb6b43d27edca2fbd48c74da378923b05316098"},
    {file = "coverage-7.6.10-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f7b444c42bbc533aaae6b5a2166fd1a797cdb5eb58ee51a92bee1eb94a1e1cb"},
    {file = "coverage-7.6.10-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b330368cb99ef72fcd2dc3ed260adf67b31499584dc8a20225e85bfe6f6cfed0"},
    {file = "coverage-7.6.10-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:9a7cfb50515f87f7ed30bc882f68812fd98bc2852957df69f3003d22a2aa0abf"},
    {file = "coverage-7.6.10-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6f93531882a5f68c28090f901b1d135de61b56331bba82028489bc51bdd818d2"},
    {file = "coverage-7.6.10-cp313-cp313t-win32.whl", hash = "sha256:89d76815a26197c858f53c7f6a656686ec392b25991f9e409bcef020cd532312"},
    {file = "coverage-7.6.10-cp313-cp313t-win_amd64.whl", hash = "sha256:54a5f0f43950a36312155dae55c505a76cd7f2b12d26abeebbe7a0b36dbc868d"},
    {file = "coverage-7.6.10-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:656c82b8a0ead8bba147de9a89bda95064874c91a3ed43a00e687f23cc19d53a"},
    {file = "coverage-7.6.10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ccc2b70a7ed475c68ceb548bf69cec1e27305c1c2606a5eb7c3afff56a1b3b27"},
    {file = "coverage-7.6.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5e37dc41d57ceba70956fa2fc5b63c26dba863c946ace9705f8eca99daecdc4"},
    {file = "coverage-7.6.10-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0aa9692b4fdd83a4647eeb7db46410ea1322b5ed94cd1715ef09d1d5922ba87f"},
    {file = "coverage-7.6.10-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa744da1820678b475e4ba3dfd994c321c5b13381d1041fe9c608620e6676e25"},
    {file = "coverage-7.6.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c0b1818063dc9e9d838c09e3a473c1422f517889436dd980f5d721899e66f315"},
    {file = "coverage-7.6.10-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:59af35558ba08b758aec4d56182b222976330ef8d2feacbb93964f576a7e7a90"},
    {file = "coverage-7.6.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7ed2f37cfce1ce101e6dffdfd1c99e729dd2ffc291d02d3e2d0af8b53d13840d"},
    {file = "coverage-7.6.10-cp39-cp39-win32.whl", hash = "sha256:4bcc276261505d82f0ad426870c3b12cb177752834a633e737ec5ee79bbdff18"},
    {file = "coverage-7.6.10-cp39-cp39-win_amd64.whl", hash = "sha256:457574f4599d2b00f7f637a0700a6422243b3565509457b2dbd3f50703e11f59"},
    {file = "coverage-7.6.10-pp39.pp310-none-any.whl", hash = "sha256:fd34e7b3405f0cc7ab03d54a334c17a9e802897580d964bd8c2001f4b9fd488f"},
    {file = "coverage-7.6.10.tar.gz", hash = "sha256:7fb105327c8f8f0682e29843e2ff96af9dcbe5bab8eeb4b398c6a33a16d80a23"},
]

[package.dependencies]
tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""}

[package.extras]
toml = ["tomli"]

[[package]]
name = "cython"
version = "3.0.12"
description = "The Cython compiler for writing C extensions in the Python language."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7"
files = [
    {file = "Cython-3.0.12-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ba67eee9413b66dd9fbacd33f0bc2e028a2a120991d77b5fd4b19d0b1e4039b9"},
    {file = "Cython-3.0.12-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bee2717e5b5f7d966d0c6e27d2efe3698c357aa4d61bb3201997c7a4f9fe485a"},
    {file = "Cython-3.0.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7cffc3464f641c8d0dda942c7c53015291beea11ec4d32421bed2f13b386b819"},
    {file = "Cython-3.0.12-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d3a8f81980ffbd74e52f9186d8f1654e347d0c44bfea6b5997028977f481a179"},
    {file = "Cython-3.0.12-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8d32856716c369d01f2385ad9177cdd1a11079ac89ea0932dc4882de1aa19174"},
    {file = "Cython-3.0.12-cp310-cp310-win32.whl", hash = "sha256:712c3f31adec140dc60d064a7f84741f50e2c25a8edd7ae746d5eb4d3ef7072a"},
    {file = "Cython-3.0.12-cp310-cp310-win_amd64.whl", hash = "sha256:d6945694c5b9170cfbd5f2c0d00ef7487a2de7aba83713a64ee4ebce7fad9e05"},
    {file = "Cython-3.0.12-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:feb86122a823937cc06e4c029d80ff69f082ebb0b959ab52a5af6cdd271c5dc3"},
    {file = "Cython-3.0.12-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfdbea486e702c328338314adb8e80f5f9741f06a0ae83aaec7463bc166d12e8"},
    {file = "Cython-3.0.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:563de1728c8e48869d2380a1b76bbc1b1b1d01aba948480d68c1d05e52d20c92"},
    {file = "Cython-3.0.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:398d4576c1e1f6316282aa0b4a55139254fbed965cba7813e6d9900d3092b128"},
    {file = "Cython-3.0.12-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1e5eadef80143026944ea8f9904715a008f5108d1d644a89f63094cc37351e73"},
    {file = "Cython-3.0.12-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5a93cbda00a5451175b97dea5a9440a3fcee9e54b4cba7a7dbcba9a764b22aec"},
    {file = "Cython-3.0.12-cp311-cp311-win32.whl", hash = "sha256:3109e1d44425a2639e9a677b66cd7711721a5b606b65867cb2d8ef7a97e2237b"},
    {file = "Cython-3.0.12-cp311-cp311-win_amd64.whl", hash = "sha256:d4b70fc339adba1e2111b074ee6119fe9fd6072c957d8597bce9a0dd1c3c6784"},
    {file = "Cython-3.0.12-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fe030d4a00afb2844f5f70896b7f2a1a0d7da09bf3aa3d884cbe5f73fff5d310"},
    {file = "Cython-3.0.12-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7fec4f052b8fe173fe70eae75091389955b9a23d5cec3d576d21c5913b49d47"},
    {file = "Cython-3.0.12-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0faa5e39e5c8cdf6f9c3b1c3f24972826e45911e7f5b99cf99453fca5432f45e"},
    {file = "Cython-3.0.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2d53de996ed340e9ab0fc85a88aaa8932f2591a2746e1ab1c06e262bd4ec4be7"},
    {file = "Cython-3.0.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ea3a0e19ab77266c738aa110684a753a04da4e709472cadeff487133354d6ab8"},
    {file = "Cython-3.0.12-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c151082884be468f2f405645858a857298ac7f7592729e5b54788b5c572717ba"},
    {file = "Cython-3.0.12-cp312-cp312-win32.whl", hash = "sha256:3083465749911ac3b2ce001b6bf17f404ac9dd35d8b08469d19dc7e717f5877a"},
    {file = "Cython-3.0.12-cp312-cp312-win_amd64.whl", hash = "sha256:c0b91c7ebace030dd558ea28730de8c580680b50768e5af66db2904a3716c3e3"},
    {file = "Cython-3.0.12-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4ee6f1ea1bead8e6cbc4e64571505b5d8dbdb3b58e679d31f3a84160cebf1a1a"},
    {file = "Cython-3.0.12-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57aefa6d3341109e46ec1a13e3a763aaa2cbeb14e82af2485b318194be1d9170"},
    {file = "Cython-3.0.12-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:879ae9023958d63c0675015369384642d0afb9c9d1f3473df9186c42f7a9d265"},
    {file = "Cython-3.0.12-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:36fcd584dae547de6f095500a380f4a0cce72b7a7e409e9ff03cb9beed6ac7a1"},
    {file = "Cython-3.0.12-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:62b79dcc0de49efe9e84b9d0e2ae0a6fc9b14691a65565da727aa2e2e63c6a28"},
    {file = "Cython-3.0.12-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4aa255781b093a8401109d8f2104bbb2e52de7639d5896aefafddc85c30e0894"},
    {file = "Cython-3.0.12-cp313-cp313-win32.whl", hash = "sha256:77d48f2d4bab9fe1236eb753d18f03e8b2619af5b6f05d51df0532a92dfb38ab"},
    {file = "Cython-3.0.12-cp313-cp313-win_amd64.whl", hash = "sha256:86c304b20bd57c727c7357e90d5ba1a2b6f1c45492de2373814d7745ef2e63b4"},
    {file = "Cython-3.0.12-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:ff5c0b6a65b08117d0534941d404833d516dac422eee88c6b4fd55feb409a5ed"},
    {file = "Cython-3.0.12-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:680f1d6ed4436ae94805db264d6155ed076d2835d84f20dcb31a7a3ad7f8668c"},
    {file = "Cython-3.0.12-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ebc24609613fa06d0d896309f7164ba168f7e8d71c1e490ed2a08d23351c3f41"},
    {file = "Cython-3.0.12-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c1879c073e2b34924ce9b7ca64c212705dcc416af4337c45f371242b2e5f6d32"},
    {file = "Cython-3.0.12-cp36-cp36m-musllinux_1_2_aarch64.whl", hash = "sha256:bfb75123dd4ff767baa37d7036da0de2dfb6781ff256eef69b11b88b9a0691d1"},
    {file = "Cython-3.0.12-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:f39640f8df0400cde6882e23c734f15bb8196de0a008ae5dc6c8d1ec5957d7c8"},
    {file = "Cython-3.0.12-cp36-cp36m-win32.whl", hash = "sha256:8c9efe9a0895abee3cadfdad4130b30f7b5e57f6e6a51ef2a44f9fc66a913880"},
    {file = "Cython-3.0.12-cp36-cp36m-win_amd64.whl", hash = "sha256:63d840f2975e44d74512f8f34f1f7cb8121c9428e26a3f6116ff273deb5e60a2"},
    {file = "Cython-3.0.12-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:75c5acd40b97cff16fadcf6901a91586cbca5dcdba81f738efaf1f4c6bc8dccb"},
    {file = "Cython-3.0.12-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e62564457851db1c40399bd95a5346b9bb99e17a819bf583b362f418d8f3457a"},
    {file = "Cython-3.0.12-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ccd1228cc203b1f1b8a3d403f5a20ad1c40e5879b3fbf5851ce09d948982f2c"},
    {file = "Cython-3.0.12-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:25529ee948f44d9a165ff960c49d4903267c20b5edf2df79b45924802e4cca6e"},
    {file = "Cython-3.0.12-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:90cf599372c5a22120609f7d3a963f17814799335d56dd0dcf8fe615980a8ae1"},
    {file = "Cython-3.0.12-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:9f8c48748a9c94ea5d59c26ab49ad0fad514d36f894985879cf3c3ca0e600bf4"},
    {file = "Cython-3.0.12-cp37-cp37m-win32.whl", hash = "sha256:3e4fa855d98bc7bd6a2049e0c7dc0dcf595e2e7f571a26e808f3efd84d2db374"},
    {file = "Cython-3.0.12-cp37-cp37m-win_amd64.whl", hash = "sha256:120681093772bf3600caddb296a65b352a0d3556e962b9b147efcfb8e8c9801b"},
    {file = "Cython-3.0.12-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:731d719423e041242c9303c80cae4327467299b90ffe62d4cc407e11e9ea3160"},
    {file = "Cython-3.0.12-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3238a29f37999e27494d120983eca90d14896b2887a0bd858a381204549137a"},
    {file = "Cython-3.0.12-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b588c0a089a9f4dd316d2f9275230bad4a7271e5af04e1dc41d2707c816be44b"},
    {file = "Cython-3.0.12-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ab9f5198af74eb16502cc143cdde9ca1cbbf66ea2912e67440dd18a36e3b5fa"},
    {file = "Cython-3.0.12-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:8ee841c0e114efa1e849c281ac9b8df8aa189af10b4a103b1c5fd71cbb799679"},
    {file = "Cython-3.0.12-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:43c48b5789398b228ea97499f5b864843ba9b1ab837562a9227c6f58d16ede8b"},
    {file = "Cython-3.0.12-cp38-cp38-win32.whl", hash = "sha256:5e5f17c48a4f41557fbcc7ee660ccfebe4536a34c557f553b6893c1b3c83df2d"},
    {file = "Cython-3.0.12-cp38-cp38-win_amd64.whl", hash = "sha256:309c081057930bb79dc9ea3061a1af5086c679c968206e9c9c2ec90ab7cb471a"},
    {file = "Cython-3.0.12-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54115fcc126840926ff3b53cfd2152eae17b3522ae7f74888f8a41413bd32f25"},
    {file = "Cython-3.0.12-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:629db614b9c364596d7c975fa3fb3978e8c5349524353dbe11429896a783fc1e"},
    {file = "Cython-3.0.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:af081838b0f9e12a83ec4c3809a00a64c817f489f7c512b0e3ecaf5f90a2a816"},
    {file = "Cython-3.0.12-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:34ce459808f7d8d5d4007bc5486fe50532529096b43957af6cbffcb4d9cc5c8d"},
    {file = "Cython-3.0.12-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d6c6cd6a75c8393e6805d17f7126b96a894f310a1a9ea91c47d141fb9341bfa8"},
    {file = "Cython-3.0.12-cp39-cp39-win32.whl", hash = "sha256:a4032e48d4734d2df68235d21920c715c451ac9de15fa14c71b378e8986b83be"},
    {file = "Cython-3.0.12-cp39-cp39-win_amd64.whl", hash = "sha256:dcdc3e5d4ce0e7a4af6903ed580833015641e968d18d528d8371e2435a34132c"},
    {file = "Cython-3.0.12-py2.py3-none-any.whl", hash = "sha256:0038c9bae46c459669390e53a1ec115f8096b2e4647ae007ff1bf4e6dee92806"},
    {file = "cython-3.0.12.tar.gz", hash = "sha256:b988bb297ce76c671e28c97d017b95411010f7c77fa6623dd0bb47eed1aee1bc"},
]

[[package]]
name = "docutils"
version = "0.21.2"
description = "Docutils -- Python Documentation Utilities"
optional = false
python-versions = ">=3.9"
files = [
    {file = "docutils-0.21.2-py3-none-any.whl", hash = "sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2"},
    {file = "docutils-0.21.2.tar.gz", hash = "sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f"},
]

[[package]]
name = "exceptiongroup"
version = "1.2.2"
description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
files = [
    {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"},
    {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"},
]

[package.extras]
test = ["pytest (>=6)"]

[[package]]
name = "idna"
version = "3.10"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.6"
files = [
    {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
    {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
]

[package.extras]
all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]

[[package]]
name = "ifaddr"
version = "0.2.0"
description = "Cross-platform network interface and IP address enumeration library"
optional = false
python-versions = "*"
files = [
    {file = "ifaddr-0.2.0-py3-none-any.whl", hash = "sha256:085e0305cfe6f16ab12d72e2024030f5d52674afad6911bb1eee207177b8a748"},
    {file = "ifaddr-0.2.0.tar.gz", hash = "sha256:cc0cbfcaabf765d44595825fb96a99bb12c79716b73b44330ea38ee2b0c4aed4"},
]

[[package]]
name = "imagesize"
version = "1.4.1"
description = "Getting image size from png/jpeg/jpeg2000/gif file"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
files = [
    {file = "imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b"},
    {file = "imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a"},
]

[[package]]
name = "importlib-metadata"
version = "8.5.0"
description = "Read metadata from Python packages"
optional = false
python-versions = ">=3.8"
files = [
    {file = "importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b"},
    {file = "importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7"},
]

[package.dependencies]
zipp = ">=3.20"

[package.extras]
check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
cover = ["pytest-cov"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
enabler = ["pytest-enabler (>=2.2)"]
perf = ["ipython"]
test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"]
type = ["pytest-mypy"]

[[package]]
name = "iniconfig"
version = "2.0.0"
description = "brain-dead simple config-ini parsing"
optional = false
python-versions = ">=3.7"
files = [
    {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
    {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
]

[[package]]
name = "jinja2"
version = "3.1.5"
description = "A very fast and expressive template engine."
optional = false
python-versions = ">=3.7"
files = [
    {file = "jinja2-3.1.5-py3-none-any.whl", hash = "sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb"},
    {file = "jinja2-3.1.5.tar.gz", hash = "sha256:8fefff8dc3034e27bb80d67c671eb8a9bc424c0ef4c0826edbff304cceff43bb"},
]

[package.dependencies]
MarkupSafe = ">=2.0"

[package.extras]
i18n = ["Babel (>=2.7)"]

[[package]]
name = "markdown-it-py"
version = "3.0.0"
description = "Python port of markdown-it. Markdown parsing, done right!"
optional = false
python-versions = ">=3.8"
files = [
    {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"},
    {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"},
]

[package.dependencies]
mdurl = ">=0.1,<1.0"

[package.extras]
benchmarking = ["psutil", "pytest", "pytest-benchmark"]
code-style = ["pre-commit (>=3.0,<4.0)"]
compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"]
linkify = ["linkify-it-py (>=1,<3)"]
plugins = ["mdit-py-plugins"]
profiling = ["gprof2dot"]
rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"]
testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"]

[[package]]
name = "markupsafe"
version = "3.0.2"
description = "Safely add untrusted strings to HTML/XML markup."
optional = false
python-versions = ">=3.9"
files = [
    {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"},
    {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"},
    {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579"},
    {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d"},
    {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb"},
    {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b"},
    {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c"},
    {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171"},
    {file = "MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50"},
    {file = "MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a"},
    {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d"},
    {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93"},
    {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832"},
    {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84"},
    {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca"},
    {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798"},
    {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e"},
    {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4"},
    {file = "MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d"},
    {file = "MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b"},
    {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf"},
    {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225"},
    {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028"},
    {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8"},
    {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c"},
    {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557"},
    {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22"},
    {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48"},
    {file = "MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30"},
    {file = "MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87"},
    {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd"},
    {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430"},
    {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094"},
    {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396"},
    {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79"},
    {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a"},
    {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca"},
    {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c"},
    {file = "MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1"},
    {file = "MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f"},
    {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c"},
    {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb"},
    {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c"},
    {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d"},
    {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe"},
    {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5"},
    {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a"},
    {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9"},
    {file = "MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6"},
    {file = "MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f"},
    {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a"},
    {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff"},
    {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13"},
    {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144"},
    {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29"},
    {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0"},
    {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0"},
    {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178"},
    {file = "MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f"},
    {file = "MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a"},
    {file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"},
]

[[package]]
name = "mdurl"
version = "0.1.2"
description = "Markdown URL utilities"
optional = false
python-versions = ">=3.7"
files = [
    {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"},
    {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"},
]

[[package]]
name = "packaging"
version = "24.2"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.8"
files = [
    {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"},
    {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"},
]

[[package]]
name = "pluggy"
version = "1.5.0"
description = "plugin and hook calling mechanisms for python"
optional = false
python-versions = ">=3.8"
files = [
    {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"},
    {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"},
]

[package.extras]
dev = ["pre-commit", "tox"]
testing = ["pytest", "pytest-benchmark"]

[[package]]
name = "pycparser"
version = "2.22"
description = "C parser in Python"
optional = false
python-versions = ">=3.8"
files = [
    {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"},
    {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"},
]

[[package]]
name = "pygments"
version = "2.19.1"
description = "Pygments is a syntax highlighting package written in Python."
optional = false
python-versions = ">=3.8"
files = [
    {file = "pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c"},
    {file = "pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f"},
]

[package.extras]
windows-terminal = ["colorama (>=0.4.6)"]

[[package]]
name = "pytest"
version = "8.3.5"
description = "pytest: simple powerful testing with Python"
optional = false
python-versions = ">=3.8"
files = [
    {file = "pytest-8.3.5-py3-none-any.whl", hash = "sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820"},
    {file = "pytest-8.3.5.tar.gz", hash = "sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845"},
]

[package.dependencies]
colorama = {version = "*", markers = "sys_platform == \"win32\""}
exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""}
iniconfig = "*"
packaging = "*"
pluggy = ">=1.5,<2"
tomli = {version = ">=1", markers = "python_version < \"3.11\""}

[package.extras]
dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]

[[package]]
name = "pytest-asyncio"
version = "0.25.3"
description = "Pytest support for asyncio"
optional = false
python-versions = ">=3.9"
files = [
    {file = "pytest_asyncio-0.25.3-py3-none-any.whl", hash = "sha256:9e89518e0f9bd08928f97a3482fdc4e244df17529460bc038291ccaf8f85c7c3"},
    {file = "pytest_asyncio-0.25.3.tar.gz", hash = "sha256:fc1da2cf9f125ada7e710b4ddad05518d4cee187ae9412e9ac9271003497f07a"},
]

[package.dependencies]
pytest = ">=8.2,<9"

[package.extras]
docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1)"]
testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"]

[[package]]
name = "pytest-codspeed"
version = "3.2.0"
description = "Pytest plugin to create CodSpeed benchmarks"
optional = false
python-versions = ">=3.9"
files = [
    {file = "pytest_codspeed-3.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c5165774424c7ab8db7e7acdb539763a0e5657996effefdf0664d7fd95158d34"},
    {file = "pytest_codspeed-3.2.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9bd55f92d772592c04a55209950c50880413ae46876e66bd349ef157075ca26c"},
    {file = "pytest_codspeed-3.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4cf6f56067538f4892baa8d7ab5ef4e45bb59033be1ef18759a2c7fc55b32035"},
    {file = "pytest_codspeed-3.2.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:39a687b05c3d145642061b45ea78e47e12f13ce510104d1a2cda00eee0e36f58"},
    {file = "pytest_codspeed-3.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:46a1afaaa1ac4c2ca5b0700d31ac46d80a27612961d031067d73c6ccbd8d3c2b"},
    {file = "pytest_codspeed-3.2.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c48ce3af3dfa78413ed3d69d1924043aa1519048dbff46edccf8f35a25dab3c2"},
    {file = "pytest_codspeed-3.2.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:66692506d33453df48b36a84703448cb8b22953eea51f03fbb2eb758dc2bdc4f"},
    {file = "pytest_codspeed-3.2.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:479774f80d0bdfafa16112700df4dbd31bf2a6757fac74795fd79c0a7b3c389b"},
    {file = "pytest_codspeed-3.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:109f9f4dd1088019c3b3f887d003b7d65f98a7736ca1d457884f5aa293e8e81c"},
    {file = "pytest_codspeed-3.2.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e2f69a03b52c9bb041aec1b8ee54b7b6c37a6d0a948786effa4c71157765b6da"},
    {file = "pytest_codspeed-3.2.0-py3-none-any.whl", hash = "sha256:54b5c2e986d6a28e7b0af11d610ea57bd5531cec8326abe486f1b55b09d91c39"},
    {file = "pytest_codspeed-3.2.0.tar.gz", hash = "sha256:f9d1b1a3b2c69cdc0490a1e8b1ced44bffbd0e8e21d81a7160cfdd923f6e8155"},
]

[package.dependencies]
cffi = ">=1.17.1"
importlib-metadata = {version = ">=8.5.0", markers = "python_version < \"3.10\""}
pytest = ">=3.8"
rich = ">=13.8.1"

[package.extras]
compat = ["pytest-benchmark (>=5.0.0,<5.1.0)", "pytest-xdist (>=3.6.1,<3.7.0)"]
lint = ["mypy (>=1.11.2,<1.12.0)", "ruff (>=0.6.5,<0.7.0)"]
test = ["pytest (>=7.0,<8.0)", "pytest-cov (>=4.0.0,<4.1.0)"]

[[package]]
name = "pytest-cov"
version = "6.0.0"
description = "Pytest plugin for measuring coverage."
optional = false
python-versions = ">=3.9"
files = [
    {file = "pytest-cov-6.0.0.tar.gz", hash = "sha256:fde0b595ca248bb8e2d76f020b465f3b107c9632e6a1d1705f17834c89dcadc0"},
    {file = "pytest_cov-6.0.0-py3-none-any.whl", hash = "sha256:eee6f1b9e61008bd34975a4d5bab25801eb31898b032dd55addc93e96fcaaa35"},
]

[package.dependencies]
coverage = {version = ">=7.5", extras = ["toml"]}
pytest = ">=4.6"

[package.extras]
testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"]

[[package]]
name = "pytest-timeout"
version = "2.3.1"
description = "pytest plugin to abort hanging tests"
optional = false
python-versions = ">=3.7"
files = [
    {file = "pytest-timeout-2.3.1.tar.gz", hash = "sha256:12397729125c6ecbdaca01035b9e5239d4db97352320af155b3f5de1ba5165d9"},
    {file = "pytest_timeout-2.3.1-py3-none-any.whl", hash = "sha256:68188cb703edfc6a18fad98dc25a3c61e9f24d644b0b70f33af545219fc7813e"},
]

[package.dependencies]
pytest = ">=7.0.0"

[[package]]
name = "requests"
version = "2.32.3"
description = "Python HTTP for Humans."
optional = false
python-versions = ">=3.8"
files = [
    {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"},
    {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"},
]

[package.dependencies]
certifi = ">=2017.4.17"
charset-normalizer = ">=2,<4"
idna = ">=2.5,<4"
urllib3 = ">=1.21.1,<3"

[package.extras]
socks = ["PySocks (>=1.5.6,!=1.5.7)"]
use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]

[[package]]
name = "rich"
version = "13.9.4"
description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal"
optional = false
python-versions = ">=3.8.0"
files = [
    {file = "rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90"},
    {file = "rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098"},
]

[package.dependencies]
markdown-it-py = ">=2.2.0"
pygments = ">=2.13.0,<3.0.0"
typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.11\""}

[package.extras]
jupyter = ["ipywidgets (>=7.5.1,<9)"]

[[package]]
name = "setuptools"
version = "75.8.2"
description = "Easily download, build, install, upgrade, and uninstall Python packages"
optional = false
python-versions = ">=3.9"
files = [
    {file = "setuptools-75.8.2-py3-none-any.whl", hash = "sha256:558e47c15f1811c1fa7adbd0096669bf76c1d3f433f58324df69f3f5ecac4e8f"},
    {file = "setuptools-75.8.2.tar.gz", hash = "sha256:4880473a969e5f23f2a2be3646b2dfd84af9028716d398e46192f84bc36900d2"},
]

[package.extras]
check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.8.0)"]
core = ["importlib_metadata (>=6)", "jaraco.collections", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"]
cover = ["pytest-cov"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"]
enabler = ["pytest-enabler (>=2.2)"]
test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"]
type = ["importlib_metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.14.*)", "pytest-mypy"]

[[package]]
name = "snowballstemmer"
version = "2.2.0"
description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms."
optional = false
python-versions = "*"
files = [
    {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"},
    {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"},
]

[[package]]
name = "sphinx"
version = "7.4.7"
description = "Python documentation generator"
optional = false
python-versions = ">=3.9"
files = [
    {file = "sphinx-7.4.7-py3-none-any.whl", hash = "sha256:c2419e2135d11f1951cd994d6eb18a1835bd8fdd8429f9ca375dc1f3281bd239"},
    {file = "sphinx-7.4.7.tar.gz", hash = "sha256:242f92a7ea7e6c5b406fdc2615413890ba9f699114a9c09192d7dfead2ee9cfe"},
]

[package.dependencies]
alabaster = ">=0.7.14,<0.8.0"
babel = ">=2.13"
colorama = {version = ">=0.4.6", markers = "sys_platform == \"win32\""}
docutils = ">=0.20,<0.22"
imagesize = ">=1.3"
importlib-metadata = {version = ">=6.0", markers = "python_version < \"3.10\""}
Jinja2 = ">=3.1"
packaging = ">=23.0"
Pygments = ">=2.17"
requests = ">=2.30.0"
snowballstemmer = ">=2.2"
sphinxcontrib-applehelp = "*"
sphinxcontrib-devhelp = "*"
sphinxcontrib-htmlhelp = ">=2.0.0"
sphinxcontrib-jsmath = "*"
sphinxcontrib-qthelp = "*"
sphinxcontrib-serializinghtml = ">=1.1.9"
tomli = {version = ">=2", markers = "python_version < \"3.11\""}

[package.extras]
docs = ["sphinxcontrib-websupport"]
lint = ["flake8 (>=6.0)", "importlib-metadata (>=6.0)", "mypy (==1.10.1)", "pytest (>=6.0)", "ruff (==0.5.2)", "sphinx-lint (>=0.9)", "tomli (>=2)", "types-docutils (==0.21.0.20240711)", "types-requests (>=2.30.0)"]
test = ["cython (>=3.0)", "defusedxml (>=0.7.1)", "pytest (>=8.0)", "setuptools (>=70.0)", "typing_extensions (>=4.9)"]

[[package]]
name = "sphinx-rtd-theme"
version = "3.0.2"
description = "Read the Docs theme for Sphinx"
optional = false
python-versions = ">=3.8"
files = [
    {file = "sphinx_rtd_theme-3.0.2-py2.py3-none-any.whl", hash = "sha256:422ccc750c3a3a311de4ae327e82affdaf59eb695ba4936538552f3b00f4ee13"},
    {file = "sphinx_rtd_theme-3.0.2.tar.gz", hash = "sha256:b7457bc25dda723b20b086a670b9953c859eab60a2a03ee8eb2bb23e176e5f85"},
]

[package.dependencies]
docutils = ">0.18,<0.22"
sphinx = ">=6,<9"
sphinxcontrib-jquery = ">=4,<5"

[package.extras]
dev = ["bump2version", "transifex-client", "twine", "wheel"]

[[package]]
name = "sphinxcontrib-applehelp"
version = "2.0.0"
description = "sphinxcontrib-applehelp is a Sphinx extension which outputs Apple help books"
optional = false
python-versions = ">=3.9"
files = [
    {file = "sphinxcontrib_applehelp-2.0.0-py3-none-any.whl", hash = "sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5"},
    {file = "sphinxcontrib_applehelp-2.0.0.tar.gz", hash = "sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1"},
]

[package.extras]
lint = ["mypy", "ruff (==0.5.5)", "types-docutils"]
standalone = ["Sphinx (>=5)"]
test = ["pytest"]

[[package]]
name = "sphinxcontrib-devhelp"
version = "2.0.0"
description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp documents"
optional = false
python-versions = ">=3.9"
files = [
    {file = "sphinxcontrib_devhelp-2.0.0-py3-none-any.whl", hash = "sha256:aefb8b83854e4b0998877524d1029fd3e6879210422ee3780459e28a1f03a8a2"},
    {file = "sphinxcontrib_devhelp-2.0.0.tar.gz", hash = "sha256:411f5d96d445d1d73bb5d52133377b4248ec79db5c793ce7dbe59e074b4dd1ad"},
]

[package.extras]
lint = ["mypy", "ruff (==0.5.5)", "types-docutils"]
standalone = ["Sphinx (>=5)"]
test = ["pytest"]

[[package]]
name = "sphinxcontrib-htmlhelp"
version = "2.1.0"
description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files"
optional = false
python-versions = ">=3.9"
files = [
    {file = "sphinxcontrib_htmlhelp-2.1.0-py3-none-any.whl", hash = "sha256:166759820b47002d22914d64a075ce08f4c46818e17cfc9470a9786b759b19f8"},
    {file = "sphinxcontrib_htmlhelp-2.1.0.tar.gz", hash = "sha256:c9e2916ace8aad64cc13a0d233ee22317f2b9025b9cf3295249fa985cc7082e9"},
]

[package.extras]
lint = ["mypy", "ruff (==0.5.5)", "types-docutils"]
standalone = ["Sphinx (>=5)"]
test = ["html5lib", "pytest"]

[[package]]
name = "sphinxcontrib-jquery"
version = "4.1"
description = "Extension to include jQuery on newer Sphinx releases"
optional = false
python-versions = ">=2.7"
files = [
    {file = "sphinxcontrib-jquery-4.1.tar.gz", hash = "sha256:1620739f04e36a2c779f1a131a2dfd49b2fd07351bf1968ced074365933abc7a"},
    {file = "sphinxcontrib_jquery-4.1-py2.py3-none-any.whl", hash = "sha256:f936030d7d0147dd026a4f2b5a57343d233f1fc7b363f68b3d4f1cb0993878ae"},
]

[package.dependencies]
Sphinx = ">=1.8"

[[package]]
name = "sphinxcontrib-jsmath"
version = "1.0.1"
description = "A sphinx extension which renders display math in HTML via JavaScript"
optional = false
python-versions = ">=3.5"
files = [
    {file = "sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8"},
    {file = "sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178"},
]

[package.extras]
test = ["flake8", "mypy", "pytest"]

[[package]]
name = "sphinxcontrib-qthelp"
version = "2.0.0"
description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp documents"
optional = false
python-versions = ">=3.9"
files = [
    {file = "sphinxcontrib_qthelp-2.0.0-py3-none-any.whl", hash = "sha256:b18a828cdba941ccd6ee8445dbe72ffa3ef8cbe7505d8cd1fa0d42d3f2d5f3eb"},
    {file = "sphinxcontrib_qthelp-2.0.0.tar.gz", hash = "sha256:4fe7d0ac8fc171045be623aba3e2a8f613f8682731f9153bb2e40ece16b9bbab"},
]

[package.extras]
lint = ["mypy", "ruff (==0.5.5)", "types-docutils"]
standalone = ["Sphinx (>=5)"]
test = ["defusedxml (>=0.7.1)", "pytest"]

[[package]]
name = "sphinxcontrib-serializinghtml"
version = "2.0.0"
description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)"
optional = false
python-versions = ">=3.9"
files = [
    {file = "sphinxcontrib_serializinghtml-2.0.0-py3-none-any.whl", hash = "sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331"},
    {file = "sphinxcontrib_serializinghtml-2.0.0.tar.gz", hash = "sha256:e9d912827f872c029017a53f0ef2180b327c3f7fd23c87229f7a8e8b70031d4d"},
]

[package.extras]
lint = ["mypy", "ruff (==0.5.5)", "types-docutils"]
standalone = ["Sphinx (>=5)"]
test = ["pytest"]

[[package]]
name = "tomli"
version = "2.2.1"
description = "A lil' TOML parser"
optional = false
python-versions = ">=3.8"
files = [
    {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"},
    {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"},
    {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"},
    {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"},
    {file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"},
    {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"},
    {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"},
    {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"},
    {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"},
    {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"},
    {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"},
    {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"},
    {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"},
    {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"},
    {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"},
    {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"},
    {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"},
    {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"},
    {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"},
    {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"},
    {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"},
    {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"},
    {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"},
    {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"},
    {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"},
    {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"},
    {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"},
    {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"},
    {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"},
    {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"},
    {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"},
    {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"},
]

[[package]]
name = "typing-extensions"
version = "4.12.2"
description = "Backported and Experimental Type Hints for Python 3.8+"
optional = false
python-versions = ">=3.8"
files = [
    {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"},
    {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
]

[[package]]
name = "urllib3"
version = "2.3.0"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = ">=3.9"
files = [
    {file = "urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df"},
    {file = "urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d"},
]

[package.extras]
brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
h2 = ["h2 (>=4,<5)"]
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
zstd = ["zstandard (>=0.18.0)"]

[[package]]
name = "zipp"
version = "3.21.0"
description = "Backport of pathlib-compatible object wrapper for zip files"
optional = false
python-versions = ">=3.9"
files = [
    {file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"},
    {file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"},
]

[package.extras]
check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
cover = ["pytest-cov"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
enabler = ["pytest-enabler (>=2.2)"]
test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"]
type = ["pytest-mypy"]

[metadata]
lock-version = "2.0"
python-versions = "^3.9"
content-hash = "ea903296f015035c594eb8cce08d4dedc716074e33644033938dfdb5f047d72e"
07070100000027000081A400000000000000000000000167C7AD1600001D10000000000000000000000000000000000000002700000000python-zeroconf-0.146.0/pyproject.toml[tool.poetry]
name = "zeroconf"
version = "0.146.0"
description = "A pure python implementation of multicast DNS service discovery"
authors = ["Paul Scott-Murphy", "William McBrine", "Jakub Stasiak", "J. Nick Koston"]
license = "LGPL-2.1-or-later"
readme = "README.rst"
repository = "https://github.com/python-zeroconf/python-zeroconf"
documentation = "https://python-zeroconf.readthedocs.io"
classifiers=[
	'Development Status :: 5 - Production/Stable',
	'Intended Audience :: Developers',
	'Intended Audience :: System Administrators',
	'Operating System :: POSIX',
	'Operating System :: POSIX :: Linux',
	'Operating System :: MacOS :: MacOS X',
	'Topic :: Software Development :: Libraries',
	'Programming Language :: Python :: 3.9',
	'Programming Language :: Python :: 3.10',
	'Programming Language :: Python :: 3.11',
	'Programming Language :: Python :: 3.12',
	'Programming Language :: Python :: 3.13',
	'Programming Language :: Python :: Implementation :: CPython',
	'Programming Language :: Python :: Implementation :: PyPy',
]
packages = [
    { include = "zeroconf", from = "src" },
]
include = [
    { path = "CHANGELOG.md", format = "sdist" },
    { path = "COPYING", format = "sdist" },
    { path = "docs", format = "sdist" },
    { path = "tests", format = "sdist" },
]
# Make sure we don't package temporary C files generated by the build process
exclude = [ "**/*.c" ]

[tool.poetry.urls]
"Bug Tracker" = "https://github.com/python-zeroconf/python-zeroconf/issues"
"Changelog" = "https://github.com/python-zeroconf/python-zeroconf/blob/master/CHANGELOG.md"

[tool.poetry.build]
generate-setup-file = true
script = "build_ext.py"

[tool.semantic_release]
branch = "master"
version_toml = ["pyproject.toml:tool.poetry.version"]
version_variables = [
    "src/zeroconf/__init__.py:__version__"
]
build_command = "pip install poetry && poetry build"
tag_format = "{version}"

[tool.semantic_release.changelog]
exclude_commit_patterns = [
    "chore*",
    "ci*",
]

[tool.semantic_release.changelog.environment]
keep_trailing_newline = true

[tool.semantic_release.branches.master]
match = "master"

[tool.semantic_release.branches.noop]
match = "(?!master$)"
prerelease = true

[tool.poetry.dependencies]
python = "^3.9"
ifaddr = ">=0.1.7"

[tool.poetry.group.dev.dependencies]
pytest = ">=7.2,<9.0"
pytest-cov = ">=4,<7"
pytest-asyncio = ">=0.20.3,<0.26.0"
cython = "^3.0.5"
setuptools = ">=65.6.3,<76.0.0"
pytest-timeout = "^2.1.0"
pytest-codspeed = "^3.1.0"

[tool.poetry.group.docs.dependencies]
sphinx = "^7.4.7 || ^8.1.3"
sphinx-rtd-theme = "^3.0.2"

[tool.ruff]
target-version = "py39"
line-length = 110

[tool.ruff.lint]
ignore = [
    "S101", # use of assert
    "S104",  # S104 Possible binding to all interfaces
    "PLR0912", # too many to fix right now
    "TC001", # too many to fix right now
    "TID252", # skip
    "PLR0913", # too late to make changes here
    "PLR0911", # would be breaking change
    "TRY003", # too many to fix
    "SLF001", # design choice
    "TC003", # too many to fix
    "PLR2004" , # too many to fix
    "PGH004",  # too many to fix
    "PGH003", # too many to fix
    "SIM110", # this is slower
    "FURB136", # this is slower for Cython
    "PYI034", # enable when we drop Py3.10
    "PYI032", # breaks Cython
    "PYI041",  # breaks Cython
    "FURB188", # usually slower
    "PERF401", # Cython: closures inside cpdef functions not yet supported
]
select = [
    "ASYNC", # async rules
    "B",   # flake8-bugbear
    "C4",  # flake8-comprehensions
    "S",   # flake8-bandit
    "F",   # pyflake
    "E",   # pycodestyle
    "W",   # pycodestyle
    "UP",  # pyupgrade
    "I",   # isort
    "RUF", # ruff specific
    "FLY", # flynt
    "FURB", # refurb
    "G", # flake8-logging-format   ,
    "PERF", # Perflint
    "PGH", # pygrep-hooks
    "PIE", # flake8-pie
    "PL", # pylint
    "PT", # flake8-pytest-style
    "PTH", # flake8-pathlib
    "PYI", # flake8-pyi
    "RET", # flake8-return
    "RSE", # flake8-raise    ,
    "SIM", # flake8-simplify
    "SLF", # flake8-self
    "SLOT", # flake8-slots
    "T100", # Trace found: {name} used
    "T20", # flake8-print
    "TC", # flake8-type-checking
    "TID", # Tidy imports
    "TRY", # tryceratops
]

[tool.ruff.lint.per-file-ignores]
"tests/**/*" = [
    "D100",
    "D101",
    "D102",
    "D103",
    "D104",
    "S101",
    "SLF001",
    "PLR2004", # too many to fix right now
    "PT011", # too many to fix right now
    "PT006", # too many to fix right now
    "PGH003", # too many to fix right now
    "PT007", # too many to fix right now
    "PT027",  # too many to fix right now
    "PLW0603" , # too many to fix right now
    "PLR0915", # too many to fix right now
    "FLY002", # too many to fix right now
    "PT018", # too many to fix right now
    "PLR0124", # too many to fix right now
    "SIM202" , # too many to fix right now
    "PT012" , # too many to fix right now
    "TID252", # too many to fix right now
    "PLR0913", # skip this one
    "SIM102" , # too many to fix right now
    "SIM108", # too many to fix right now
    "TC003",  # too many to fix right now
    "TC002", # too many to fix right now
    "T201", # too many to fix right now
]
"bench/**/*" = [
    "T201", # intended
]
"examples/**/*" = [
    "T201", # intended
]
"setup.py" = ["D100"]
"conftest.py" = ["D100"]
"docs/conf.py" = ["D100"]

[tool.pylint.BASIC]
class-const-naming-style = "any"
good-names = [
    "e",
    "er",
    "h",
    "i",
    "id",
    "ip",
    "os",
    "n",
    "rr",
    "rs",
    "s",
    "t",
    "wr",
    "zc",
    "_GLOBAL_DONE",
]

[tool.pylint."MESSAGES CONTROL"]
disable = [
    "duplicate-code",
    "fixme",
    "format",
    "missing-class-docstring",
    "missing-function-docstring",
    "too-few-public-methods",
    "too-many-arguments",
    "too-many-instance-attributes",
    "too-many-public-methods"
]


[tool.pytest.ini_options]
addopts = "-v -Wdefault --cov=zeroconf --cov-report=term-missing:skip-covered"
pythonpath = ["src"]

[tool.coverage.run]
branch = true

[tool.coverage.report]
exclude_lines = [
    "pragma: no cover",
    "@overload",
    "if TYPE_CHECKING",
    "raise NotImplementedError",
]


[tool.isort]
profile = "black"
known_first_party = ["zeroconf", "tests"]

[tool.mypy]
warn_unused_configs = true
check_untyped_defs = true
disallow_any_generics = false  # turn this on when we drop 3.7/3.8 support
disallow_incomplete_defs = true
disallow_untyped_defs = true
warn_incomplete_stub = true
mypy_path = "src/"
show_error_codes = true
warn_redundant_casts = false  # Activate for cleanup.
warn_return_any = true
warn_unreachable = true
warn_unused_ignores = false  # Does not always work properly, activate for cleanup.
extra_checks = true
strict_equality = true
strict_bytes = true  # Will be true by default with mypy v2 release.
exclude = [
    'docs/*',
    'bench/*',
]

[[tool.mypy.overrides]]
module = "tests.*"
allow_untyped_defs = true

[[tool.mypy.overrides]]
module = "docs.*"
ignore_errors = true
allow_untyped_defs = true

[[tool.mypy.overrides]]
module = "bench.*"
ignore_errors = true

[build-system]
# 1.5.2 required for https://github.com/python-poetry/poetry/issues/7505
requires = ['setuptools>=65.4.1', 'wheel', 'Cython>=3.0.8', "poetry-core>=1.5.2"]
build-backend = "poetry.core.masonry.api"

[tool.codespell]
ignore-words-list = ["additionals", "HASS"]

[tool.cython-lint]
max-line-length = 110
ignore = ['E501'] # too many to fix right now
07070100000028000041ED00000000000000000000000267C7AD1600000000000000000000000000000000000000000000001C00000000python-zeroconf-0.146.0/src07070100000029000041ED00000000000000000000000267C7AD1600000000000000000000000000000000000000000000002500000000python-zeroconf-0.146.0/src/zeroconf0707010000002A000081A400000000000000000000000167C7AD1600000E26000000000000000000000000000000000000003100000000python-zeroconf-0.146.0/src/zeroconf/__init__.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

from ._cache import DNSCache  # noqa # import needed for backwards compat
from ._core import Zeroconf
from ._dns import (  # noqa # import needed for backwards compat
    DNSAddress,
    DNSEntry,
    DNSHinfo,
    DNSNsec,
    DNSPointer,
    DNSQuestion,
    DNSQuestionType,
    DNSRecord,
    DNSService,
    DNSText,
)
from ._exceptions import (
    AbstractMethodException,
    BadTypeInNameException,
    Error,
    EventLoopBlocked,
    IncomingDecodeError,
    NamePartTooLongException,
    NonUniqueNameException,
    NotRunningException,
    ServiceNameAlreadyRegistered,
)
from ._logger import QuietLogger, log  # noqa # import needed for backwards compat
from ._protocol.incoming import DNSIncoming  # noqa # import needed for backwards compat
from ._protocol.outgoing import DNSOutgoing  # noqa # import needed for backwards compat
from ._record_update import RecordUpdate
from ._services import (  # noqa # import needed for backwards compat
    ServiceListener,
    ServiceStateChange,
    Signal,
    SignalRegistrationInterface,
)
from ._services.browser import ServiceBrowser
from ._services.info import (  # noqa # import needed for backwards compat
    AddressResolver,
    AddressResolverIPv4,
    AddressResolverIPv6,
    ServiceInfo,
    instance_name_from_service_info,
)
from ._services.registry import (  # noqa # import needed for backwards compat
    ServiceRegistry,
)
from ._services.types import ZeroconfServiceTypes
from ._updates import RecordUpdateListener
from ._utils.name import service_type_name  # noqa # import needed for backwards compat
from ._utils.net import (  # noqa # import needed for backwards compat
    InterfaceChoice,
    InterfacesType,
    IPVersion,
    add_multicast_member,
    autodetect_ip_version,
    create_sockets,
    get_all_addresses,
    get_all_addresses_v6,
)
from ._utils.time import (  # noqa # import needed for backwards compat
    current_time_millis,
    millis_to_seconds,
)

__author__ = "Paul Scott-Murphy, William McBrine"
__maintainer__ = "Jakub Stasiak <jakub@stasiak.at>"
__version__ = "0.146.0"
__license__ = "LGPL"


__all__ = [
    "AbstractMethodException",
    "BadTypeInNameException",
    "DNSQuestionType",
    # Exceptions
    "Error",
    "EventLoopBlocked",
    "IPVersion",
    "IncomingDecodeError",
    "InterfaceChoice",
    "NamePartTooLongException",
    "NonUniqueNameException",
    "NotRunningException",
    "RecordUpdate",
    "RecordUpdateListener",
    "ServiceBrowser",
    "ServiceInfo",
    "ServiceListener",
    "ServiceNameAlreadyRegistered",
    "ServiceStateChange",
    "Zeroconf",
    "ZeroconfServiceTypes",
    "__version__",
    "current_time_millis",
]
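
# Quick-start sketch (illustrative only; the listener class and the service
# type below are assumptions for demonstration, not part of this module):
#
#     class MyListener(ServiceListener):
#         def add_service(self, zc, type_, name):
#             print(f"Service {name} added")
#
#         def remove_service(self, zc, type_, name):
#             print(f"Service {name} removed")
#
#         def update_service(self, zc, type_, name):
#             pass
#
#     zc = Zeroconf()
#     browser = ServiceBrowser(zc, "_http._tcp.local.", MyListener())
#     ...
#     zc.close()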
0707010000002B000081A400000000000000000000000167C7AD1600000990000000000000000000000000000000000000003000000000python-zeroconf-0.146.0/src/zeroconf/_cache.pxdimport cython

from ._dns cimport (
    DNSAddress,
    DNSEntry,
    DNSHinfo,
    DNSNsec,
    DNSPointer,
    DNSRecord,
    DNSService,
    DNSText,
)

cdef object heappop
cdef object heappush
cdef object heapify

cdef object _UNIQUE_RECORD_TYPES
cdef unsigned int _TYPE_PTR
cdef cython.uint _ONE_SECOND
cdef unsigned int _MIN_SCHEDULED_RECORD_EXPIRATION


@cython.locals(record_cache=dict)
cdef _remove_key(cython.dict cache, object key, DNSRecord record)


cdef class DNSCache:

    cdef public cython.dict cache
    cdef public cython.dict service_cache
    cdef public list _expire_heap
    cdef public dict _expirations

    cpdef bint async_add_records(self, object entries)

    cpdef void async_remove_records(self, object entries)

    @cython.locals(store=cython.dict)
    cpdef DNSRecord async_get_unique(self, DNSRecord entry)

    @cython.locals(record=DNSRecord, when_record=tuple, when=double)
    cpdef list async_expire(self, double now)

    @cython.locals(records=cython.dict, record=DNSRecord)
    cpdef list async_all_by_details(self, str name, unsigned int type_, unsigned int class_)

    cpdef list async_entries_with_name(self, str name)

    cpdef list async_entries_with_server(self, str name)

    @cython.locals(cached_entry=DNSRecord, records=dict)
    cpdef DNSRecord get_by_details(self, str name, unsigned int type_, unsigned int class_)

    @cython.locals(records=cython.dict, entry=DNSRecord)
    cpdef cython.list get_all_by_details(self, str name, unsigned int type_, unsigned int class_)

    @cython.locals(
        store=cython.dict,
        service_store=cython.dict,
        service_record=DNSService,
        when=object,
        new=bint
    )
    cdef bint _async_add(self, DNSRecord record)

    @cython.locals(service_record=DNSService)
    cdef void _async_remove(self, DNSRecord record)

    @cython.locals(record=DNSRecord, created_double=double)
    cpdef void async_mark_unique_records_older_than_1s_to_expire(self, cython.set unique_types, object answers, double now)

    @cython.locals(entries=dict)
    cpdef list entries_with_name(self, str name)

    @cython.locals(entries=dict)
    cpdef list entries_with_server(self, str server)

    @cython.locals(record=DNSRecord, now=double)
    cpdef current_entry_with_name_and_alias(self, str name, str alias)

    cpdef void _async_set_created_ttl(
        self,
        DNSRecord record,
        double now,
        cython.float ttl
    )
0707010000002C000081A400000000000000000000000167C7AD160000314B000000000000000000000000000000000000002F00000000python-zeroconf-0.146.0/src/zeroconf/_cache.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

from collections.abc import Iterable
from heapq import heapify, heappop, heappush
from typing import Union, cast

from ._dns import (
    DNSAddress,
    DNSEntry,
    DNSHinfo,
    DNSNsec,
    DNSPointer,
    DNSRecord,
    DNSService,
    DNSText,
)
from ._utils.time import current_time_millis
from .const import _ONE_SECOND, _TYPE_PTR

_UNIQUE_RECORD_TYPES = (DNSAddress, DNSHinfo, DNSPointer, DNSText, DNSService)
_UniqueRecordsType = Union[DNSAddress, DNSHinfo, DNSPointer, DNSText, DNSService]
_DNSRecordCacheType = dict[str, dict[DNSRecord, DNSRecord]]
_DNSRecord = DNSRecord
_str = str
_float = float
_int = int

# The minimum number of scheduled record expirations before we start cleaning up
# the expiration heap. This is a performance optimization to avoid cleaning up the
# heap too often when there are only a few scheduled expirations.
_MIN_SCHEDULED_RECORD_EXPIRATION = 100


def _remove_key(cache: _DNSRecordCacheType, key: _str, record: _DNSRecord) -> None:
    """Remove a key from a DNSRecord cache

    This function must be run in the event loop.
    """
    record_cache = cache[key]
    del record_cache[record]
    if not record_cache:
        del cache[key]


class DNSCache:
    """A cache of DNS entries."""

    def __init__(self) -> None:
        self.cache: _DNSRecordCacheType = {}
        self._expire_heap: list[tuple[float, DNSRecord]] = []
        self._expirations: dict[DNSRecord, float] = {}
        self.service_cache: _DNSRecordCacheType = {}

    # Functions prefixed with async_ are NOT threadsafe and must
    # be run in the event loop.

    def _async_add(self, record: _DNSRecord) -> bool:
        """Adds an entry.

        Returns true if the entry was not already in the cache.

        This function must be run in the event loop.
        """
        # Previously storage of records was implemented as a list
        # instead of a dict. Since DNSRecords are now hashable, the implementation
        # uses a dict to ensure that adding a new record to the cache
        # replaces any existing records that are __eq__ to each other which
        # removes the risk that accessing the cache from the wrong
        # direction would return the old incorrect entry.
        if (store := self.cache.get(record.key)) is None:
            store = self.cache[record.key] = {}
        new = record not in store and not isinstance(record, DNSNsec)
        store[record] = record
        when = record.created + (record.ttl * 1000)
        if self._expirations.get(record) != when:
            # Avoid adding duplicates to the heap
            heappush(self._expire_heap, (when, record))
            self._expirations[record] = when

        if isinstance(record, DNSService):
            service_record = record
            if (service_store := self.service_cache.get(service_record.server_key)) is None:
                service_store = self.service_cache[service_record.server_key] = {}
            service_store[service_record] = service_record
        return new

    def async_add_records(self, entries: Iterable[DNSRecord]) -> bool:
        """Add multiple records.

        Returns true if any of the records were not in the cache.

        This function must be run in the event loop.
        """
        new = False
        for entry in entries:
            if self._async_add(entry):
                new = True
        return new

    def _async_remove(self, record: _DNSRecord) -> None:
        """Removes an entry.

        This function must be run in the event loop.
        """
        if isinstance(record, DNSService):
            service_record = record
            _remove_key(self.service_cache, service_record.server_key, service_record)
        _remove_key(self.cache, record.key, record)
        self._expirations.pop(record, None)

    def async_remove_records(self, entries: Iterable[DNSRecord]) -> None:
        """Remove multiple records.

        This function must be run in the event loop.
        """
        for entry in entries:
            self._async_remove(entry)

    def async_expire(self, now: _float) -> list[DNSRecord]:
        """Purge expired entries from the cache.

        This function must be run in the event loop.

        :param now: The current time in milliseconds.
        """
        if not (expire_heap_len := len(self._expire_heap)):
            return []

        expired: list[DNSRecord] = []
        # Find any expired records and add them to the to-delete list
        while self._expire_heap:
            when_record = self._expire_heap[0]
            when = when_record[0]
            if when > now:
                break
            heappop(self._expire_heap)
            # Check if the record hasn't been re-added to the heap
            # with a different expiration time as it will be removed
            # later when it reaches the top of the heap and its
            # expiration time is met.
            record = when_record[1]
            if self._expirations.get(record) == when:
                expired.append(record)

        # If the expiration heap grows larger than the number of expirations
        # times two, we clean it up to avoid keeping expired entries in
        # the heap and consuming memory. We guard this with a minimum
        # threshold to avoid cleaning up the heap too often when there are
        # only a few scheduled expirations.
        if (
            expire_heap_len > _MIN_SCHEDULED_RECORD_EXPIRATION
            and expire_heap_len > len(self._expirations) * 2
        ):
            # Remove any expired entries from the expiration heap
            # that do not match the expiration time in the expirations
            # as it means the record has been re-added to the heap
            # with a different expiration time.
            self._expire_heap = [
                entry for entry in self._expire_heap if self._expirations.get(entry[1]) == entry[0]
            ]
            heapify(self._expire_heap)

        self.async_remove_records(expired)
        return expired
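
    # Illustrative sketch of the stale-heap invariant handled by async_expire()
    # (the values are hypothetical): if record ``r`` was first scheduled to
    # expire at 1000.0 and then re-added with a later expiration, the heap may
    # briefly hold both entries while ``_expirations`` holds the authoritative
    # time:
    #
    #     cache._expire_heap  == [(1000.0, r), (2000.0, r)]
    #     cache._expirations  == {r: 2000.0}
    #
    # Popping (1000.0, r) does not expire ``r`` because the times disagree;
    # only the (2000.0, r) entry can expire it.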

    def async_get_unique(self, entry: _UniqueRecordsType) -> DNSRecord | None:
        """Gets a unique entry by key.  Will return None if there is no
        matching entry.

        This function is not threadsafe and must be called from
        the event loop.
        """
        store = self.cache.get(entry.key)
        if store is None:
            return None
        return store.get(entry)

    def async_all_by_details(self, name: _str, type_: _int, class_: _int) -> list[DNSRecord]:
        """Gets all matching entries by details.

        This function is not thread-safe and must be called from
        the event loop.
        """
        key = name.lower()
        records = self.cache.get(key)
        matches: list[DNSRecord] = []
        if records is None:
            return matches
        for record in records.values():
            if type_ == record.type and class_ == record.class_:
                matches.append(record)
        return matches

    def async_entries_with_name(self, name: str) -> list[DNSRecord]:
        """Returns a dict of entries whose key matches the name.

        This function is not threadsafe and must be called from
        the event loop.
        """
        return self.entries_with_name(name)

    def async_entries_with_server(self, name: str) -> list[DNSRecord]:
        """Returns a dict of entries whose key matches the server.

        This function is not threadsafe and must be called from
        the event loop.
        """
        return self.entries_with_server(name)

    # The functions below are threadsafe and do not need to be run in the
    # event loop; however, they all make copies, so they are significantly
    # inefficient.

    def get(self, entry: DNSEntry) -> DNSRecord | None:
        """Gets an entry by key.  Will return None if there is no
        matching entry."""
        if isinstance(entry, _UNIQUE_RECORD_TYPES):
            return self.cache.get(entry.key, {}).get(entry)
        for cached_entry in reversed(list(self.cache.get(entry.key, {}).values())):
            if entry.__eq__(cached_entry):
                return cached_entry
        return None

    def get_by_details(self, name: str, type_: _int, class_: _int) -> DNSRecord | None:
        """Gets the first matching entry by details. Returns None if no entries match.

        Calling this function is not recommended as it will only
        return one record even if there are multiple entries.

        For example if there are multiple A or AAAA addresses this
        function will return the last one that was added to the cache
        which may not be the one you expect.

        Use get_all_by_details instead.
        """
        key = name.lower()
        records = self.cache.get(key)
        if records is None:
            return None
        for cached_entry in reversed(list(records.values())):
            if type_ == cached_entry.type and class_ == cached_entry.class_:
                return cached_entry
        return None

    def get_all_by_details(self, name: str, type_: _int, class_: _int) -> list[DNSRecord]:
        """Gets all matching entries by details."""
        key = name.lower()
        records = self.cache.get(key)
        if records is None:
            return []
        return [entry for entry in list(records.values()) if type_ == entry.type and class_ == entry.class_]

    def entries_with_server(self, server: str) -> list[DNSRecord]:
        """Returns a list of entries whose server matches the name."""
        if entries := self.service_cache.get(server.lower()):
            return list(entries.values())
        return []

    def entries_with_name(self, name: str) -> list[DNSRecord]:
        """Returns a list of entries whose key matches the name."""
        if entries := self.cache.get(name.lower()):
            return list(entries.values())
        return []

    def current_entry_with_name_and_alias(self, name: str, alias: str) -> DNSRecord | None:
        """Returns the most recently added, unexpired PTR record for the name whose alias matches."""
        now = current_time_millis()
        for record in reversed(self.entries_with_name(name)):
            if (
                record.type == _TYPE_PTR
                and not record.is_expired(now)
                and cast(DNSPointer, record).alias == alias
            ):
                return record
        return None

    def names(self) -> list[str]:
        """Return a copy of the list of current cache names."""
        return list(self.cache)

    def async_mark_unique_records_older_than_1s_to_expire(
        self,
        unique_types: set[tuple[_str, _int, _int]],
        answers: Iterable[DNSRecord],
        now: _float,
    ) -> None:
        # rfc6762#section-10.2 para 2
        # Since unique is set, all old records with that name, rrtype,
        # and rrclass that were received more than one second ago are declared
        # invalid, and marked to expire from the cache in one second.
        answers_rrset = set(answers)
        for name, type_, class_ in unique_types:
            for record in self.async_all_by_details(name, type_, class_):
                created_double = record.created
                if (now - created_double > _ONE_SECOND) and record not in answers_rrset:
                    # Expire in 1s
                    self._async_set_created_ttl(record, now, 1)

    def _async_set_created_ttl(self, record: DNSRecord, now: _float, ttl: _float) -> None:
        """Set the created time and ttl of a record."""
        # It would be better if we made a copy instead of mutating the record
        # in place, but records currently don't have a copy method.
        record._set_created_ttl(now, ttl)
        self._async_add(record)
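
# Minimal usage sketch for DNSCache (illustrative only; the record shown and
# the _TYPE_A/_CLASS_IN constants are assumptions pulled from zeroconf.const,
# not part of this module):
#
#     from zeroconf._dns import DNSAddress
#     from zeroconf.const import _CLASS_IN, _TYPE_A
#
#     cache = DNSCache()
#     record = DNSAddress("myhost.local.", _TYPE_A, _CLASS_IN, 120, b"\xc0\xa8\x01\x02")
#     cache.async_add_records([record])  # must run in the event loop
#     assert cache.get_by_details("myhost.local.", _TYPE_A, _CLASS_IN) is record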
0707010000002D000081A400000000000000000000000167C7AD1600006A3C000000000000000000000000000000000000002E00000000python-zeroconf-0.146.0/src/zeroconf/_core.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

import asyncio
import logging
import sys
import threading
from collections.abc import Awaitable
from types import TracebackType

from ._cache import DNSCache
from ._dns import DNSQuestion, DNSQuestionType
from ._engine import AsyncEngine
from ._exceptions import NonUniqueNameException, NotRunningException
from ._handlers.multicast_outgoing_queue import MulticastOutgoingQueue
from ._handlers.query_handler import QueryHandler
from ._handlers.record_manager import RecordManager
from ._history import QuestionHistory
from ._logger import QuietLogger, log
from ._protocol.outgoing import DNSOutgoing
from ._services import ServiceListener
from ._services.browser import ServiceBrowser
from ._services.info import (
    AsyncServiceInfo,
    ServiceInfo,
    instance_name_from_service_info,
)
from ._services.registry import ServiceRegistry
from ._transport import _WrappedTransport
from ._updates import RecordUpdateListener
from ._utils.asyncio import (
    _resolve_all_futures_to_none,
    await_awaitable,
    get_running_loop,
    run_coro_with_timeout,
    shutdown_loop,
    wait_for_future_set_or_timeout,
    wait_future_or_timeout,
)
from ._utils.name import service_type_name
from ._utils.net import (
    InterfaceChoice,
    InterfacesType,
    IPVersion,
    autodetect_ip_version,
    can_send_to,
    create_sockets,
)
from ._utils.time import current_time_millis, millis_to_seconds
from .const import (
    _CHECK_TIME,
    _CLASS_IN,
    _CLASS_UNIQUE,
    _FLAGS_AA,
    _FLAGS_QR_QUERY,
    _FLAGS_QR_RESPONSE,
    _MAX_MSG_ABSOLUTE,
    _MDNS_ADDR,
    _MDNS_ADDR6,
    _MDNS_PORT,
    _ONE_SECOND,
    _REGISTER_TIME,
    _STARTUP_TIMEOUT,
    _TYPE_PTR,
    _UNREGISTER_TIME,
)

# The maximum amount of time to delay a multicast
# response in order to aggregate answers
_AGGREGATION_DELAY = 500  # ms
# The maximum amount of time to delay a multicast
# response in order to aggregate answers after
# it has already been delayed to protect the network
# from excessive traffic. We use a shorter time
# window here as we want to _try_ to answer all
# queries in under 1350ms while protecting
# the network from excessive traffic to ensure
# a service info request with two questions
# can be answered in the default timeout of
# 3000ms
_PROTECTED_AGGREGATION_DELAY = 200  # ms

_REGISTER_BROADCASTS = 3


def async_send_with_transport(
    log_debug: bool,
    transport: _WrappedTransport,
    packet: bytes,
    packet_num: int,
    out: DNSOutgoing,
    addr: str | None,
    port: int,
    v6_flow_scope: tuple[()] | tuple[int, int] = (),
) -> None:
    ipv6_socket = transport.is_ipv6
    if addr is None:
        real_addr = _MDNS_ADDR6 if ipv6_socket else _MDNS_ADDR
    else:
        real_addr = addr
        if not can_send_to(ipv6_socket, real_addr):
            return
    if log_debug:
        log.debug(
            "Sending to (%s, %d) via [socket %s (%s)] (%d bytes #%d) %r as %r...",
            real_addr,
            port or _MDNS_PORT,
            transport.fileno,
            transport.sock_name,
            len(packet),
            packet_num + 1,
            out,
            packet,
        )
    # Get flowinfo and scopeid for the IPV6 socket to create a complete IPv6
    # address tuple: https://docs.python.org/3.6/library/socket.html#socket-families
    if ipv6_socket and not v6_flow_scope:
        _, _, sock_flowinfo, sock_scopeid = transport.sock_name
        v6_flow_scope = (sock_flowinfo, sock_scopeid)
    transport.transport.sendto(packet, (real_addr, port or _MDNS_PORT, *v6_flow_scope))
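    # Note: for IPv6 transports the destination above is a 4-tuple,
    # e.g. ("ff02::fb", 5353, flowinfo, scope_id); when the caller does not
    # supply v6_flow_scope, it is derived from the socket's own name above.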


class Zeroconf(QuietLogger):
    """Implementation of Zeroconf Multicast DNS Service Discovery

    Supports registration, unregistration, queries and browsing.
    """

    def __init__(
        self,
        interfaces: InterfacesType = InterfaceChoice.All,
        unicast: bool = False,
        ip_version: IPVersion | None = None,
        apple_p2p: bool = False,
    ) -> None:
        """Creates an instance of the Zeroconf class, establishing
        multicast communications, listening and reaping threads.

        :param interfaces: :class:`InterfaceChoice` or a list of IP addresses
            (IPv4 and IPv6) and interface indexes (IPv6 only).

            IPv6 notes for non-POSIX systems:
            * `InterfaceChoice.All` is an alias for `InterfaceChoice.Default`
              on Python versions before 3.8.

            Also, listening on loopback (``::1``) doesn't work; use a real address.
        :param ip_version: IP versions to support. If `interfaces` is a list, the default is detected
            from it. Otherwise defaults to V4 only for backward compatibility.
        :param apple_p2p: use AWDL interface (only macOS)
        """
        if ip_version is None:
            ip_version = autodetect_ip_version(interfaces)

        self.done = False

        if apple_p2p and sys.platform != "darwin":
            raise RuntimeError("Option `apple_p2p` is not supported on non-Apple platforms.")

        self.unicast = unicast
        listen_socket, respond_sockets = create_sockets(interfaces, unicast, ip_version, apple_p2p=apple_p2p)
        log.debug("Listen socket %s, respond sockets %s", listen_socket, respond_sockets)

        self.engine = AsyncEngine(self, listen_socket, respond_sockets)

        self.browsers: dict[ServiceListener, ServiceBrowser] = {}
        self.registry = ServiceRegistry()
        self.cache = DNSCache()
        self.question_history = QuestionHistory()

        self.out_queue = MulticastOutgoingQueue(self, 0, _AGGREGATION_DELAY)
        self.out_delay_queue = MulticastOutgoingQueue(self, _ONE_SECOND, _PROTECTED_AGGREGATION_DELAY)

        self.query_handler = QueryHandler(self)
        self.record_manager = RecordManager(self)

        self._notify_futures: set[asyncio.Future] = set()
        self.loop: asyncio.AbstractEventLoop | None = None
        self._loop_thread: threading.Thread | None = None

        self.start()

    @property
    def started(self) -> bool:
        """Check if the instance has started."""
        running_future = self.engine.running_future
        return bool(
            not self.done
            and running_future
            and running_future.done()
            and not running_future.cancelled()
            and not running_future.exception()
            and running_future.result()
        )

    def start(self) -> None:
        """Start Zeroconf."""
        self.loop = get_running_loop()
        if self.loop:
            self.engine.setup(self.loop, None)
            return
        self._start_thread()

    def _start_thread(self) -> None:
        """Start a thread with a running event loop."""
        loop_thread_ready = threading.Event()

        def _run_loop() -> None:
            self.loop = asyncio.new_event_loop()
            asyncio.set_event_loop(self.loop)
            self.engine.setup(self.loop, loop_thread_ready)
            self.loop.run_forever()

        self._loop_thread = threading.Thread(target=_run_loop, daemon=True)
        self._loop_thread.start()
        loop_thread_ready.wait()

    async def async_wait_for_start(self, timeout: float = _STARTUP_TIMEOUT) -> None:
        """Wait for start up for actions that require a running Zeroconf instance.

        Throws NotRunningException if the instance is not running or could
        not be started.
        """
        if self.done:  # If the instance was shutdown from under us, raise immediately
            raise NotRunningException
        assert self.engine.running_future is not None
        await wait_future_or_timeout(self.engine.running_future, timeout=timeout)
        if not self.started:
            raise NotRunningException

    @property
    def listeners(self) -> set[RecordUpdateListener]:
        return self.record_manager.listeners

    async def async_wait(self, timeout: float) -> None:
        """Calling task waits for a given number of milliseconds or until notified."""
        loop = self.loop
        assert loop is not None
        await wait_for_future_set_or_timeout(loop, self._notify_futures, timeout)

    def notify_all(self) -> None:
        """Notifies all waiting threads and notify listeners."""
        assert self.loop is not None
        self.loop.call_soon_threadsafe(self.async_notify_all)

    def async_notify_all(self) -> None:
        """Schedule an async_notify_all."""
        notify_futures = self._notify_futures
        if notify_futures:
            _resolve_all_futures_to_none(notify_futures)

    def get_service_info(
        self,
        type_: str,
        name: str,
        timeout: int = 3000,
        question_type: DNSQuestionType | None = None,
    ) -> ServiceInfo | None:
        """Returns network's service information for a particular
        name and type, or None if no service matches by the timeout,
        which defaults to 3 seconds.

        :param type_: fully qualified service type name
        :param name: the name of the service
        :param timeout: milliseconds to wait for a response
        :param question_type: The type of questions to ask (DNSQuestionType.QM or DNSQuestionType.QU)
        """
        info = ServiceInfo(type_, name)
        if info.request(self, timeout, question_type):
            return info
        return None
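
    # Illustrative synchronous lookup sketch (the service type and name are
    # assumptions for demonstration):
    #
    #     zc = Zeroconf()
    #     info = zc.get_service_info("_http._tcp.local.", "myserver._http._tcp.local.")
    #     if info is not None:
    #         print(info.parsed_addresses(), info.port)
    #     zc.close()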

    def add_service_listener(self, type_: str, listener: ServiceListener) -> None:
        """Adds a listener for a particular service type.  This object
        will then have its add_service and remove_service methods called when
        services of that type become available and unavailable."""
        self.remove_service_listener(listener)
        self.browsers[listener] = ServiceBrowser(self, type_, listener)

    def remove_service_listener(self, listener: ServiceListener) -> None:
        """Removes a listener from the set that is currently listening."""
        if listener in self.browsers:
            self.browsers[listener].cancel()
            del self.browsers[listener]

    def remove_all_service_listeners(self) -> None:
        """Removes a listener from the set that is currently listening."""
        for listener in list(self.browsers):
            self.remove_service_listener(listener)

    def register_service(
        self,
        info: ServiceInfo,
        ttl: int | None = None,
        allow_name_change: bool = False,
        cooperating_responders: bool = False,
        strict: bool = True,
    ) -> None:
        """Registers service information to the network with a default TTL.
        Zeroconf will then respond to requests for information for that
        service.  The name of the service may be changed if needed to make
        it unique on the network. Additionally, multiple cooperating responders
        can register the same service on the network for resilience
        (if you want this behavior set `cooperating_responders` to `True`).

        While it is not expected during normal operation,
        this function may raise EventLoopBlocked if the underlying
        call to `register_service` cannot be completed.
        """
        assert self.loop is not None
        run_coro_with_timeout(
            await_awaitable(
                self.async_register_service(info, ttl, allow_name_change, cooperating_responders, strict)
            ),
            self.loop,
            _REGISTER_TIME * _REGISTER_BROADCASTS,
        )
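
    # Illustrative registration sketch (the type, name, address, port and
    # properties are assumptions for demonstration):
    #
    #     import socket
    #
    #     info = ServiceInfo(
    #         "_http._tcp.local.",
    #         "myserver._http._tcp.local.",
    #         addresses=[socket.inet_aton("192.168.1.2")],
    #         port=8080,
    #         properties={"path": "/"},
    #     )
    #     zc.register_service(info)
    #     ...
    #     zc.unregister_service(info)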

    async def async_register_service(
        self,
        info: ServiceInfo,
        ttl: int | None = None,
        allow_name_change: bool = False,
        cooperating_responders: bool = False,
        strict: bool = True,
    ) -> Awaitable:
        """Registers service information to the network with a default TTL.
        Zeroconf will then respond to requests for information for that
        service.  The name of the service may be changed if needed to make
        it unique on the network. Additionally, multiple cooperating responders
        can register the same service on the network for resilience
        (if you want this behavior set `cooperating_responders` to `True`)."""
        if ttl is not None:
            # ttl argument is used to maintain backward compatibility
            # Setting TTLs via ServiceInfo is preferred
            info.host_ttl = ttl
            info.other_ttl = ttl

        info.set_server_if_missing()
        await self.async_wait_for_start()
        await self.async_check_service(info, allow_name_change, cooperating_responders, strict)
        self.registry.async_add(info)
        return asyncio.ensure_future(self._async_broadcast_service(info, _REGISTER_TIME, None))

    def update_service(self, info: ServiceInfo) -> None:
        """Registers service information to the network with a default TTL.
        Zeroconf will then respond to requests for information for that
        service.

        While it is not expected during normal operation,
        this function may raise EventLoopBlocked if the underlying
        call to `async_update_service` cannot be completed.
        """
        assert self.loop is not None
        run_coro_with_timeout(
            await_awaitable(self.async_update_service(info)),
            self.loop,
            _REGISTER_TIME * _REGISTER_BROADCASTS,
        )

    async def async_update_service(self, info: ServiceInfo) -> Awaitable:
        """Registers service information to the network with a default TTL.
        Zeroconf will then respond to requests for information for that
        service."""
        self.registry.async_update(info)
        return asyncio.ensure_future(self._async_broadcast_service(info, _REGISTER_TIME, None))

    async def async_get_service_info(
        self,
        type_: str,
        name: str,
        timeout: int = 3000,
        question_type: DNSQuestionType | None = None,
    ) -> AsyncServiceInfo | None:
        """Returns network's service information for a particular
        name and type, or None if no service matches by the timeout,
        which defaults to 3 seconds.

        :param type_: fully qualified service type name
        :param name: the name of the service
        :param timeout: milliseconds to wait for a response
        :param question_type: The type of questions to ask (DNSQuestionType.QM or DNSQuestionType.QU)
        """
        info = AsyncServiceInfo(type_, name)
        if await info.async_request(self, timeout, question_type):
            return info
        return None

    async def _async_broadcast_service(
        self,
        info: ServiceInfo,
        interval: int,
        ttl: int | None,
        broadcast_addresses: bool = True,
    ) -> None:
        """Send a broadcasts to announce a service at intervals."""
        for i in range(_REGISTER_BROADCASTS):
            if i != 0:
                await asyncio.sleep(millis_to_seconds(interval))
            self.async_send(self.generate_service_broadcast(info, ttl, broadcast_addresses))

    def generate_service_broadcast(
        self,
        info: ServiceInfo,
        ttl: int | None,
        broadcast_addresses: bool = True,
    ) -> DNSOutgoing:
        """Generate a broadcast to announce a service."""
        out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
        self._add_broadcast_answer(out, info, ttl, broadcast_addresses)
        return out

    def generate_service_query(self, info: ServiceInfo) -> DNSOutgoing:  # pylint: disable=no-self-use
        """Generate a query to lookup a service."""
        out = DNSOutgoing(_FLAGS_QR_QUERY | _FLAGS_AA)
        # https://datatracker.ietf.org/doc/html/rfc6762#section-8.1
        # Because of the mDNS multicast rate-limiting
        # rules, the probes SHOULD be sent as "QU" questions with the unicast-
        # response bit set, to allow a defending host to respond immediately
        # via unicast, instead of potentially having to wait before replying
        # via multicast.
        #
        # _CLASS_UNIQUE is the "QU" bit
        out.add_question(DNSQuestion(info.type, _TYPE_PTR, _CLASS_IN | _CLASS_UNIQUE))
        out.add_authorative_answer(info.dns_pointer())
        return out

    def _add_broadcast_answer(  # pylint: disable=no-self-use
        self,
        out: DNSOutgoing,
        info: ServiceInfo,
        override_ttl: int | None,
        broadcast_addresses: bool = True,
    ) -> None:
        """Add answers to broadcast a service."""
        current_time_millis()
        other_ttl = None if override_ttl is None else override_ttl
        host_ttl = None if override_ttl is None else override_ttl
        out.add_answer_at_time(info.dns_pointer(override_ttl=other_ttl), 0)
        out.add_answer_at_time(info.dns_service(override_ttl=host_ttl), 0)
        out.add_answer_at_time(info.dns_text(override_ttl=other_ttl), 0)
        if broadcast_addresses:
            for record in info.get_address_and_nsec_records(override_ttl=host_ttl):
                out.add_answer_at_time(record, 0)

    def unregister_service(self, info: ServiceInfo) -> None:
        """Unregister a service.

        While it is not expected during normal operation,
        this function may raise EventLoopBlocked if the underlying
        call to `async_unregister_service` cannot be completed.
        """
        assert self.loop is not None
        run_coro_with_timeout(
            self.async_unregister_service(info),
            self.loop,
            _UNREGISTER_TIME * _REGISTER_BROADCASTS,
        )

    async def async_unregister_service(self, info: ServiceInfo) -> Awaitable:
        """Unregister a service."""
        info.set_server_if_missing()
        self.registry.async_remove(info)
        # If another server uses the same addresses, we do not want to send
        # goodbye packets for the address records

        assert info.server_key is not None
        entries = self.registry.async_get_infos_server(info.server_key)
        broadcast_addresses = not bool(entries)
        return asyncio.ensure_future(
            self._async_broadcast_service(info, _UNREGISTER_TIME, 0, broadcast_addresses)
        )

    def generate_unregister_all_services(self) -> DNSOutgoing | None:
        """Generate a DNSOutgoing goodbye for all services and remove them from the registry."""
        service_infos = self.registry.async_get_service_infos()
        if not service_infos:
            return None
        out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
        for info in service_infos:
            self._add_broadcast_answer(out, info, 0)
        self.registry.async_remove(service_infos)
        return out

    async def async_unregister_all_services(self) -> None:
        """Unregister all registered services.

        Unlike async_register_service and async_unregister_service, this
        method does not return a future and is always expected to be
        awaited since it's only called at shutdown.
        """
        # Send Goodbye packets https://datatracker.ietf.org/doc/html/rfc6762#section-10.1
        out = self.generate_unregister_all_services()
        if not out:
            return
        for i in range(_REGISTER_BROADCASTS):
            if i != 0:
                await asyncio.sleep(millis_to_seconds(_UNREGISTER_TIME))
            self.async_send(out)

    def unregister_all_services(self) -> None:
        """Unregister all registered services.

        While it is not expected during normal operation,
        this function may raise EventLoopBlocked if the underlying
        call to `async_unregister_all_services` cannot be completed.
        """
        assert self.loop is not None
        run_coro_with_timeout(
            self.async_unregister_all_services(),
            self.loop,
            _UNREGISTER_TIME * _REGISTER_BROADCASTS,
        )

    async def async_check_service(
        self,
        info: ServiceInfo,
        allow_name_change: bool,
        cooperating_responders: bool = False,
        strict: bool = True,
    ) -> None:
        """Checks the network for a unique service name, modifying the
        ServiceInfo passed in if it is not unique."""
        instance_name = instance_name_from_service_info(info, strict=strict)
        if cooperating_responders:
            return
        next_instance_number = 2
        next_time = now = current_time_millis()
        i = 0
        while i < _REGISTER_BROADCASTS:
            # check for a name conflict
            while self.cache.current_entry_with_name_and_alias(info.type, info.name):
                if not allow_name_change:
                    raise NonUniqueNameException

                # change the name and look for a conflict
                info.name = f"{instance_name}-{next_instance_number}.{info.type}"
                next_instance_number += 1
                service_type_name(info.name, strict=strict)
                next_time = now
                i = 0

            if now < next_time:
                await self.async_wait(next_time - now)
                now = current_time_millis()
                continue

            self.async_send(self.generate_service_query(info))
            i += 1
            next_time += _CHECK_TIME

    def add_listener(
        self,
        listener: RecordUpdateListener,
        question: DNSQuestion | list[DNSQuestion] | None,
    ) -> None:
        """Adds a listener for a given question.  The listener will have
        its update_record method called when information is available to
        answer the question(s).

        This function is threadsafe
        """
        assert self.loop is not None
        self.loop.call_soon_threadsafe(self.record_manager.async_add_listener, listener, question)
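
    # Illustrative sketch (hypothetical listener): subscribing to PTR updates
    # for a service type. MyListener is an assumed RecordUpdateListener
    # subclass; the constants come from zeroconf.const:
    #
    #     question = DNSQuestion("_http._tcp.local.", _TYPE_PTR, _CLASS_IN)
    #     zc.add_listener(MyListener(), question)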

    def remove_listener(self, listener: RecordUpdateListener) -> None:
        """Removes a listener.

        This function is threadsafe
        """
        assert self.loop is not None
        self.loop.call_soon_threadsafe(self.record_manager.async_remove_listener, listener)

    def async_add_listener(
        self,
        listener: RecordUpdateListener,
        question: DNSQuestion | list[DNSQuestion] | None,
    ) -> None:
        """Adds a listener for a given question.  The listener will have
        its update_record method called when information is available to
        answer the question(s).

        This function is not threadsafe and must be called in the eventloop.
        """
        self.record_manager.async_add_listener(listener, question)

    def async_remove_listener(self, listener: RecordUpdateListener) -> None:
        """Removes a listener.

        This function is not threadsafe and must be called in the eventloop.
        """
        self.record_manager.async_remove_listener(listener)

    def send(
        self,
        out: DNSOutgoing,
        addr: str | None = None,
        port: int = _MDNS_PORT,
        v6_flow_scope: tuple[()] | tuple[int, int] = (),
        transport: _WrappedTransport | None = None,
    ) -> None:
        """Sends an outgoing packet threadsafe."""
        assert self.loop is not None
        self.loop.call_soon_threadsafe(self.async_send, out, addr, port, v6_flow_scope, transport)

    def async_send(
        self,
        out: DNSOutgoing,
        addr: str | None = None,
        port: int = _MDNS_PORT,
        v6_flow_scope: tuple[()] | tuple[int, int] = (),
        transport: _WrappedTransport | None = None,
    ) -> None:
        """Sends an outgoing packet."""
        if self.done:
            return

        # If no transport is specified, we send to all the ones
        # with the same address family
        transports = [transport] if transport else self.engine.senders
        log_debug = log.isEnabledFor(logging.DEBUG)

        for packet_num, packet in enumerate(out.packets()):
            if len(packet) > _MAX_MSG_ABSOLUTE:
                self.log_warning_once(
                    "Dropping %r over-sized packet (%d bytes) %r",
                    out,
                    len(packet),
                    packet,
                )
                return
            for send_transport in transports:
                async_send_with_transport(
                    log_debug,
                    send_transport,
                    packet,
                    packet_num,
                    out,
                    addr,
                    port,
                    v6_flow_scope,
                )

    def _close(self) -> None:
        """Set global done and remove all service listeners."""
        if self.done:
            return
        self.remove_all_service_listeners()
        self.done = True

    def _shutdown_threads(self) -> None:
        """Shutdown any threads."""
        self.notify_all()
        if not self._loop_thread:
            return
        assert self.loop is not None
        shutdown_loop(self.loop)
        self._loop_thread.join()
        self._loop_thread = None

    def close(self) -> None:
        """Ends the background threads, and prevent this instance from
        servicing further queries.

        This method is idempotent and irreversible.
        """
        assert self.loop is not None
        if self.loop.is_running():
            if self.loop == get_running_loop():
                log.warning(
                    "unregister_all_services skipped as it does blocking i/o; use AsyncZeroconf with asyncio"
                )
            else:
                self.unregister_all_services()
        self._close()
        self.engine.close()
        self._shutdown_threads()

    async def _async_close(self) -> None:
        """Ends the background threads, and prevent this instance from
        servicing further queries.

        This method is idempotent and irreversible.

        This call only intended to be used by AsyncZeroconf

        Callers are responsible for unregistering all services
        before calling this function
        """
        self._close()
        await self.engine._async_close()  # pylint: disable=protected-access
        self._shutdown_threads()

    def __enter__(self) -> Zeroconf:
        return self

    def __exit__(  # pylint: disable=useless-return
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        self.close()
        return None
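
# Illustrative sketch: the context-manager protocol above makes close()
# automatic, so a short-lived lookup can be written as
#
#     from zeroconf import Zeroconf
#
#     with Zeroconf() as zc:
#         info = zc.get_service_info("_http._tcp.local.", "Printer._http._tcp.local.")
#
# where "Printer._http._tcp.local." is a hypothetical service name.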
0707010000002E000081A400000000000000000000000167C7AD1600001023000000000000000000000000000000000000002E00000000python-zeroconf-0.146.0/src/zeroconf/_dns.pxd
import cython

from ._protocol.outgoing cimport DNSOutgoing


cdef cython.uint _LEN_BYTE
cdef cython.uint _LEN_SHORT
cdef cython.uint _LEN_INT

cdef cython.uint _NAME_COMPRESSION_MIN_SIZE
cdef cython.uint _BASE_MAX_SIZE

cdef cython.uint _EXPIRE_FULL_TIME_MS
cdef cython.uint _EXPIRE_STALE_TIME_MS
cdef cython.uint _RECENT_TIME_MS

cdef cython.uint _TYPE_ANY

cdef cython.uint _CLASS_UNIQUE
cdef cython.uint _CLASS_MASK

cdef object current_time_millis

cdef class DNSEntry:

    cdef public str key
    cdef public str name
    cdef public cython.uint type
    cdef public cython.uint class_
    cdef public bint unique

    cdef _fast_init_entry(self, str name, cython.uint type_, cython.uint class_)

    cdef bint _dns_entry_matches(self, DNSEntry other)

cdef class DNSQuestion(DNSEntry):

    cdef public cython.int _hash

    cdef _fast_init(self, str name, cython.uint type_, cython.uint class_)

    cpdef bint answered_by(self, DNSRecord rec)

cdef class DNSRecord(DNSEntry):

    cdef public cython.float ttl
    cdef public double created

    cdef _fast_init_record(self, str name, cython.uint type_, cython.uint class_, cython.float ttl, double created)

    cdef bint _suppressed_by_answer(self, DNSRecord answer)

    @cython.locals(
        answers=cython.list,
    )
    cpdef bint suppressed_by(self, object msg)

    cpdef get_remaining_ttl(self, double now)

    cpdef double get_expiration_time(self, cython.uint percent)

    cpdef bint is_expired(self, double now)

    cpdef bint is_stale(self, double now)

    cpdef bint is_recent(self, double now)

    cdef _set_created_ttl(self, double now, cython.float ttl)

cdef class DNSAddress(DNSRecord):

    cdef public cython.int _hash
    cdef public bytes address
    cdef public object scope_id

    cdef _fast_init(self, str name, cython.uint type_, cython.uint class_, cython.float ttl, bytes address, object scope_id, double created)

    cdef bint _eq(self, DNSAddress other)

    cpdef write(self, DNSOutgoing out)


cdef class DNSHinfo(DNSRecord):

    cdef public cython.int _hash
    cdef public str cpu
    cdef public str os

    cdef _fast_init(self, str name, cython.uint type_, cython.uint class_, cython.float ttl, str cpu, str os, double created)

    cdef bint _eq(self, DNSHinfo other)

    cpdef write(self, DNSOutgoing out)

cdef class DNSPointer(DNSRecord):

    cdef public cython.int _hash
    cdef public str alias
    cdef public str alias_key

    cdef _fast_init(self, str name, cython.uint type_, cython.uint class_, cython.float ttl, str alias, double created)

    cdef bint _eq(self, DNSPointer other)

    cpdef write(self, DNSOutgoing out)

cdef class DNSText(DNSRecord):

    cdef public cython.int _hash
    cdef public bytes text

    cdef _fast_init(self, str name, cython.uint type_, cython.uint class_, cython.float ttl, bytes text, double created)

    cdef bint _eq(self, DNSText other)

    cpdef write(self, DNSOutgoing out)

cdef class DNSService(DNSRecord):

    cdef public cython.int _hash
    cdef public cython.uint priority
    cdef public cython.uint weight
    cdef public cython.uint port
    cdef public str server
    cdef public str server_key

    cdef _fast_init(self, str name, cython.uint type_, cython.uint class_, cython.float ttl, cython.uint priority, cython.uint weight, cython.uint port, str server, double created)

    cdef bint _eq(self, DNSService other)

    cpdef write(self, DNSOutgoing out)

cdef class DNSNsec(DNSRecord):

    cdef public cython.int _hash
    cdef public str next_name
    cdef public cython.list rdtypes

    cdef _fast_init(self, str name, cython.uint type_, cython.uint class_, cython.float ttl, str next_name, cython.list rdtypes, double created)

    cdef bint _eq(self, DNSNsec other)

    cpdef write(self, DNSOutgoing out)

cdef class DNSRRSet:

    cdef cython.list _records
    cdef cython.dict _lookup

    @cython.locals(other=DNSRecord)
    cpdef bint suppresses(self, DNSRecord record)

    @cython.locals(
        record=DNSRecord,
        record_sets=cython.list,
    )
    cdef cython.dict _get_lookup(self)

    cpdef cython.set lookup_set(self)
0707010000002F000081A400000000000000000000000167C7AD1600005168000000000000000000000000000000000000002D00000000python-zeroconf-0.146.0/src/zeroconf/_dns.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

import enum
import socket
from typing import TYPE_CHECKING, Any, cast

from ._exceptions import AbstractMethodException
from ._utils.net import _is_v6_address
from ._utils.time import current_time_millis
from .const import _CLASS_MASK, _CLASS_UNIQUE, _CLASSES, _TYPE_ANY, _TYPES

_LEN_BYTE = 1
_LEN_SHORT = 2
_LEN_INT = 4

_BASE_MAX_SIZE = _LEN_SHORT + _LEN_SHORT + _LEN_INT + _LEN_SHORT  # type + class + ttl + length
_NAME_COMPRESSION_MIN_SIZE = _LEN_BYTE * 2

_EXPIRE_FULL_TIME_MS = 1000
_EXPIRE_STALE_TIME_MS = 500
_RECENT_TIME_MS = 250

_float = float
_int = int

if TYPE_CHECKING:
    from ._protocol.incoming import DNSIncoming
    from ._protocol.outgoing import DNSOutgoing


@enum.unique
class DNSQuestionType(enum.Enum):
    """An MDNS question type.

    "QU" - questions requesting unicast responses
    "QM" - questions requesting multicast responses
    https://datatracker.ietf.org/doc/html/rfc6762#section-5.4
    """

    QU = 1
    QM = 2


class DNSEntry:
    """A DNS entry"""

    __slots__ = ("class_", "key", "name", "type", "unique")

    def __init__(self, name: str, type_: int, class_: int) -> None:
        self._fast_init_entry(name, type_, class_)

    def _fast_init_entry(self, name: str, type_: _int, class_: _int) -> None:
        """Fast init for reuse."""
        self.name = name
        self.key = name.lower()
        self.type = type_
        self.class_ = class_ & _CLASS_MASK
        self.unique = (class_ & _CLASS_UNIQUE) != 0

    def _dns_entry_matches(self, other: DNSEntry) -> bool:
        return self.key == other.key and self.type == other.type and self.class_ == other.class_

    def __eq__(self, other: Any) -> bool:
        """Equality test on key (lowercase name), type, and class"""
        return isinstance(other, DNSEntry) and self._dns_entry_matches(other)

    @staticmethod
    def get_class_(class_: int) -> str:
        """Class accessor"""
        return _CLASSES.get(class_, f"?({class_})")

    @staticmethod
    def get_type(t: int) -> str:
        """Type accessor"""
        return _TYPES.get(t, f"?({t})")

    def entry_to_string(self, hdr: str, other: bytes | str | None) -> str:
        """String representation with additional information"""
        return "{}[{},{}{},{}]{}".format(
            hdr,
            self.get_type(self.type),
            self.get_class_(self.class_),
            "-unique" if self.unique else "",
            self.name,
            f"={cast(Any, other)}" if other is not None else "",
        )


class DNSQuestion(DNSEntry):
    """A DNS question entry"""

    __slots__ = ("_hash",)

    def __init__(self, name: str, type_: int, class_: int) -> None:
        self._fast_init(name, type_, class_)

    def _fast_init(self, name: str, type_: _int, class_: _int) -> None:
        """Fast init for reuse."""
        self._fast_init_entry(name, type_, class_)
        self._hash = hash((self.key, type_, self.class_))

    def answered_by(self, rec: DNSRecord) -> bool:
        """Returns true if the question is answered by the record"""
        return self.class_ == rec.class_ and self.type in (rec.type, _TYPE_ANY) and self.name == rec.name

    def __hash__(self) -> int:
        return self._hash

    def __eq__(self, other: Any) -> bool:
        """Tests equality on dns question."""
        return isinstance(other, DNSQuestion) and self._dns_entry_matches(other)

    @property
    def max_size(self) -> int:
        """Maximum size of the question in the packet."""
        return len(self.name.encode("utf-8")) + _LEN_BYTE + _LEN_SHORT + _LEN_SHORT

    @property
    def unicast(self) -> bool:
        """Returns true if the QU (not QM) is set.

        unique shares the same mask as the one
        used for unicast.
        """
        return self.unique

    @unicast.setter
    def unicast(self, value: bool) -> None:
        """Sets the QU bit (not QM)."""
        self.unique = value
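
    # Illustrative sketch: requesting a unicast response (QU) for the first
    # query of a session, per RFC 6762 section 5.4 (constants from
    # zeroconf.const):
    #
    #     question = DNSQuestion("_http._tcp.local.", _TYPE_PTR, _CLASS_IN)
    #     question.unicast = True  # sets the top bit of the class field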

    def __repr__(self) -> str:
        """String representation"""
        return "{}[question,{},{},{}]".format(
            self.get_type(self.type),
            "QU" if self.unicast else "QM",
            self.get_class_(self.class_),
            self.name,
        )


class DNSRecord(DNSEntry):
    """A DNS record - like a DNS entry, but has a TTL"""

    __slots__ = ("created", "ttl")

    # TODO: Switch to just int ttl
    def __init__(
        self,
        name: str,
        type_: int,
        class_: int,
        ttl: float | int,
        created: float | None = None,
    ) -> None:
        self._fast_init_record(name, type_, class_, ttl, created or current_time_millis())

    def _fast_init_record(self, name: str, type_: _int, class_: _int, ttl: _float, created: _float) -> None:
        """Fast init for reuse."""
        self._fast_init_entry(name, type_, class_)
        self.ttl = ttl
        self.created = created

    def __eq__(self, other: Any) -> bool:  # pylint: disable=no-self-use
        """Abstract method"""
        raise AbstractMethodException

    def __lt__(self, other: DNSRecord) -> bool:
        return self.ttl < other.ttl

    def suppressed_by(self, msg: DNSIncoming) -> bool:
        """Returns true if any answer in a message can suffice for the
        information held in this record."""
        answers = msg.answers()
        for record in answers:
            if self._suppressed_by_answer(record):
                return True
        return False

    def _suppressed_by_answer(self, other: DNSRecord) -> bool:
        """Returns true if another record has same name, type and class,
        and if its TTL is at least half of this record's."""
        return self == other and other.ttl > (self.ttl / 2)

    def get_expiration_time(self, percent: _int) -> float:
        """Returns the time at which this record will have expired
        by a certain percentage."""
        return self.created + (percent * self.ttl * 10)
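
    # Worked example for the arithmetic above: the TTL is in seconds while
    # created is in milliseconds, so percent * ttl * 10 converts a
    # percentage of the TTL into milliseconds
    # (ttl * 1000 * percent / 100 == ttl * 10 * percent). A record created
    # at t=0 with ttl=120 expires 80% of the way through at 96000 ms.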

    # TODO: Switch to just int here
    def get_remaining_ttl(self, now: _float) -> int | float:
        """Returns the remaining TTL in seconds."""
        remain = (self.created + (_EXPIRE_FULL_TIME_MS * self.ttl) - now) / 1000.0
        return 0 if remain < 0 else remain

    def is_expired(self, now: _float) -> bool:
        """Returns true if this record has expired."""
        return self.created + (_EXPIRE_FULL_TIME_MS * self.ttl) <= now

    def is_stale(self, now: _float) -> bool:
        """Returns true if this record is at least half way expired."""
        return self.created + (_EXPIRE_STALE_TIME_MS * self.ttl) <= now

    def is_recent(self, now: _float) -> bool:
        """Returns true if the record more than one quarter of its TTL remaining."""
        return self.created + (_RECENT_TIME_MS * self.ttl) > now
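
    # The _EXPIRE_*/_RECENT_* constants above encode fractions of the TTL
    # in milliseconds: 1000 is 100% of the TTL, 500 is 50%, and 250 is 25%,
    # again because the TTL is in seconds while created/now are in
    # milliseconds.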

    def _set_created_ttl(self, created: _float, ttl: float | int) -> None:
        """Set the created and ttl of a record."""
        # It would be better if we made a copy instead of mutating the record
        # in place, but records currently don't have a copy method.
        self.created = created
        self.ttl = ttl

    def write(self, out: DNSOutgoing) -> None:  # pylint: disable=no-self-use
        """Abstract method"""
        raise AbstractMethodException

    def to_string(self, other: bytes | str) -> str:
        """String representation with additional information"""
        arg = f"{self.ttl}/{int(self.get_remaining_ttl(current_time_millis()))},{cast(Any, other)}"
        return DNSEntry.entry_to_string(self, "record", arg)


class DNSAddress(DNSRecord):
    """A DNS address record"""

    __slots__ = ("_hash", "address", "scope_id")

    def __init__(
        self,
        name: str,
        type_: int,
        class_: int,
        ttl: int,
        address: bytes,
        scope_id: int | None = None,
        created: float | None = None,
    ) -> None:
        self._fast_init(name, type_, class_, ttl, address, scope_id, created or current_time_millis())

    def _fast_init(
        self,
        name: str,
        type_: _int,
        class_: _int,
        ttl: _float,
        address: bytes,
        scope_id: _int | None,
        created: _float,
    ) -> None:
        """Fast init for reuse."""
        self._fast_init_record(name, type_, class_, ttl, created)
        self.address = address
        self.scope_id = scope_id
        self._hash = hash((self.key, type_, self.class_, address, scope_id))

    def write(self, out: DNSOutgoing) -> None:
        """Used in constructing an outgoing packet"""
        out.write_string(self.address)

    def __eq__(self, other: Any) -> bool:
        """Tests equality on address"""
        return isinstance(other, DNSAddress) and self._eq(other)

    def _eq(self, other: DNSAddress) -> bool:
        return (
            self.address == other.address
            and self.scope_id == other.scope_id
            and self._dns_entry_matches(other)
        )

    def __hash__(self) -> int:
        """Hash to compare like DNSAddresses."""
        return self._hash

    def __repr__(self) -> str:
        """String representation"""
        try:
            return self.to_string(
                socket.inet_ntop(
                    socket.AF_INET6 if _is_v6_address(self.address) else socket.AF_INET,
                    self.address,
                )
            )
        except (ValueError, OSError):
            return self.to_string(str(self.address))


class DNSHinfo(DNSRecord):
    """A DNS host information record"""

    __slots__ = ("_hash", "cpu", "os")

    def __init__(
        self,
        name: str,
        type_: int,
        class_: int,
        ttl: int,
        cpu: str,
        os: str,
        created: float | None = None,
    ) -> None:
        self._fast_init(name, type_, class_, ttl, cpu, os, created or current_time_millis())

    def _fast_init(
        self, name: str, type_: _int, class_: _int, ttl: _float, cpu: str, os: str, created: _float
    ) -> None:
        """Fast init for reuse."""
        self._fast_init_record(name, type_, class_, ttl, created)
        self.cpu = cpu
        self.os = os
        self._hash = hash((self.key, type_, self.class_, cpu, os))

    def write(self, out: DNSOutgoing) -> None:
        """Used in constructing an outgoing packet"""
        out.write_character_string(self.cpu.encode("utf-8"))
        out.write_character_string(self.os.encode("utf-8"))

    def __eq__(self, other: Any) -> bool:
        """Tests equality on cpu and os."""
        return isinstance(other, DNSHinfo) and self._eq(other)

    def _eq(self, other: DNSHinfo) -> bool:
        """Tests equality on cpu and os."""
        return self.cpu == other.cpu and self.os == other.os and self._dns_entry_matches(other)

    def __hash__(self) -> int:
        """Hash to compare like DNSHinfo."""
        return self._hash

    def __repr__(self) -> str:
        """String representation"""
        return self.to_string(self.cpu + " " + self.os)


class DNSPointer(DNSRecord):
    """A DNS pointer record"""

    __slots__ = ("_hash", "alias", "alias_key")

    def __init__(
        self,
        name: str,
        type_: int,
        class_: int,
        ttl: int,
        alias: str,
        created: float | None = None,
    ) -> None:
        self._fast_init(name, type_, class_, ttl, alias, created or current_time_millis())

    def _fast_init(
        self, name: str, type_: _int, class_: _int, ttl: _float, alias: str, created: _float
    ) -> None:
        self._fast_init_record(name, type_, class_, ttl, created)
        self.alias = alias
        self.alias_key = alias.lower()
        self._hash = hash((self.key, type_, self.class_, self.alias_key))

    @property
    def max_size_compressed(self) -> int:
        """Maximum size of the record in the packet assuming the name has been compressed."""
        return (
            _BASE_MAX_SIZE
            + _NAME_COMPRESSION_MIN_SIZE
            + (len(self.alias) - len(self.name))
            + _NAME_COMPRESSION_MIN_SIZE
        )

    def write(self, out: DNSOutgoing) -> None:
        """Used in constructing an outgoing packet"""
        out.write_name(self.alias)

    def __eq__(self, other: Any) -> bool:
        """Tests equality on alias."""
        return isinstance(other, DNSPointer) and self._eq(other)

    def _eq(self, other: DNSPointer) -> bool:
        """Tests equality on alias."""
        return self.alias_key == other.alias_key and self._dns_entry_matches(other)

    def __hash__(self) -> int:
        """Hash to compare like DNSPointer."""
        return self._hash

    def __repr__(self) -> str:
        """String representation"""
        return self.to_string(self.alias)


class DNSText(DNSRecord):
    """A DNS text record"""

    __slots__ = ("_hash", "text")

    def __init__(
        self,
        name: str,
        type_: int,
        class_: int,
        ttl: int,
        text: bytes,
        created: float | None = None,
    ) -> None:
        self._fast_init(name, type_, class_, ttl, text, created or current_time_millis())

    def _fast_init(
        self, name: str, type_: _int, class_: _int, ttl: _float, text: bytes, created: _float
    ) -> None:
        self._fast_init_record(name, type_, class_, ttl, created)
        self.text = text
        self._hash = hash((self.key, type_, self.class_, text))

    def write(self, out: DNSOutgoing) -> None:
        """Used in constructing an outgoing packet"""
        out.write_string(self.text)

    def __hash__(self) -> int:
        """Hash to compare like DNSText."""
        return self._hash

    def __eq__(self, other: Any) -> bool:
        """Tests equality on text."""
        return isinstance(other, DNSText) and self._eq(other)

    def _eq(self, other: DNSText) -> bool:
        """Tests equality on text."""
        return self.text == other.text and self._dns_entry_matches(other)

    def __repr__(self) -> str:
        """String representation"""
        if len(self.text) > 10:
            return self.to_string(self.text[:7]) + "..."
        return self.to_string(self.text)


class DNSService(DNSRecord):
    """A DNS service record"""

    __slots__ = ("_hash", "port", "priority", "server", "server_key", "weight")

    def __init__(
        self,
        name: str,
        type_: int,
        class_: int,
        ttl: float | int,
        priority: int,
        weight: int,
        port: int,
        server: str,
        created: float | None = None,
    ) -> None:
        self._fast_init(
            name, type_, class_, ttl, priority, weight, port, server, created or current_time_millis()
        )

    def _fast_init(
        self,
        name: str,
        type_: _int,
        class_: _int,
        ttl: _float,
        priority: _int,
        weight: _int,
        port: _int,
        server: str,
        created: _float,
    ) -> None:
        self._fast_init_record(name, type_, class_, ttl, created)
        self.priority = priority
        self.weight = weight
        self.port = port
        self.server = server
        self.server_key = server.lower()
        self._hash = hash((self.key, type_, self.class_, priority, weight, port, self.server_key))

    def write(self, out: DNSOutgoing) -> None:
        """Used in constructing an outgoing packet"""
        out.write_short(self.priority)
        out.write_short(self.weight)
        out.write_short(self.port)
        out.write_name(self.server)

    def __eq__(self, other: Any) -> bool:
        """Tests equality on priority, weight, port and server"""
        return isinstance(other, DNSService) and self._eq(other)

    def _eq(self, other: DNSService) -> bool:
        """Tests equality on priority, weight, port and server."""
        return (
            self.priority == other.priority
            and self.weight == other.weight
            and self.port == other.port
            and self.server_key == other.server_key
            and self._dns_entry_matches(other)
        )

    def __hash__(self) -> int:
        """Hash to compare like DNSService."""
        return self._hash

    def __repr__(self) -> str:
        """String representation"""
        return self.to_string(f"{self.server}:{self.port}")


class DNSNsec(DNSRecord):
    """A DNS NSEC record"""

    __slots__ = ("_hash", "next_name", "rdtypes")

    def __init__(
        self,
        name: str,
        type_: int,
        class_: int,
        ttl: int | float,
        next_name: str,
        rdtypes: list[int],
        created: float | None = None,
    ) -> None:
        self._fast_init(name, type_, class_, ttl, next_name, rdtypes, created or current_time_millis())

    def _fast_init(
        self,
        name: str,
        type_: _int,
        class_: _int,
        ttl: _float,
        next_name: str,
        rdtypes: list[_int],
        created: _float,
    ) -> None:
        self._fast_init_record(name, type_, class_, ttl, created)
        self.next_name = next_name
        self.rdtypes = sorted(rdtypes)
        self._hash = hash((self.key, type_, self.class_, next_name, *self.rdtypes))

    def write(self, out: DNSOutgoing) -> None:
        """Used in constructing an outgoing packet."""
        bitmap = bytearray(b"\0" * 32)
        total_octets = 0
        for rdtype in self.rdtypes:
            if rdtype > 255:  # mDNS only supports window 0
                raise ValueError(f"rdtype {rdtype} is too large for NSEC")
            byte = rdtype // 8
            total_octets = byte + 1
            bitmap[byte] |= 0x80 >> (rdtype % 8)
        if total_octets == 0:
            # NSEC must have at least one rdtype
            # Writing an empty bitmap is not allowed
            raise ValueError("NSEC must have at least one rdtype")
        out_bytes = bytes(bitmap[0:total_octets])
        out.write_name(self.next_name)
        out._write_byte(0)  # Always window 0
        out._write_byte(len(out_bytes))
        out.write_string(out_bytes)

    def __eq__(self, other: Any) -> bool:
        """Tests equality on next_name and rdtypes."""
        return isinstance(other, DNSNsec) and self._eq(other)

    def _eq(self, other: DNSNsec) -> bool:
        """Tests equality on next_name and rdtypes."""
        return (
            self.next_name == other.next_name
            and self.rdtypes == other.rdtypes
            and self._dns_entry_matches(other)
        )

    def __hash__(self) -> int:
        """Hash to compare like DNSNSec."""
        return self._hash

    def __repr__(self) -> str:
        """String representation"""
        return self.to_string(
            self.next_name + "," + "|".join([self.get_type(type_) for type_ in self.rdtypes])
        )


_DNSRecord = DNSRecord


class DNSRRSet:
    """A set of dns records with a lookup to get the ttl."""

    __slots__ = ("_lookup", "_records")

    def __init__(self, records: list[DNSRecord]) -> None:
        """Create an RRset from records sets."""
        self._records = records
        self._lookup: dict[DNSRecord, DNSRecord] | None = None

    @property
    def lookup(self) -> dict[DNSRecord, DNSRecord]:
        """Return the lookup table."""
        return self._get_lookup()

    def lookup_set(self) -> set[DNSRecord]:
        """Return the lookup table as aset."""
        return set(self._get_lookup())

    def _get_lookup(self) -> dict[DNSRecord, DNSRecord]:
        """Return the lookup table, building it if needed."""
        if self._lookup is None:
            # Build the hash table so we can lookup the record ttl
            self._lookup = {record: record for record in self._records}
        return self._lookup

    def suppresses(self, record: _DNSRecord) -> bool:
        """Returns true if any answer in the rrset can suffice for the
        information held in this record."""
        lookup = self._get_lookup()
        other = lookup.get(record)
        if other is None:
            return False
        return other.ttl > (record.ttl / 2)
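
# Illustrative sketch of the suppression rule above (duplicate answer
# suppression, RFC 6762 section 7.1): a record in the set only suppresses
# a matching record whose TTL is less than twice its own, e.g. with
# hypothetical `name` and `alias` values:
#
#     rrset = DNSRRSet([DNSPointer(name, _TYPE_PTR, _CLASS_IN, 4500, alias)])
#     rrset.suppresses(DNSPointer(name, _TYPE_PTR, _CLASS_IN, 4500, alias))  # True
#     rrset.suppresses(DNSPointer(name, _TYPE_PTR, _CLASS_IN, 9001, alias))  # False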
07070100000030000081A400000000000000000000000167C7AD1600001868000000000000000000000000000000000000003000000000python-zeroconf-0.146.0/src/zeroconf/_engine.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

import asyncio
import itertools
import socket
import threading
from typing import TYPE_CHECKING, cast

from ._record_update import RecordUpdate
from ._utils.asyncio import get_running_loop, run_coro_with_timeout
from ._utils.time import current_time_millis
from .const import _CACHE_CLEANUP_INTERVAL

if TYPE_CHECKING:
    from ._core import Zeroconf


from ._listener import AsyncListener
from ._transport import _WrappedTransport, make_wrapped_transport

_CLOSE_TIMEOUT = 3000  # ms


class AsyncEngine:
    """An engine wraps sockets in the event loop."""

    __slots__ = (
        "_cleanup_timer",
        "_listen_socket",
        "_respond_sockets",
        "_setup_task",
        "loop",
        "protocols",
        "readers",
        "running_future",
        "senders",
        "zc",
    )

    def __init__(
        self,
        zeroconf: Zeroconf,
        listen_socket: socket.socket | None,
        respond_sockets: list[socket.socket],
    ) -> None:
        self.loop: asyncio.AbstractEventLoop | None = None
        self.zc = zeroconf
        self.protocols: list[AsyncListener] = []
        self.readers: list[_WrappedTransport] = []
        self.senders: list[_WrappedTransport] = []
        self.running_future: asyncio.Future[bool | None] | None = None
        self._listen_socket = listen_socket
        self._respond_sockets = respond_sockets
        self._cleanup_timer: asyncio.TimerHandle | None = None
        self._setup_task: asyncio.Task[None] | None = None

    def setup(
        self,
        loop: asyncio.AbstractEventLoop,
        loop_thread_ready: threading.Event | None,
    ) -> None:
        """Set up the instance."""
        self.loop = loop
        self.running_future = loop.create_future()
        self._setup_task = self.loop.create_task(self._async_setup(loop_thread_ready))

    async def _async_setup(self, loop_thread_ready: threading.Event | None) -> None:
        """Set up the instance."""
        self._async_schedule_next_cache_cleanup()
        await self._async_create_endpoints()
        assert self.running_future is not None
        if not self.running_future.done():
            self.running_future.set_result(True)
        if loop_thread_ready:
            loop_thread_ready.set()

    async def _async_create_endpoints(self) -> None:
        """Create endpoints to send and receive."""
        assert self.loop is not None
        loop = self.loop
        reader_sockets = []
        sender_sockets = []
        if self._listen_socket:
            reader_sockets.append(self._listen_socket)
        for s in self._respond_sockets:
            if s not in reader_sockets:
                reader_sockets.append(s)
            sender_sockets.append(s)

        for s in reader_sockets:
            transport, protocol = await loop.create_datagram_endpoint(  # type: ignore[type-var]
                lambda: AsyncListener(self.zc),  # type: ignore[arg-type, return-value]
                sock=s,
            )
            self.protocols.append(cast(AsyncListener, protocol))
            self.readers.append(make_wrapped_transport(cast(asyncio.DatagramTransport, transport)))
            if s in sender_sockets:
                self.senders.append(make_wrapped_transport(cast(asyncio.DatagramTransport, transport)))

    def _async_cache_cleanup(self) -> None:
        """Periodic cache cleanup."""
        now = current_time_millis()
        self.zc.question_history.async_expire(now)
        self.zc.record_manager.async_updates(
            now,
            [RecordUpdate(record, record) for record in self.zc.cache.async_expire(now)],
        )
        self.zc.record_manager.async_updates_complete(False)
        self._async_schedule_next_cache_cleanup()

    def _async_schedule_next_cache_cleanup(self) -> None:
        """Schedule the next cache cleanup."""
        loop = self.loop
        assert loop is not None
        self._cleanup_timer = loop.call_at(loop.time() + _CACHE_CLEANUP_INTERVAL, self._async_cache_cleanup)

    async def _async_close(self) -> None:
        """Cancel and wait for the cleanup task to finish."""
        assert self._setup_task is not None
        await self._setup_task
        self._async_shutdown()
        await asyncio.sleep(0)  # flush out any call soons
        assert self._cleanup_timer is not None
        self._cleanup_timer.cancel()

    def _async_shutdown(self) -> None:
        """Shutdown transports and sockets."""
        assert self.running_future is not None
        assert self.loop is not None
        self.running_future = self.loop.create_future()
        for wrapped_transport in itertools.chain(self.senders, self.readers):
            wrapped_transport.transport.close()

    def close(self) -> None:
        """Close from sync context.

        While it is not expected during normal operation,
        this function may raise EventLoopBlocked if the underlying
        call to `_async_close` cannot be completed.
        """
        assert self.loop is not None
        # Guard against Zeroconf.close() being called from the eventloop
        if get_running_loop() == self.loop:
            self._async_shutdown()
            return
        if not self.loop.is_running():
            return
        run_coro_with_timeout(self._async_close(), self.loop, _CLOSE_TIMEOUT)
07070100000031000081A400000000000000000000000167C7AD160000085E000000000000000000000000000000000000003400000000python-zeroconf-0.146.0/src/zeroconf/_exceptions.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations


class Error(Exception):
    """Base class for all zeroconf exceptions."""


class IncomingDecodeError(Error):
    """Exception when there is invalid data in an incoming packet."""


class NonUniqueNameException(Error):
    """Exception when the name is already registered."""


class NamePartTooLongException(Error):
    """Exception when the name is too long."""


class AbstractMethodException(Error):
    """Exception when a required method is not implemented."""


class BadTypeInNameException(Error):
    """Exception when the type in a name is invalid."""


class ServiceNameAlreadyRegistered(Error):
    """Exception when a service name is already registered."""


class EventLoopBlocked(Error):
    """Exception when the event loop is blocked.

    This exception is never expected to be thrown
    during normal operation. It should only happen
    when the cpu is maxed out or there is something blocking
    the event loop.
    """


class NotRunningException(Error):
    """Exception when an action is called with a zeroconf instance that is not running.

    The instance may not be running because it was already shutdown
    or startup has failed in some unexpected way.
    """
07070100000032000041ED00000000000000000000000267C7AD1600000000000000000000000000000000000000000000002F00000000python-zeroconf-0.146.0/src/zeroconf/_handlers07070100000033000081A400000000000000000000000167C7AD16000003B2000000000000000000000000000000000000003B00000000python-zeroconf-0.146.0/src/zeroconf/_handlers/__init__.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations
07070100000034000081A400000000000000000000000167C7AD1600000339000000000000000000000000000000000000003B00000000python-zeroconf-0.146.0/src/zeroconf/_handlers/answers.pxd
import cython

from .._dns cimport DNSRecord
from .._protocol.outgoing cimport DNSOutgoing


cdef class QuestionAnswers:

    cdef public dict ucast
    cdef public dict mcast_now
    cdef public dict mcast_aggregate
    cdef public dict mcast_aggregate_last_second


cdef class AnswerGroup:

    cdef public double send_after
    cdef public double send_before
    cdef public cython.dict answers


cdef object _FLAGS_QR_RESPONSE_AA
cdef object NAME_GETTER

cpdef DNSOutgoing construct_outgoing_multicast_answers(cython.dict answers)

cpdef DNSOutgoing construct_outgoing_unicast_answers(
    cython.dict answers, bint ucast_source, cython.list questions, object id_
)


@cython.locals(answer=DNSRecord, additionals=cython.set, additional=DNSRecord)
cdef void _add_answers_additionals(DNSOutgoing out, cython.dict answers)
07070100000035000081A400000000000000000000000167C7AD16000010DB000000000000000000000000000000000000003A00000000python-zeroconf-0.146.0/src/zeroconf/_handlers/answers.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

from operator import attrgetter

from .._dns import DNSQuestion, DNSRecord
from .._protocol.outgoing import DNSOutgoing
from ..const import _FLAGS_AA, _FLAGS_QR_RESPONSE

_AnswerWithAdditionalsType = dict[DNSRecord, set[DNSRecord]]

int_ = int


MULTICAST_DELAY_RANDOM_INTERVAL = (20, 120)

NAME_GETTER = attrgetter("name")

_FLAGS_QR_RESPONSE_AA = _FLAGS_QR_RESPONSE | _FLAGS_AA

float_ = float


class QuestionAnswers:
    """A group of answers to a question."""

    __slots__ = ("mcast_aggregate", "mcast_aggregate_last_second", "mcast_now", "ucast")

    def __init__(
        self,
        ucast: _AnswerWithAdditionalsType,
        mcast_now: _AnswerWithAdditionalsType,
        mcast_aggregate: _AnswerWithAdditionalsType,
        mcast_aggregate_last_second: _AnswerWithAdditionalsType,
    ) -> None:
        """Initialize a QuestionAnswers."""
        self.ucast = ucast
        self.mcast_now = mcast_now
        self.mcast_aggregate = mcast_aggregate
        self.mcast_aggregate_last_second = mcast_aggregate_last_second

    def __repr__(self) -> str:
        """Return a string representation of this QuestionAnswers."""
        return (
            f"QuestionAnswers(ucast={self.ucast}, mcast_now={self.mcast_now}, "
            f"mcast_aggregate={self.mcast_aggregate}, "
            f"mcast_aggregate_last_second={self.mcast_aggregate_last_second})"
        )


class AnswerGroup:
    """A group of answers scheduled to be sent at the same time."""

    __slots__ = ("answers", "send_after", "send_before")

    def __init__(
        self,
        send_after: float_,
        send_before: float_,
        answers: _AnswerWithAdditionalsType,
    ) -> None:
        self.send_after = send_after  # Must be sent after this time
        self.send_before = send_before  # Must be sent before this time
        self.answers = answers


def construct_outgoing_multicast_answers(
    answers: _AnswerWithAdditionalsType,
) -> DNSOutgoing:
    """Add answers and additionals to a DNSOutgoing."""
    out = DNSOutgoing(_FLAGS_QR_RESPONSE_AA, True)
    _add_answers_additionals(out, answers)
    return out


def construct_outgoing_unicast_answers(
    answers: _AnswerWithAdditionalsType,
    ucast_source: bool,
    questions: list[DNSQuestion],
    id_: int_,
) -> DNSOutgoing:
    """Add answers and additionals to a DNSOutgoing."""
    out = DNSOutgoing(_FLAGS_QR_RESPONSE_AA, False, id_)
    # Add the questions back to match legacy unicast behavior
    if ucast_source:
        for question in questions:
            out.add_question(question)
    _add_answers_additionals(out, answers)
    return out


def _add_answers_additionals(out: DNSOutgoing, answers: _AnswerWithAdditionalsType) -> None:
    # Find additionals and suppress any additionals that are already in answers
    sending: set[DNSRecord] = set(answers)
    # Answers are sorted to group names together to increase the chance
    # that similar names will end up in the same packet and can reduce the
    # overall size of the outgoing response via name compression
    for answer in sorted(answers, key=NAME_GETTER):
        out.add_answer_at_time(answer, 0)
        additionals = answers[answer]
        for additional in additionals:
            if additional not in sending:
                out.add_additional_answer(additional)
                sending.add(additional)
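
# Illustrative sketch: if an additional record is itself already one of the
# answers, the `sending` set above keeps it from being written twice. With a
# hypothetical SRV record and its matching A record:
#
#     answers = {srv_record: {a_record}, a_record: set()}
#     out = construct_outgoing_multicast_answers(answers)
#     # a_record is written once, in the answer section only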
07070100000036000081A400000000000000000000000167C7AD1600000307000000000000000000000000000000000000004C00000000python-zeroconf-0.146.0/src/zeroconf/_handlers/multicast_outgoing_queue.pxd
import cython

from .._utils.time cimport current_time_millis, millis_to_seconds
from .answers cimport AnswerGroup, construct_outgoing_multicast_answers


cdef bint TYPE_CHECKING
cdef tuple MULTICAST_DELAY_RANDOM_INTERVAL
cdef object RAND_INT

cdef class MulticastOutgoingQueue:

    cdef object zc
    cdef public object queue
    cdef public object _multicast_delay_random_min
    cdef public object _multicast_delay_random_max
    cdef object _additional_delay
    cdef object _aggregation_delay

    @cython.locals(last_group=AnswerGroup, random_int=cython.uint)
    cpdef void async_add(self, double now, cython.dict answers)

    @cython.locals(pending=AnswerGroup)
    cdef void _remove_answers_from_queue(self, cython.dict answers)

    cpdef void async_ready(self)
07070100000037000081A400000000000000000000000167C7AD16000013E7000000000000000000000000000000000000004B00000000python-zeroconf-0.146.0/src/zeroconf/_handlers/multicast_outgoing_queue.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

import random
from collections import deque
from typing import TYPE_CHECKING

from .._utils.time import current_time_millis, millis_to_seconds
from .answers import (
    MULTICAST_DELAY_RANDOM_INTERVAL,
    AnswerGroup,
    _AnswerWithAdditionalsType,
    construct_outgoing_multicast_answers,
)

RAND_INT = random.randint

if TYPE_CHECKING:
    from .._core import Zeroconf

_float = float
_int = int


class MulticastOutgoingQueue:
    """An outgoing queue used to aggregate multicast responses."""

    __slots__ = (
        "_additional_delay",
        "_aggregation_delay",
        "_multicast_delay_random_max",
        "_multicast_delay_random_min",
        "queue",
        "zc",
    )

    def __init__(self, zeroconf: Zeroconf, additional_delay: _int, max_aggregation_delay: _int) -> None:
        self.zc = zeroconf
        self.queue: deque[AnswerGroup] = deque()
        # The additional delay is used to protect the network against
        # excessive packet flooding
        # https://datatracker.ietf.org/doc/html/rfc6762#section-14
        self._multicast_delay_random_min = MULTICAST_DELAY_RANDOM_INTERVAL[0]
        self._multicast_delay_random_max = MULTICAST_DELAY_RANDOM_INTERVAL[1]
        self._additional_delay = additional_delay
        self._aggregation_delay = max_aggregation_delay

    def async_add(self, now: _float, answers: _AnswerWithAdditionalsType) -> None:
        """Add a group of answers with additionals to the outgoing queue."""
        loop = self.zc.loop
        if TYPE_CHECKING:
            assert loop is not None
        random_int = RAND_INT(self._multicast_delay_random_min, self._multicast_delay_random_max)
        random_delay = random_int + self._additional_delay
        send_after = now + random_delay
        send_before = now + self._aggregation_delay + self._additional_delay
        if len(self.queue):
            # If the calculated send_after time is not later than that of
            # the last group scheduled to go out, add the answers to that
            # group instead, since this allows aggregating additional
            # responses into a single packet
            last_group = self.queue[-1]
            if send_after <= last_group.send_after:
                last_group.answers.update(answers)
                return
        else:
            loop.call_at(loop.time() + millis_to_seconds(random_delay), self.async_ready)
        self.queue.append(AnswerGroup(send_after, send_before, answers))
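        # Worked example of the scheduling above: with the default
        # MULTICAST_DELAY_RANDOM_INTERVAL of (20, 120) and an additional
        # delay of 0, a group added at now=10000 gets a send_after in
        # [10020, 10120] ms; a later group whose send_after would land at
        # or before that is merged into the existing group instead of
        # being queued separately.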

    def _remove_answers_from_queue(self, answers: _AnswerWithAdditionalsType) -> None:
        """Remove a set of answers from the outgoing queue."""
        for pending in self.queue:
            for record in answers:
                pending.answers.pop(record, None)

    def async_ready(self) -> None:
        """Process anything in the queue that is ready."""
        zc = self.zc
        loop = zc.loop
        if TYPE_CHECKING:
            assert loop is not None
        now = current_time_millis()

        if len(self.queue) > 1 and self.queue[0].send_before > now:
            # There is more than one answer in the queue,
            # delay until we have to send it (first answer group reaches send_before)
            loop.call_at(
                loop.time() + millis_to_seconds(self.queue[0].send_before - now),
                self.async_ready,
            )
            return

        answers: _AnswerWithAdditionalsType = {}
        # Add all groups that can be sent now
        while len(self.queue) and self.queue[0].send_after <= now:
            answers.update(self.queue.popleft().answers)

        if len(self.queue):
            # If there are still groups in the queue that are not ready to
            # send, make sure we schedule them to go out later
            loop.call_at(
                loop.time() + millis_to_seconds(self.queue[0].send_after - now),
                self.async_ready,
            )

        if answers:  # pragma: no branch
            # Remove the answers being sent now from any groups still queued
            self._remove_answers_from_queue(answers)
            zc.async_send(construct_outgoing_multicast_answers(answers))
07070100000038000081A400000000000000000000000167C7AD1600000F42000000000000000000000000000000000000004100000000python-zeroconf-0.146.0/src/zeroconf/_handlers/query_handler.pxd
import cython

from .._cache cimport DNSCache
from .._dns cimport DNSAddress, DNSPointer, DNSQuestion, DNSRecord, DNSRRSet
from .._history cimport QuestionHistory
from .._protocol.incoming cimport DNSIncoming
from .._services.info cimport ServiceInfo
from .._services.registry cimport ServiceRegistry
from .answers cimport (
    QuestionAnswers,
    construct_outgoing_multicast_answers,
    construct_outgoing_unicast_answers,
)
from .multicast_outgoing_queue cimport MulticastOutgoingQueue


cdef bint TYPE_CHECKING
cdef cython.uint _ONE_SECOND, _TYPE_PTR, _TYPE_ANY, _TYPE_A, _TYPE_AAAA, _TYPE_SRV, _TYPE_TXT
cdef str _SERVICE_TYPE_ENUMERATION_NAME
cdef cython.set _RESPOND_IMMEDIATE_TYPES
cdef cython.set _ADDRESS_RECORD_TYPES
cdef object IPVersion, _IPVersion_ALL
cdef object _CLASS_IN, _DNS_OTHER_TTL

cdef unsigned int _ANSWER_STRATEGY_SERVICE_TYPE_ENUMERATION
cdef unsigned int _ANSWER_STRATEGY_POINTER
cdef unsigned int _ANSWER_STRATEGY_ADDRESS
cdef unsigned int _ANSWER_STRATEGY_SERVICE
cdef unsigned int _ANSWER_STRATEGY_TEXT

cdef list _EMPTY_SERVICES_LIST
cdef list _EMPTY_TYPES_LIST

cdef class _AnswerStrategy:

    cdef public DNSQuestion question
    cdef public unsigned int strategy_type
    cdef public list types
    cdef public list services


cdef class _QueryResponse:

    cdef bint _is_probe
    cdef cython.list _questions
    cdef double _now
    cdef DNSCache _cache
    cdef cython.dict _additionals
    cdef cython.set _ucast
    cdef cython.set _mcast_now
    cdef cython.set _mcast_aggregate
    cdef cython.set _mcast_aggregate_last_second

    @cython.locals(record=DNSRecord)
    cdef void add_qu_question_response(self, cython.dict answers)

    cdef void add_ucast_question_response(self, cython.dict answers)

    @cython.locals(answer=DNSRecord, question=DNSQuestion)
    cdef void add_mcast_question_response(self, cython.dict answers)

    @cython.locals(maybe_entry=DNSRecord)
    cdef bint _has_mcast_within_one_quarter_ttl(self, DNSRecord record)

    @cython.locals(maybe_entry=DNSRecord)
    cdef bint _has_mcast_record_in_last_second(self, DNSRecord record)

    cdef QuestionAnswers answers(self)

cdef class QueryHandler:

    cdef object zc
    cdef ServiceRegistry registry
    cdef DNSCache cache
    cdef QuestionHistory question_history
    cdef MulticastOutgoingQueue out_queue
    cdef MulticastOutgoingQueue out_delay_queue

    @cython.locals(service=ServiceInfo)
    cdef void _add_service_type_enumeration_query_answers(self, list types, cython.dict answer_set, DNSRRSet known_answers)

    @cython.locals(service=ServiceInfo)
    cdef void _add_pointer_answers(self, list services, cython.dict answer_set, DNSRRSet known_answers)

    @cython.locals(service=ServiceInfo, dns_address=DNSAddress)
    cdef void _add_address_answers(self, list services, cython.dict answer_set, DNSRRSet known_answers, cython.uint type_)

    @cython.locals(question_lower_name=str, type_=cython.uint, service=ServiceInfo)
    cdef cython.dict _answer_question(self, DNSQuestion question, unsigned int strategy_type, list types, list services, DNSRRSet known_answers)

    @cython.locals(
        msg=DNSIncoming,
        msgs=list,
        strategy=_AnswerStrategy,
        question=DNSQuestion,
        answer_set=cython.dict,
        known_answers=DNSRRSet,
        known_answers_set=cython.set,
        is_unicast=bint,
        is_probe=object,
        now=double
    )
    cpdef QuestionAnswers async_response(self, cython.list msgs, cython.bint unicast_source)

    @cython.locals(name=str, question_lower_name=str)
    cdef list _get_answer_strategies(self, DNSQuestion question)

    @cython.locals(
        first_packet=DNSIncoming,
        ucast_source=bint,
    )
    cpdef void handle_assembled_query(
        self,
        list packets,
        object addr,
        object port,
        object transport,
        tuple v6_flow_scope
    )
07070100000039000081A400000000000000000000000167C7AD16000046FF000000000000000000000000000000000000004000000000python-zeroconf-0.146.0/src/zeroconf/_handlers/query_handler.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

from typing import TYPE_CHECKING, cast

from .._cache import DNSCache, _UniqueRecordsType
from .._dns import DNSAddress, DNSPointer, DNSQuestion, DNSRecord, DNSRRSet
from .._protocol.incoming import DNSIncoming
from .._services.info import ServiceInfo
from .._transport import _WrappedTransport
from .._utils.net import IPVersion
from ..const import (
    _ADDRESS_RECORD_TYPES,
    _CLASS_IN,
    _DNS_OTHER_TTL,
    _MDNS_PORT,
    _ONE_SECOND,
    _SERVICE_TYPE_ENUMERATION_NAME,
    _TYPE_A,
    _TYPE_AAAA,
    _TYPE_ANY,
    _TYPE_NSEC,
    _TYPE_PTR,
    _TYPE_SRV,
    _TYPE_TXT,
)
from .answers import (
    QuestionAnswers,
    _AnswerWithAdditionalsType,
    construct_outgoing_multicast_answers,
    construct_outgoing_unicast_answers,
)

_RESPOND_IMMEDIATE_TYPES = {_TYPE_NSEC, _TYPE_SRV, *_ADDRESS_RECORD_TYPES}

_EMPTY_SERVICES_LIST: list[ServiceInfo] = []
_EMPTY_TYPES_LIST: list[str] = []

_IPVersion_ALL = IPVersion.All

_int = int
_str = str

_ANSWER_STRATEGY_SERVICE_TYPE_ENUMERATION = 0
_ANSWER_STRATEGY_POINTER = 1
_ANSWER_STRATEGY_ADDRESS = 2
_ANSWER_STRATEGY_SERVICE = 3
_ANSWER_STRATEGY_TEXT = 4

if TYPE_CHECKING:
    from .._core import Zeroconf


class _AnswerStrategy:
    __slots__ = ("question", "services", "strategy_type", "types")

    def __init__(
        self,
        question: DNSQuestion,
        strategy_type: _int,
        types: list[str],
        services: list[ServiceInfo],
    ) -> None:
        """Create an answer strategy."""
        self.question = question
        self.strategy_type = strategy_type
        self.types = types
        self.services = services


class _QueryResponse:
    """A pair for unicast and multicast DNSOutgoing responses."""

    __slots__ = (
        "_additionals",
        "_cache",
        "_is_probe",
        "_mcast_aggregate",
        "_mcast_aggregate_last_second",
        "_mcast_now",
        "_now",
        "_questions",
        "_ucast",
    )

    def __init__(self, cache: DNSCache, questions: list[DNSQuestion], is_probe: bool, now: float) -> None:
        """Build a query response."""
        self._is_probe = is_probe
        self._questions = questions
        self._now = now
        self._cache = cache
        self._additionals: _AnswerWithAdditionalsType = {}
        self._ucast: set[DNSRecord] = set()
        self._mcast_now: set[DNSRecord] = set()
        self._mcast_aggregate: set[DNSRecord] = set()
        self._mcast_aggregate_last_second: set[DNSRecord] = set()

    def add_qu_question_response(self, answers: _AnswerWithAdditionalsType) -> None:
        """Generate a response to a multicast QU query."""
        for record, additionals in answers.items():
            self._additionals[record] = additionals
            if self._is_probe:
                self._ucast.add(record)
            if not self._has_mcast_within_one_quarter_ttl(record):
                self._mcast_now.add(record)
            elif not self._is_probe:
                self._ucast.add(record)

    def add_ucast_question_response(self, answers: _AnswerWithAdditionalsType) -> None:
        """Generate a response to a unicast query."""
        self._additionals.update(answers)
        self._ucast.update(answers)

    def add_mcast_question_response(self, answers: _AnswerWithAdditionalsType) -> None:
        """Generate a response to a multicast query."""
        self._additionals.update(answers)
        for answer in answers:
            if self._is_probe:
                self._mcast_now.add(answer)
                continue

            if self._has_mcast_record_in_last_second(answer):
                self._mcast_aggregate_last_second.add(answer)
                continue

            if len(self._questions) == 1:
                question = self._questions[0]
                if question.type in _RESPOND_IMMEDIATE_TYPES:
                    self._mcast_now.add(answer)
                    continue

            self._mcast_aggregate.add(answer)

    def answers(self) -> QuestionAnswers:
        """Return answer sets that will be queued."""
        ucast = {r: self._additionals[r] for r in self._ucast}
        mcast_now = {r: self._additionals[r] for r in self._mcast_now}
        mcast_aggregate = {r: self._additionals[r] for r in self._mcast_aggregate}
        mcast_aggregate_last_second = {r: self._additionals[r] for r in self._mcast_aggregate_last_second}
        return QuestionAnswers(ucast, mcast_now, mcast_aggregate, mcast_aggregate_last_second)

    def _has_mcast_within_one_quarter_ttl(self, record: DNSRecord) -> bool:
        """Check to see if a record has been mcasted recently.

        https://datatracker.ietf.org/doc/html/rfc6762#section-5.4
        When receiving a question with the unicast-response bit set, a
        responder SHOULD usually respond with a unicast packet directed back
        to the querier.  However, if the responder has not multicast that
        record recently (within one quarter of its TTL), then the responder
        SHOULD instead multicast the response so as to keep all the peer
        caches up to date
        """
        if TYPE_CHECKING:
            record = cast(_UniqueRecordsType, record)
        maybe_entry = self._cache.async_get_unique(record)
        return bool(maybe_entry is not None and maybe_entry.is_recent(self._now))

    def _has_mcast_record_in_last_second(self, record: DNSRecord) -> bool:
        """Check if an answer was seen in the last second.
        Protect the network against excessive packet flooding
        https://datatracker.ietf.org/doc/html/rfc6762#section-14
        """
        if TYPE_CHECKING:
            record = cast(_UniqueRecordsType, record)
        maybe_entry = self._cache.async_get_unique(record)
        return bool(maybe_entry is not None and self._now - maybe_entry.created < _ONE_SECOND)


class QueryHandler:
    """Query the ServiceRegistry."""

    __slots__ = (
        "cache",
        "out_delay_queue",
        "out_queue",
        "question_history",
        "registry",
        "zc",
    )

    def __init__(self, zc: Zeroconf) -> None:
        """Init the query handler."""
        self.zc = zc
        self.registry = zc.registry
        self.cache = zc.cache
        self.question_history = zc.question_history
        self.out_queue = zc.out_queue
        self.out_delay_queue = zc.out_delay_queue

    def _add_service_type_enumeration_query_answers(
        self,
        types: list[str],
        answer_set: _AnswerWithAdditionalsType,
        known_answers: DNSRRSet,
    ) -> None:
        """Provide an answer to a service type enumeration query.

        https://datatracker.ietf.org/doc/html/rfc6763#section-9
        """
        for stype in types:
            dns_pointer = DNSPointer(
                _SERVICE_TYPE_ENUMERATION_NAME,
                _TYPE_PTR,
                _CLASS_IN,
                _DNS_OTHER_TTL,
                stype,
                0.0,
            )
            if not known_answers.suppresses(dns_pointer):
                answer_set[dns_pointer] = set()

    def _add_pointer_answers(
        self,
        services: list[ServiceInfo],
        answer_set: _AnswerWithAdditionalsType,
        known_answers: DNSRRSet,
    ) -> None:
        """Answer PTR/ANY question."""
        for service in services:
            # Add recommended additional answers according to
            # https://tools.ietf.org/html/rfc6763#section-12.1.
            dns_pointer = service._dns_pointer(None)
            if known_answers.suppresses(dns_pointer):
                continue
            answer_set[dns_pointer] = {
                service._dns_service(None),
                service._dns_text(None),
                *service._get_address_and_nsec_records(None),
            }

    def _add_address_answers(
        self,
        services: list[ServiceInfo],
        answer_set: _AnswerWithAdditionalsType,
        known_answers: DNSRRSet,
        type_: _int,
    ) -> None:
        """Answer A/AAAA/ANY question."""
        for service in services:
            answers: list[DNSAddress] = []
            additionals: set[DNSRecord] = set()
            seen_types: set[int] = set()
            for dns_address in service._dns_addresses(None, _IPVersion_ALL):
                seen_types.add(dns_address.type)
                if dns_address.type != type_:
                    additionals.add(dns_address)
                elif not known_answers.suppresses(dns_address):
                    answers.append(dns_address)
            missing_types: set[int] = _ADDRESS_RECORD_TYPES - seen_types
            if answers:
                if missing_types:
                    assert service.server is not None, "Service server must be set for NSEC record."
                    additionals.add(service._dns_nsec(list(missing_types), None))
                for answer in answers:
                    answer_set[answer] = additionals
            elif type_ in missing_types:
                assert service.server is not None, "Service server must be set for NSEC record."
                answer_set[service._dns_nsec(list(missing_types), None)] = set()

    def _answer_question(
        self,
        question: DNSQuestion,
        strategy_type: _int,
        types: list[str],
        services: list[ServiceInfo],
        known_answers: DNSRRSet,
    ) -> _AnswerWithAdditionalsType:
        """Answer a question."""
        answer_set: _AnswerWithAdditionalsType = {}

        if strategy_type == _ANSWER_STRATEGY_SERVICE_TYPE_ENUMERATION:
            self._add_service_type_enumeration_query_answers(types, answer_set, known_answers)
        elif strategy_type == _ANSWER_STRATEGY_POINTER:
            self._add_pointer_answers(services, answer_set, known_answers)
        elif strategy_type == _ANSWER_STRATEGY_ADDRESS:
            self._add_address_answers(services, answer_set, known_answers, question.type)
        elif strategy_type == _ANSWER_STRATEGY_SERVICE:
            # Add recommended additional answers according to
            # https://tools.ietf.org/html/rfc6763#section-12.2.
            service = services[0]
            dns_service = service._dns_service(None)
            if not known_answers.suppresses(dns_service):
                answer_set[dns_service] = service._get_address_and_nsec_records(None)
        elif strategy_type == _ANSWER_STRATEGY_TEXT:  # pragma: no branch
            service = services[0]
            dns_text = service._dns_text(None)
            if not known_answers.suppresses(dns_text):
                answer_set[dns_text] = set()

        return answer_set

    def async_response(  # pylint: disable=unused-argument
        self, msgs: list[DNSIncoming], ucast_source: bool
    ) -> QuestionAnswers | None:
        """Deal with incoming query packets. Provides a response if possible.

        This function must be run in the event loop as it is not
        threadsafe.
        """
        strategies: list[_AnswerStrategy] = []
        for msg in msgs:
            for question in msg._questions:
                strategies.extend(self._get_answer_strategies(question))

        if not strategies:
            # We have no way to answer the question because we have
            # nothing in the ServiceRegistry that matches or we do not
            # understand the question.
            return None

        is_probe = False
        msg = msgs[0]
        questions = msg._questions
        # Only decode known answers if we are not a probe and we have
        # at least one answer strategy
        answers: list[DNSRecord] = []
        for msg in msgs:
            if msg.is_probe():
                is_probe = True
            else:
                answers.extend(msg.answers())

        query_res = _QueryResponse(self.cache, questions, is_probe, msg.now)
        known_answers = DNSRRSet(answers)
        known_answers_set: set[DNSRecord] | None = None
        now = msg.now
        for strategy in strategies:
            question = strategy.question
            is_unicast = question.unique  # unique and unicast are the same flag
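            # (The QU bit is the top bit of the question class field, the
            # same wire bit that marks a unique/cache-flush record set; see
            # RFC 6762 sections 5.4 and 10.2.)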
            if not is_unicast:
                if known_answers_set is None:  # pragma: no branch
                    known_answers_set = known_answers.lookup_set()
                self.question_history.add_question_at_time(question, now, known_answers_set)
            answer_set = self._answer_question(
                question,
                strategy.strategy_type,
                strategy.types,
                strategy.services,
                known_answers,
            )
            if not ucast_source and is_unicast:
                query_res.add_qu_question_response(answer_set)
                continue
            if ucast_source:
                query_res.add_ucast_question_response(answer_set)
            # We always multicast as well, even for a unicast source,
            # and let _QueryResponse rate limit answers sent within the
            # last second
            query_res.add_mcast_question_response(answer_set)

        return query_res.answers()

    def _get_answer_strategies(
        self,
        question: DNSQuestion,
    ) -> list[_AnswerStrategy]:
        """Collect strategies to answer a question."""
        name = question.name
        question_lower_name = name.lower()
        type_ = question.type
        strategies: list[_AnswerStrategy] = []

        if type_ == _TYPE_PTR and question_lower_name == _SERVICE_TYPE_ENUMERATION_NAME:
            types = self.registry.async_get_types()
            if types:
                strategies.append(
                    _AnswerStrategy(
                        question,
                        _ANSWER_STRATEGY_SERVICE_TYPE_ENUMERATION,
                        types,
                        _EMPTY_SERVICES_LIST,
                    )
                )
            return strategies

        if type_ in (_TYPE_PTR, _TYPE_ANY):
            services = self.registry.async_get_infos_type(question_lower_name)
            if services:
                strategies.append(
                    _AnswerStrategy(question, _ANSWER_STRATEGY_POINTER, _EMPTY_TYPES_LIST, services)
                )

        if type_ in (_TYPE_A, _TYPE_AAAA, _TYPE_ANY):
            services = self.registry.async_get_infos_server(question_lower_name)
            if services:
                strategies.append(
                    _AnswerStrategy(question, _ANSWER_STRATEGY_ADDRESS, _EMPTY_TYPES_LIST, services)
                )

        if type_ in (_TYPE_SRV, _TYPE_TXT, _TYPE_ANY):
            service = self.registry.async_get_info_name(question_lower_name)
            if service is not None:
                if type_ in (_TYPE_SRV, _TYPE_ANY):
                    strategies.append(
                        _AnswerStrategy(
                            question,
                            _ANSWER_STRATEGY_SERVICE,
                            _EMPTY_TYPES_LIST,
                            [service],
                        )
                    )
                if type_ in (_TYPE_TXT, _TYPE_ANY):
                    strategies.append(
                        _AnswerStrategy(
                            question,
                            _ANSWER_STRATEGY_TEXT,
                            _EMPTY_TYPES_LIST,
                            [service],
                        )
                    )

        return strategies

    def handle_assembled_query(
        self,
        packets: list[DNSIncoming],
        addr: _str,
        port: _int,
        transport: _WrappedTransport,
        v6_flow_scope: tuple[()] | tuple[int, int],
    ) -> None:
        """Respond to a (re)assembled query.

        If the protocol received packets with the TC bit set, it will
        wait a bit for the rest of the packets and only call
        handle_assembled_query once it has a complete set of packets
        or the timer expires. If the TC bit is not set, a single
        packet will be in packets.
        """
        first_packet = packets[0]
        ucast_source = port != _MDNS_PORT
        question_answers = self.async_response(packets, ucast_source)
        if question_answers is None:
            return
        if question_answers.ucast:
            questions = first_packet._questions
            id_ = first_packet.id
            out = construct_outgoing_unicast_answers(question_answers.ucast, ucast_source, questions, id_)
            # When sending unicast, only send back the reply
            # via the same socket that it was received from
            # as we know it's reachable from that socket
            self.zc.async_send(out, addr, port, v6_flow_scope, transport)
        if question_answers.mcast_now:
            self.zc.async_send(construct_outgoing_multicast_answers(question_answers.mcast_now))
        if question_answers.mcast_aggregate:
            self.out_queue.async_add(first_packet.now, question_answers.mcast_aggregate)
        if question_answers.mcast_aggregate_last_second:
            # https://datatracker.ietf.org/doc/html/rfc6762#section-14
            # If we broadcast it in the last second, we have to delay
            # at least a second before we send it again
            self.out_delay_queue.async_add(first_packet.now, question_answers.mcast_aggregate_last_second)
0707010000003A000081A400000000000000000000000167C7AD16000004C5000000000000000000000000000000000000004200000000python-zeroconf-0.146.0/src/zeroconf/_handlers/record_manager.pxd
import cython

from .._cache cimport DNSCache
from .._dns cimport DNSQuestion, DNSRecord
from .._protocol.incoming cimport DNSIncoming
from .._updates cimport RecordUpdateListener
from .._utils.time cimport current_time_millis
from .._record_update cimport RecordUpdate

cdef cython.float _DNS_PTR_MIN_TTL
cdef cython.uint _TYPE_PTR
cdef object _ADDRESS_RECORD_TYPES
cdef bint TYPE_CHECKING


cdef class RecordManager:

    cdef public object zc
    cdef public DNSCache cache
    cdef public cython.set listeners

    cpdef void async_updates(self, object now, list records)

    cpdef void async_updates_complete(self, bint notify)

    @cython.locals(
        cache=DNSCache,
        record=DNSRecord,
        answers=cython.list,
        maybe_entry=DNSRecord,
        rec_update=RecordUpdate
    )
    cpdef void async_updates_from_response(self, DNSIncoming msg)

    cpdef void async_add_listener(self, RecordUpdateListener listener, object question)

    cpdef void async_remove_listener(self, RecordUpdateListener listener)

    @cython.locals(question=DNSQuestion, record=DNSRecord)
    cdef void _async_update_matching_records(self, RecordUpdateListener listener, cython.list questions)
0707010000003B000081A400000000000000000000000167C7AD16000021CC000000000000000000000000000000000000004100000000python-zeroconf-0.146.0/src/zeroconf/_handlers/record_manager.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

from typing import TYPE_CHECKING, cast

from .._cache import _UniqueRecordsType
from .._dns import DNSQuestion, DNSRecord
from .._logger import log
from .._protocol.incoming import DNSIncoming
from .._record_update import RecordUpdate
from .._updates import RecordUpdateListener
from .._utils.time import current_time_millis
from ..const import _ADDRESS_RECORD_TYPES, _DNS_PTR_MIN_TTL, _TYPE_PTR

if TYPE_CHECKING:
    from .._core import Zeroconf

_float = float


class RecordManager:
    """Process records into the cache and notify listeners."""

    __slots__ = ("cache", "listeners", "zc")

    def __init__(self, zeroconf: Zeroconf) -> None:
        """Init the record manager."""
        self.zc = zeroconf
        self.cache = zeroconf.cache
        self.listeners: set[RecordUpdateListener] = set()

    def async_updates(self, now: _float, records: list[RecordUpdate]) -> None:
        """Used to notify listeners of new information that has updated
        a record.

        This method must be called before the cache is updated.

        This method will be run in the event loop.
        """
        for listener in self.listeners.copy():
            listener.async_update_records(self.zc, now, records)

    def async_updates_complete(self, notify: bool) -> None:
        """Used to notify listeners of new information that has updated
        a record.

        This method must be called after the cache is updated.

        This method will be run in the event loop.
        """
        for listener in self.listeners.copy():
            listener.async_update_records_complete()
        if notify:
            self.zc.async_notify_all()

    def async_updates_from_response(self, msg: DNSIncoming) -> None:
        """Deal with incoming response packets.  All answers
        are held in the cache, and listeners are notified.

        This function must be run in the event loop as it is not
        threadsafe.
        """
        updates: list[RecordUpdate] = []
        address_adds: list[DNSRecord] = []
        other_adds: list[DNSRecord] = []
        removes: set[DNSRecord] = set()
        now = msg.now
        unique_types: set[tuple[str, int, int]] = set()
        cache = self.cache
        answers = msg.answers()

        for record in answers:
            # Protect zeroconf from records that can cause denial of service.
            #
            # We enforce a minimum TTL for PTR records to avoid
            # ServiceBrowsers generating excessive refresh queries.
            # Apple uses a 15s minimum TTL; however, we do not have the
            # same level of rate limiting and safeguards, so we use 1/4
            # of the recommended value.
            record_type = record.type
            record_ttl = record.ttl
            if record_ttl and record_type == _TYPE_PTR and record_ttl < _DNS_PTR_MIN_TTL:
                log.debug(
                    "Increasing effective ttl of %s to minimum of %s to protect against excessive refreshes.",
                    record,
                    _DNS_PTR_MIN_TTL,
                )
                # Safe because the record is never in the cache yet
                record._set_created_ttl(record.created, _DNS_PTR_MIN_TTL)

            if record.unique:  # https://tools.ietf.org/html/rfc6762#section-10.2
                unique_types.add((record.name, record_type, record.class_))

            if TYPE_CHECKING:
                record = cast(_UniqueRecordsType, record)

            maybe_entry = cache.async_get_unique(record)
            if not record.is_expired(now):
                if record_type in _ADDRESS_RECORD_TYPES:
                    address_adds.append(record)
                else:
                    other_adds.append(record)
                rec_update = RecordUpdate.__new__(RecordUpdate)
                rec_update._fast_init(record, maybe_entry)
                updates.append(rec_update)
            # This is likely a goodbye since the record is
            # expired and exists in the cache
            elif maybe_entry is not None:
                rec_update = RecordUpdate.__new__(RecordUpdate)
                rec_update._fast_init(record, maybe_entry)
                updates.append(rec_update)
                removes.add(record)

        if unique_types:
            cache.async_mark_unique_records_older_than_1s_to_expire(unique_types, answers, now)

        if updates:
            self.async_updates(now, updates)
        # The cache adds must be processed AFTER we trigger
        # the updates since we compare existing data
        # with the new data and updating the cache
        # ahead of update_record will cause listeners
        # to miss changes
        #
        # We must process address adds before non-addresses
        # otherwise a fetch of ServiceInfo may miss an address
        # because it thinks the cache is complete
        #
        # The cache is processed under the context manager to ensure
        # that any ServiceBrowser that is going to call
        # zc.get_service_info will see the cached value
        # but ONLY after all the record updates have been
        # processed.
        new = False
        if other_adds or address_adds:
            new = cache.async_add_records(address_adds)
            if cache.async_add_records(other_adds):
                new = True
        # Removes are processed last since
        # ServiceInfo could generate an unneeded query
        # because the data was not yet populated.
        if removes:
            cache.async_remove_records(removes)
        if updates:
            self.async_updates_complete(new)

    def async_add_listener(
        self,
        listener: RecordUpdateListener,
        question: DNSQuestion | list[DNSQuestion] | None,
    ) -> None:
        """Adds a listener for a given question.  The listener will have
        its update_record method called when information is available to
        answer the question(s).

        This function is not thread-safe and must be called in the eventloop.
        """
        if not isinstance(listener, RecordUpdateListener):
            log.error(  # type: ignore[unreachable]
                "listeners passed to async_add_listener must inherit from RecordUpdateListener;"
                " In the future this will fail"
            )

        self.listeners.add(listener)

        if question is None:
            return

        questions = [question] if isinstance(question, DNSQuestion) else question
        self._async_update_matching_records(listener, questions)
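
    # Minimal listener sketch (illustrative; assumes a running Zeroconf
    # instance `zc` and a hypothetical question):
    #
    #   class Watcher(RecordUpdateListener):
    #       def async_update_records(self, zc, now, records):
    #           for update in records:
    #               print(update.new, update.old)
    #
    #   zc.record_manager.async_add_listener(
    #       Watcher(), DNSQuestion("_http._tcp.local.", _TYPE_PTR, _CLASS_IN))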

    def _async_update_matching_records(
        self, listener: RecordUpdateListener, questions: list[DNSQuestion]
    ) -> None:
        """Calls back any existing entries in the cache that answer the question.

        This function must be run from the event loop.
        """
        now = current_time_millis()
        records: list[RecordUpdate] = [
            RecordUpdate(record, None)
            for question in questions
            for record in self.cache.async_entries_with_name(question.name)
            if not record.is_expired(now) and question.answered_by(record)
        ]
        if not records:
            return
        listener.async_update_records(self.zc, now, records)
        listener.async_update_records_complete()
        self.zc.async_notify_all()

    def async_remove_listener(self, listener: RecordUpdateListener) -> None:
        """Removes a listener.

        This function is not threadsafe and must be called in the eventloop.
        """
        try:
            self.listeners.remove(listener)
            self.zc.async_notify_all()
        except KeyError as e:
            log.exception("Failed to remove listener: %r", e)
0707010000003C000081A400000000000000000000000167C7AD1600000232000000000000000000000000000000000000003200000000python-zeroconf-0.146.0/src/zeroconf/_history.pxdimport cython

from ._dns cimport DNSQuestion


cdef cython.double _DUPLICATE_QUESTION_INTERVAL

cdef class QuestionHistory:

    cdef cython.dict _history

    cpdef void add_question_at_time(self, DNSQuestion question, double now, cython.set known_answers)

    @cython.locals(than=double, previous_question=cython.tuple, previous_known_answers=cython.set)
    cpdef bint suppresses(self, DNSQuestion question, double now, cython.set known_answers)

    @cython.locals(than=double, now_known_answers=cython.tuple)
    cpdef void async_expire(self, double now)
0707010000003D000081A400000000000000000000000167C7AD1600000BFF000000000000000000000000000000000000003100000000python-zeroconf-0.146.0/src/zeroconf/_history.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

from ._dns import DNSQuestion, DNSRecord
from .const import _DUPLICATE_QUESTION_INTERVAL

# The QuestionHistory is used to implement Duplicate Question Suppression
# https://datatracker.ietf.org/doc/html/rfc6762#section-7.3

_float = float


class QuestionHistory:
    """Remember questions and known answers."""

    def __init__(self) -> None:
        """Init a new QuestionHistory."""
        self._history: dict[DNSQuestion, tuple[float, set[DNSRecord]]] = {}

    def add_question_at_time(self, question: DNSQuestion, now: _float, known_answers: set[DNSRecord]) -> None:
        """Remember a question with known answers."""
        self._history[question] = (now, known_answers)

    def suppresses(self, question: DNSQuestion, now: _float, known_answers: set[DNSRecord]) -> bool:
        """Check to see if a question should be suppressed.

        https://datatracker.ietf.org/doc/html/rfc6762#section-7.3
        When multiple queriers on the network are querying
        for the same resource records, there is no need for them to all be
        repeatedly asking the same question.
        """
        previous_question = self._history.get(question)
        # There was no previous question in the history
        if not previous_question:
            return False
        than, previous_known_answers = previous_question
        # The last question was older than 999ms
        if now - than > _DUPLICATE_QUESTION_INTERVAL:
            return False
        # If the previous question included known answers we do not
        # have, responders will have suppressed those answers, so we
        # still need to ask; otherwise the duplicate is suppressed
        return not previous_known_answers - known_answers

    def async_expire(self, now: _float) -> None:
        """Expire the history of old questions."""
        removes: list[DNSQuestion] = []
        for question, now_known_answers in self._history.items():
            than, _ = now_known_answers
            if now - than > _DUPLICATE_QUESTION_INTERVAL:
                removes.append(question)
        for question in removes:
            del self._history[question]

    def clear(self) -> None:
        """Clear the history."""
        self._history.clear()
0707010000003E000081A400000000000000000000000167C7AD1600000639000000000000000000000000000000000000003300000000python-zeroconf-0.146.0/src/zeroconf/_listener.pxd
import cython

from ._handlers.query_handler cimport QueryHandler
from ._handlers.record_manager cimport RecordManager
from ._protocol.incoming cimport DNSIncoming
from ._services.registry cimport ServiceRegistry
from ._utils.time cimport current_time_millis, millis_to_seconds


cdef object log
cdef object DEBUG_ENABLED
cdef bint TYPE_CHECKING

cdef cython.uint _MAX_MSG_ABSOLUTE
cdef cython.uint _DUPLICATE_PACKET_SUPPRESSION_INTERVAL


cdef class AsyncListener:

    cdef public object zc
    cdef ServiceRegistry _registry
    cdef RecordManager _record_manager
    cdef QueryHandler _query_handler
    cdef public cython.bytes data
    cdef public double last_time
    cdef public DNSIncoming last_message
    cdef public object transport
    cdef public object sock_description
    cdef public cython.dict _deferred
    cdef public cython.dict _timers

    @cython.locals(now=double, debug=cython.bint)
    cpdef datagram_received(self, cython.bytes bytes, cython.tuple addrs)

    @cython.locals(msg=DNSIncoming)
    cpdef _process_datagram_at_time(self, bint debug, cython.uint data_len, double now, bytes data, cython.tuple addrs)

    cdef _cancel_any_timers_for_addr(self, object addr)

    @cython.locals(incoming=DNSIncoming, deferred=list)
    cpdef handle_query_or_defer(
        self,
        DNSIncoming msg,
        object addr,
        object port,
        object transport,
        tuple v6_flow_scope
    )

    cpdef _respond_query(
        self,
        object msg,
        object addr,
        object port,
        object transport,
        tuple v6_flow_scope
    )
0707010000003F000081A400000000000000000000000167C7AD16000022C2000000000000000000000000000000000000003200000000python-zeroconf-0.146.0/src/zeroconf/_listener.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

import asyncio
import logging
import random
from functools import partial
from typing import TYPE_CHECKING, cast

from ._logger import QuietLogger, log
from ._protocol.incoming import DNSIncoming
from ._transport import _WrappedTransport, make_wrapped_transport
from ._utils.time import current_time_millis, millis_to_seconds
from .const import _DUPLICATE_PACKET_SUPPRESSION_INTERVAL, _MAX_MSG_ABSOLUTE

if TYPE_CHECKING:
    from ._core import Zeroconf

_TC_DELAY_RANDOM_INTERVAL = (400, 500)


_bytes = bytes
_str = str
_int = int
_float = float

DEBUG_ENABLED = partial(log.isEnabledFor, logging.DEBUG)


class AsyncListener:
    """A Listener is used by this module to listen on the multicast
    group to which DNS messages are sent, allowing the implementation
    to cache information as it arrives.

    It is used as an asyncio datagram protocol: the event loop calls
    datagram_received() when a packet arrives on a bound socket."""

    __slots__ = (
        "_deferred",
        "_query_handler",
        "_record_manager",
        "_registry",
        "_timers",
        "data",
        "last_message",
        "last_time",
        "sock_description",
        "transport",
        "zc",
    )

    def __init__(self, zc: Zeroconf) -> None:
        self.zc = zc
        self._registry = zc.registry
        self._record_manager = zc.record_manager
        self._query_handler = zc.query_handler
        self.data: bytes | None = None
        self.last_time: float = 0
        self.last_message: DNSIncoming | None = None
        self.transport: _WrappedTransport | None = None
        self.sock_description: str | None = None
        self._deferred: dict[str, list[DNSIncoming]] = {}
        self._timers: dict[str, asyncio.TimerHandle] = {}
        super().__init__()

    def datagram_received(self, data: _bytes, addrs: tuple[str, int] | tuple[str, int, int, int]) -> None:
        data_len = len(data)
        debug = DEBUG_ENABLED()

        if data_len > _MAX_MSG_ABSOLUTE:
            # Guard against oversized packets to ensure bad implementations cannot overwhelm
            # the system.
            if debug:
                log.debug(
                    "Discarding incoming packet with length %s, which is larger "
                    "than the absolute maximum size of %s",
                    data_len,
                    _MAX_MSG_ABSOLUTE,
                )
            return
        now = current_time_millis()
        self._process_datagram_at_time(debug, data_len, now, data, addrs)

    def _process_datagram_at_time(
        self,
        debug: bool,
        data_len: _int,
        now: _float,
        data: _bytes,
        addrs: tuple[str, int] | tuple[str, int, int, int],
    ) -> None:
        if (
            self.data == data
            and (now - _DUPLICATE_PACKET_SUPPRESSION_INTERVAL) < self.last_time
            and self.last_message is not None
            and not self.last_message.has_qu_question()
        ):
            # Guard against duplicate packets
            if debug:
                log.debug(
                    "Ignoring duplicate message with no unicast questions"
                    " received from %s [socket %s] (%d bytes) as [%r]",
                    addrs,
                    self.sock_description,
                    data_len,
                    data,
                )
            return

        if len(addrs) == 2:
            v6_flow_scope: tuple[()] | tuple[int, int] = ()
            # https://github.com/python/mypy/issues/1178
            addr, port = addrs
            addr_port = addrs
            if TYPE_CHECKING:
                addr_port = cast(tuple[str, int], addr_port)
            scope = None
        else:
            # https://github.com/python/mypy/issues/1178
            addr, port, flow, scope = addrs
            if debug:  # pragma: no branch
                log.debug("IPv6 scope_id %d associated to the receiving interface", scope)
            v6_flow_scope = (flow, scope)
            addr_port = (addr, port)

        msg = DNSIncoming(data, addr_port, scope, now)
        self.data = data
        self.last_time = now
        self.last_message = msg
        if msg.valid is True:
            if debug:
                log.debug(
                    "Received from %r:%r [socket %s]: %r (%d bytes) as [%r]",
                    addr,
                    port,
                    self.sock_description,
                    msg,
                    data_len,
                    data,
                )
        else:
            if debug:
                log.debug(
                    "Received from %r:%r [socket %s]: (%d bytes) [%r]",
                    addr,
                    port,
                    self.sock_description,
                    data_len,
                    data,
                )
            return

        if not msg.is_query():
            self._record_manager.async_updates_from_response(msg)
            return

        if not self._registry.has_entries:
            # If the registry is empty, we have no answers to give.
            return

        if TYPE_CHECKING:
            assert self.transport is not None
        self.handle_query_or_defer(msg, addr, port, self.transport, v6_flow_scope)

    def handle_query_or_defer(
        self,
        msg: DNSIncoming,
        addr: _str,
        port: _int,
        transport: _WrappedTransport,
        v6_flow_scope: tuple[()] | tuple[int, int],
    ) -> None:
        """Deal with incoming query packets.  Provides a response if
        possible."""
        if not msg.truncated:
            self._respond_query(msg, addr, port, transport, v6_flow_scope)
            return

        deferred = self._deferred.setdefault(addr, [])
        # If we get the same packet we ignore it
        for incoming in reversed(deferred):
            if incoming.data == msg.data:
                return
        deferred.append(msg)
        delay = millis_to_seconds(random.randint(*_TC_DELAY_RANDOM_INTERVAL))  # noqa: S311
        loop = self.zc.loop
        assert loop is not None
        self._cancel_any_timers_for_addr(addr)
        self._timers[addr] = loop.call_at(
            loop.time() + delay,
            self._respond_query,
            None,
            addr,
            port,
            transport,
            v6_flow_scope,
        )
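
    # Timing note: RFC 6762 section 7.2 has the querier set the TC bit when
    # its known answers spill into follow-up packets, so the responder waits
    # a random 400-500 ms (_TC_DELAY_RANDOM_INTERVAL) for the rest before
    # answering; each new fragment from the same address resets the timer.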

    def _cancel_any_timers_for_addr(self, addr: _str) -> None:
        """Cancel any future truncated packet timers for the address."""
        if addr in self._timers:
            self._timers.pop(addr).cancel()

    def _respond_query(
        self,
        msg: DNSIncoming | None,
        addr: _str,
        port: _int,
        transport: _WrappedTransport,
        v6_flow_scope: tuple[()] | tuple[int, int],
    ) -> None:
        """Respond to a query and reassemble any truncated deferred packets."""
        self._cancel_any_timers_for_addr(addr)
        packets = self._deferred.pop(addr, [])
        if msg:
            packets.append(msg)

        self._query_handler.handle_assembled_query(packets, addr, port, transport, v6_flow_scope)

    def error_received(self, exc: Exception) -> None:
        """Likely socket closed or IPv6."""
        # We preformat the message string with the socket description
        # so that log_exception_once logs a warning once per socket,
        # in case there are problems with multiple sockets
        msg_str = f"Error with socket {self.sock_description}: %s"
        QuietLogger.log_exception_once(exc, msg_str, exc)

    def connection_made(self, transport: asyncio.BaseTransport) -> None:
        wrapped_transport = make_wrapped_transport(cast(asyncio.DatagramTransport, transport))
        self.transport = wrapped_transport
        self.sock_description = f"{wrapped_transport.fileno} ({wrapped_transport.sock_name})"

    def connection_lost(self, exc: Exception | None) -> None:
        """Handle connection lost."""
07070100000040000081A400000000000000000000000167C7AD1600000B88000000000000000000000000000000000000003000000000python-zeroconf-0.146.0/src/zeroconf/_logger.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

import logging
import sys
from typing import Any, ClassVar, cast

log = logging.getLogger(__name__.split(".", maxsplit=1)[0])
log.addHandler(logging.NullHandler())


def set_logger_level_if_unset() -> None:
    if log.level == logging.NOTSET:
        log.setLevel(logging.WARN)


set_logger_level_if_unset()


class QuietLogger:
    _seen_logs: ClassVar[dict[str, int | tuple]] = {}

    @classmethod
    def log_exception_warning(cls, *logger_data: Any) -> None:
        exc_info = sys.exc_info()
        exc_str = str(exc_info[1])
        if exc_str not in cls._seen_logs:
            # log at warning level the first time this is seen
            cls._seen_logs[exc_str] = exc_info
            logger = log.warning
        else:
            logger = log.debug
        logger(*(logger_data or ["Exception occurred"]), exc_info=True)

    @classmethod
    def log_exception_debug(cls, *logger_data: Any) -> None:
        log_exc_info = False
        exc_info = sys.exc_info()
        exc_str = str(exc_info[1])
        if exc_str not in cls._seen_logs:
            # log the trace only on the first time
            cls._seen_logs[exc_str] = exc_info
            log_exc_info = True
        log.debug(*(logger_data or ["Exception occurred"]), exc_info=log_exc_info)

    @classmethod
    def log_warning_once(cls, *args: Any) -> None:
        msg_str = args[0]
        if msg_str not in cls._seen_logs:
            cls._seen_logs[msg_str] = 0
            logger = log.warning
        else:
            logger = log.debug
        cls._seen_logs[msg_str] = cast(int, cls._seen_logs[msg_str]) + 1
        logger(*args)

    @classmethod
    def log_exception_once(cls, exc: Exception, *args: Any) -> None:
        msg_str = args[0]
        if msg_str not in cls._seen_logs:
            cls._seen_logs[msg_str] = 0
            logger = log.warning
        else:
            logger = log.debug
        cls._seen_logs[msg_str] = cast(int, cls._seen_logs[msg_str]) + 1
        logger(*args, exc_info=exc)
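
# Behavior sketch (illustrative; `addr` is hypothetical): the first
# occurrence of a given message logs at WARNING, repeats drop to DEBUG,
# so a persistent error cannot flood the log:
#
#   QuietLogger.log_warning_once("Bad packet from %s", addr)  # -> WARNING
#   QuietLogger.log_warning_once("Bad packet from %s", addr)  # -> DEBUG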
07070100000041000041ED00000000000000000000000267C7AD1600000000000000000000000000000000000000000000002F00000000python-zeroconf-0.146.0/src/zeroconf/_protocol07070100000042000081A400000000000000000000000167C7AD16000003B2000000000000000000000000000000000000003B00000000python-zeroconf-0.146.0/src/zeroconf/_protocol/__init__.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations
07070100000043000081A400000000000000000000000167C7AD1600000CEE000000000000000000000000000000000000003C00000000python-zeroconf-0.146.0/src/zeroconf/_protocol/incoming.pxd
import cython


cdef cython.uint DNS_COMPRESSION_HEADER_LEN
cdef cython.uint MAX_DNS_LABELS
cdef cython.uint DNS_COMPRESSION_POINTER_LEN
cdef cython.uint MAX_NAME_LENGTH

cdef cython.uint _TYPE_A
cdef cython.uint _TYPE_CNAME
cdef cython.uint _TYPE_PTR
cdef cython.uint _TYPE_TXT
cdef cython.uint _TYPE_SRV
cdef cython.uint _TYPE_HINFO
cdef cython.uint _TYPE_AAAA
cdef cython.uint _TYPE_NSEC
cdef cython.uint _FLAGS_QR_MASK
cdef cython.uint _FLAGS_TC
cdef cython.uint _FLAGS_QR_QUERY
cdef cython.uint _FLAGS_QR_RESPONSE

cdef object DECODE_EXCEPTIONS

cdef object IncomingDecodeError

from .._dns cimport (
    DNSAddress,
    DNSEntry,
    DNSHinfo,
    DNSNsec,
    DNSPointer,
    DNSQuestion,
    DNSRecord,
    DNSService,
    DNSText,
)
from .._utils.time cimport current_time_millis


cdef class DNSIncoming:

    cdef bint _did_read_others
    cdef public unsigned int flags
    cdef cython.uint offset
    cdef public bytes data
    cdef const unsigned char [:] view
    cdef unsigned int _data_len
    cdef cython.dict _name_cache
    cdef cython.list _questions
    cdef cython.list _answers
    cdef public cython.uint id
    cdef cython.uint _num_questions
    cdef cython.uint _num_answers
    cdef cython.uint _num_authorities
    cdef cython.uint _num_additionals
    cdef public bint valid
    cdef public double now
    cdef public object scope_id
    cdef public object source
    cdef bint _has_qu_question

    @cython.locals(
        question=DNSQuestion
    )
    cpdef bint has_qu_question(self)

    cpdef bint is_query(self)

    cpdef bint is_probe(self)

    cpdef list answers(self)

    cpdef bint is_response(self)

    @cython.locals(
        off="unsigned int",
        label_idx="unsigned int",
        length="unsigned int",
        link="unsigned int",
        link_data="unsigned int",
        link_py_int=object,
        linked_labels=cython.list
    )
    cdef unsigned int _decode_labels_at_offset(self, unsigned int off, cython.list labels, cython.set seen_pointers)

    @cython.locals(offset="unsigned int")
    cdef void _read_header(self)

    cdef void _initial_parse(self)

    @cython.locals(
        end="unsigned int",
        length="unsigned int",
        offset="unsigned int"
    )
    cdef void _read_others(self)

    @cython.locals(offset="unsigned int", question=DNSQuestion)
    cdef _read_questions(self)

    @cython.locals(
        length="unsigned int",
    )
    cdef str _read_character_string(self)

    cdef bytes _read_string(self, unsigned int length)

    @cython.locals(
        name_start="unsigned int",
        offset="unsigned int",
        address_rec=DNSAddress,
        pointer_rec=DNSPointer,
        text_rec=DNSText,
        srv_rec=DNSService,
        hinfo_rec=DNSHinfo,
        nsec_rec=DNSNsec,
    )
    cdef _read_record(self, str domain, unsigned int type_, unsigned int class_, unsigned int ttl, unsigned int length)

    @cython.locals(
        offset="unsigned int",
        offset_plus_one="unsigned int",
        offset_plus_two="unsigned int",
        window="unsigned int",
        bit="unsigned int",
        byte="unsigned int",
        i="unsigned int",
        bitmap_length="unsigned int",
    )
    cdef list _read_bitmap(self, unsigned int end)

    cdef str _read_name(self)
07070100000044000081A400000000000000000000000167C7AD1600004161000000000000000000000000000000000000003B00000000python-zeroconf-0.146.0/src/zeroconf/_protocol/incoming.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

import struct
import sys
from typing import Any

from .._dns import (
    DNSAddress,
    DNSHinfo,
    DNSNsec,
    DNSPointer,
    DNSQuestion,
    DNSRecord,
    DNSService,
    DNSText,
)
from .._exceptions import IncomingDecodeError
from .._logger import log
from .._utils.time import current_time_millis
from ..const import (
    _FLAGS_QR_MASK,
    _FLAGS_QR_QUERY,
    _FLAGS_QR_RESPONSE,
    _FLAGS_TC,
    _TYPE_A,
    _TYPE_AAAA,
    _TYPE_CNAME,
    _TYPE_HINFO,
    _TYPE_NSEC,
    _TYPE_PTR,
    _TYPE_SRV,
    _TYPE_TXT,
    _TYPES,
)

DNS_COMPRESSION_HEADER_LEN = 1
DNS_COMPRESSION_POINTER_LEN = 2
MAX_DNS_LABELS = 128
MAX_NAME_LENGTH = 253

DECODE_EXCEPTIONS = (IndexError, struct.error, IncomingDecodeError)


_seen_logs: dict[str, int | tuple] = {}
_str = str
_int = int


class DNSIncoming:
    """Object representation of an incoming DNS packet"""

    __slots__ = (
        "_answers",
        "_data_len",
        "_did_read_others",
        "_has_qu_question",
        "_name_cache",
        "_num_additionals",
        "_num_answers",
        "_num_authorities",
        "_num_questions",
        "_questions",
        "data",
        "flags",
        "id",
        "now",
        "offset",
        "scope_id",
        "source",
        "valid",
        "view",
    )

    def __init__(
        self,
        data: bytes,
        source: tuple[str, int] | None = None,
        scope_id: int | None = None,
        now: float | None = None,
    ) -> None:
        """Constructor from string holding bytes of packet"""
        self.flags = 0
        self.offset = 0
        self.data = data
        self.view = data
        self._data_len = len(data)
        self._name_cache: dict[int, list[str]] = {}
        self._questions: list[DNSQuestion] = []
        self._answers: list[DNSRecord] = []
        self.id = 0
        self._num_questions = 0
        self._num_answers = 0
        self._num_authorities = 0
        self._num_additionals = 0
        self.valid = False
        self._did_read_others = False
        self.now = now or current_time_millis()
        self.source = source
        self.scope_id = scope_id
        self._has_qu_question = False
        try:
            self._initial_parse()
        except DECODE_EXCEPTIONS:
            self._log_exception_debug(
                "Received invalid packet from %s at offset %d while unpacking %r",
                self.source,
                self.offset,
                self.data,
            )

    def is_query(self) -> bool:
        """Returns true if this is a query."""
        return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_QUERY

    def is_response(self) -> bool:
        """Returns true if this is a response."""
        return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_RESPONSE

    def has_qu_question(self) -> bool:
        """Returns true if any question is a QU question."""
        return self._has_qu_question

    @property
    def truncated(self) -> bool:
        """Returns true if this is a truncated."""
        return (self.flags & _FLAGS_TC) == _FLAGS_TC

    @property
    def questions(self) -> list[DNSQuestion]:
        """Questions in the packet."""
        return self._questions

    @property
    def num_questions(self) -> int:
        """Number of questions in the packet."""
        return self._num_questions

    @property
    def num_answers(self) -> int:
        """Number of answers in the packet."""
        return self._num_answers

    @property
    def num_authorities(self) -> int:
        """Number of authorities in the packet."""
        return self._num_authorities

    @property
    def num_additionals(self) -> int:
        """Number of additionals in the packet."""
        return self._num_additionals

    def _initial_parse(self) -> None:
        """Parse the data needed to initialize the packet object."""
        self._read_header()
        self._read_questions()
        if not self._num_questions:
            self._read_others()
        self.valid = True

    @classmethod
    def _log_exception_debug(cls, *logger_data: Any) -> None:
        log_exc_info = False
        exc_info = sys.exc_info()
        exc_str = str(exc_info[1])
        if exc_str not in _seen_logs:
            # log the trace only on the first time
            _seen_logs[exc_str] = exc_info
            log_exc_info = True
        log.debug(*(logger_data or ["Exception occurred"]), exc_info=log_exc_info)

    def answers(self) -> list[DNSRecord]:
        """Answers in the packet."""
        if not self._did_read_others:
            try:
                self._read_others()
            except DECODE_EXCEPTIONS:
                self._log_exception_debug(
                    "Received invalid packet from %s at offset %d while unpacking %r",
                    self.source,
                    self.offset,
                    self.data,
                )
        return self._answers

    def is_probe(self) -> bool:
        """Returns true if this is a probe."""
        return self._num_authorities > 0

    def __repr__(self) -> str:
        return "<DNSIncoming:{}>".format(
            ", ".join(
                [
                    f"id={self.id}",
                    f"flags={self.flags}",
                    f"truncated={self.truncated}",
                    f"n_q={self._num_questions}",
                    f"n_ans={self._num_answers}",
                    f"n_auth={self._num_authorities}",
                    f"n_add={self._num_additionals}",
                    f"questions={self._questions}",
                    f"answers={self.answers()}",
                ]
            )
        )

    def _read_header(self) -> None:
        """Reads header portion of packet"""
        view = self.view
        offset = self.offset
        self.offset += 12
        # The header has 6 unsigned shorts in network order
        self.id = view[offset] << 8 | view[offset + 1]
        self.flags = view[offset + 2] << 8 | view[offset + 3]
        self._num_questions = view[offset + 4] << 8 | view[offset + 5]
        self._num_answers = view[offset + 6] << 8 | view[offset + 7]
        self._num_authorities = view[offset + 8] << 8 | view[offset + 9]
        self._num_additionals = view[offset + 10] << 8 | view[offset + 11]
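        # A minimal sketch of an equivalent struct-based parse (illustrative
        # only; the manual shifts above avoid the call overhead on this hot
        # path):
        #   import struct
        #   (id_, flags, n_q, n_ans, n_auth, n_add) = struct.unpack_from(
        #       ">6H", self.data, 0
        #   )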

    def _read_questions(self) -> None:
        """Reads questions section of packet"""
        view = self.view
        questions = self._questions
        for _ in range(self._num_questions):
            name = self._read_name()
            offset = self.offset
            self.offset += 4
            # The question has 2 unsigned shorts in network order
            type_ = view[offset] << 8 | view[offset + 1]
            class_ = view[offset + 2] << 8 | view[offset + 3]
            question = DNSQuestion.__new__(DNSQuestion)
            question._fast_init(name, type_, class_)
            if question.unique:  # QU questions use the same bit as unique
                self._has_qu_question = True
            questions.append(question)

    def _read_character_string(self) -> str:
        """Reads a character string from the packet"""
        length = self.view[self.offset]
        self.offset += 1
        info = self.data[self.offset : self.offset + length].decode("utf-8", "replace")
        self.offset += length
        return info

    def _read_string(self, length: _int) -> bytes:
        """Reads a string of a given length from the packet"""
        info = self.data[self.offset : self.offset + length]
        self.offset += length
        return info

    def _read_others(self) -> None:
        """Reads the answers, authorities and additionals section of the
        packet"""
        self._did_read_others = True
        view = self.view
        n = self._num_answers + self._num_authorities + self._num_additionals
        for _ in range(n):
            domain = self._read_name()
            offset = self.offset
            self.offset += 10
            # type_, class_ and length are unsigned shorts in network order
            # ttl is an unsigned long in network order https://www.rfc-editor.org/errata/eid2130
            type_ = view[offset] << 8 | view[offset + 1]
            class_ = view[offset + 2] << 8 | view[offset + 3]
            ttl = view[offset + 4] << 24 | view[offset + 5] << 16 | view[offset + 6] << 8 | view[offset + 7]
            length = view[offset + 8] << 8 | view[offset + 9]
            end = self.offset + length
            rec = None
            try:
                rec = self._read_record(domain, type_, class_, ttl, length)
            except DECODE_EXCEPTIONS:
                # Skip records that fail to decode when we know the length.
                # If the packet is really corrupt, _read_name and the
                # unpacking above would fail and hit the exception handler
                # in _read_others.
                self.offset = end
                log.debug(
                    "Unable to parse; skipping record for %s with type %s at offset %d while unpacking %r",
                    domain,
                    _TYPES.get(type_, type_),
                    self.offset,
                    self.data,
                    exc_info=True,
                )
            if rec is not None:
                self._answers.append(rec)

    def _read_record(
        self, domain: _str, type_: _int, class_: _int, ttl: _int, length: _int
    ) -> DNSRecord | None:
        """Read known records types and skip unknown ones."""
        if type_ == _TYPE_A:
            address_rec = DNSAddress.__new__(DNSAddress)
            address_rec._fast_init(domain, type_, class_, ttl, self._read_string(4), None, self.now)
            return address_rec
        if type_ in (_TYPE_CNAME, _TYPE_PTR):
            pointer_rec = DNSPointer.__new__(DNSPointer)
            pointer_rec._fast_init(domain, type_, class_, ttl, self._read_name(), self.now)
            return pointer_rec
        if type_ == _TYPE_TXT:
            text_rec = DNSText.__new__(DNSText)
            text_rec._fast_init(domain, type_, class_, ttl, self._read_string(length), self.now)
            return text_rec
        if type_ == _TYPE_SRV:
            view = self.view
            offset = self.offset
            self.offset += 6
            # The SRV record has 3 unsigned shorts in network order
            priority = view[offset] << 8 | view[offset + 1]
            weight = view[offset + 2] << 8 | view[offset + 3]
            port = view[offset + 4] << 8 | view[offset + 5]
            srv_rec = DNSService.__new__(DNSService)
            srv_rec._fast_init(
                domain,
                type_,
                class_,
                ttl,
                priority,
                weight,
                port,
                self._read_name(),
                self.now,
            )
            return srv_rec
        if type_ == _TYPE_HINFO:
            hinfo_rec = DNSHinfo.__new__(DNSHinfo)
            hinfo_rec._fast_init(
                domain,
                type_,
                class_,
                ttl,
                self._read_character_string(),
                self._read_character_string(),
                self.now,
            )
            return hinfo_rec
        if type_ == _TYPE_AAAA:
            address_rec = DNSAddress.__new__(DNSAddress)
            address_rec._fast_init(
                domain,
                type_,
                class_,
                ttl,
                self._read_string(16),
                self.scope_id,
                self.now,
            )
            return address_rec
        if type_ == _TYPE_NSEC:
            name_start = self.offset
            nsec_rec = DNSNsec.__new__(DNSNsec)
            nsec_rec._fast_init(
                domain,
                type_,
                class_,
                ttl,
                self._read_name(),
                self._read_bitmap(name_start + length),
                self.now,
            )
            return nsec_rec
        # Ignore types we don't know about: skip the payload for the
        # resource record so the following records can be parsed correctly.
        self.offset += length
        return None

    def _read_bitmap(self, end: _int) -> list[int]:
        """Reads an NSEC bitmap from the packet."""
        rdtypes = []
        view = self.view
        while self.offset < end:
            offset = self.offset
            offset_plus_one = offset + 1
            offset_plus_two = offset + 2
            window = view[offset]
            bitmap_length = view[offset_plus_one]
            bitmap_end = offset_plus_two + bitmap_length
            for i, byte in enumerate(self.data[offset_plus_two:bitmap_end]):
                for bit in range(8):
                    if byte & (0x80 >> bit):
                        rdtypes.append(bit + window * 256 + i * 8)
            self.offset += 2 + bitmap_length
        return rdtypes

    def _read_name(self) -> str:
        """Reads a domain name from the packet."""
        labels: list[str] = []
        seen_pointers: set[int] = set()
        original_offset = self.offset
        self.offset = self._decode_labels_at_offset(original_offset, labels, seen_pointers)
        self._name_cache[original_offset] = labels
        name = ".".join(labels) + "."
        if len(name) > MAX_NAME_LENGTH:
            raise IncomingDecodeError(
                f"DNS name {name} exceeds maximum length of {MAX_NAME_LENGTH} from {self.source}"
            )
        return name

    def _decode_labels_at_offset(self, off: _int, labels: list[str], seen_pointers: set[int]) -> int:
        # This is a tight loop that is called frequently; small optimizations can make a difference.
        view = self.view
        while off < self._data_len:
            length = view[off]
            if length == 0:
                return off + DNS_COMPRESSION_HEADER_LEN

            if length < 0x40:
                label_idx = off + DNS_COMPRESSION_HEADER_LEN
                labels.append(self.data[label_idx : label_idx + length].decode("utf-8", "replace"))
                off += DNS_COMPRESSION_HEADER_LEN + length
                continue

            if length < 0xC0:
                raise IncomingDecodeError(
                    f"DNS compression type {length} is unknown at {off} from {self.source}"
                )

            # We have a DNS compression pointer
            link_data = view[off + 1]
            link = (length & 0x3F) * 256 + link_data
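            # Worked example (assumed bytes, for illustration): the pair
            # 0xC0 0x0C is a compression pointer because 0xC0 >= 0xC0, and
            # (0xC0 & 0x3F) * 256 + 0x0C == 12, so the name continues at
            # offset 12, the first byte after the DNS header.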
            link_py_int = link
            if link > self._data_len:
                raise IncomingDecodeError(
                    f"DNS compression pointer at {off} points to {link} beyond packet from {self.source}"
                )
            if link == off:
                raise IncomingDecodeError(
                    f"DNS compression pointer at {off} points to itself from {self.source}"
                )
            if link_py_int in seen_pointers:
                raise IncomingDecodeError(
                    f"DNS compression pointer at {off} was seen again from {self.source}"
                )
            linked_labels = self._name_cache.get(link_py_int)
            if not linked_labels:
                linked_labels = []
                seen_pointers.add(link_py_int)
                self._decode_labels_at_offset(link, linked_labels, seen_pointers)
                self._name_cache[link_py_int] = linked_labels
            labels.extend(linked_labels)
            if len(labels) > MAX_DNS_LABELS:
                raise IncomingDecodeError(
                    f"Maximum dns labels reached while processing pointer at {off} from {self.source}"
                )
            return off + DNS_COMPRESSION_POINTER_LEN

        raise IncomingDecodeError(f"Corrupt packet received while decoding name from {self.source}")
07070100000045000081A400000000000000000000000167C7AD160000104F000000000000000000000000000000000000003C00000000python-zeroconf-0.146.0/src/zeroconf/_protocol/outgoing.pxd
import cython

from .._dns cimport DNSEntry, DNSPointer, DNSQuestion, DNSRecord
from .incoming cimport DNSIncoming


cdef cython.uint _CLASS_UNIQUE
cdef cython.uint _DNS_PACKET_HEADER_LEN
cdef cython.uint _FLAGS_QR_MASK
cdef cython.uint _FLAGS_QR_QUERY
cdef cython.uint _FLAGS_QR_RESPONSE
cdef cython.uint _FLAGS_TC
cdef cython.uint _MAX_MSG_ABSOLUTE
cdef cython.uint _MAX_MSG_TYPICAL


cdef bint TYPE_CHECKING

cdef unsigned int SHORT_CACHE_MAX

cdef object PACK_BYTE
cdef object PACK_SHORT
cdef object PACK_LONG

cdef unsigned int STATE_INIT
cdef unsigned int STATE_FINISHED

cdef object LOGGING_IS_ENABLED_FOR
cdef object LOGGING_DEBUG

cdef cython.tuple BYTE_TABLE
cdef cython.tuple SHORT_LOOKUP
cdef cython.dict LONG_LOOKUP

cdef class DNSOutgoing:

    cdef public unsigned int flags
    cdef public bint finished
    cdef public object id
    cdef public bint multicast
    cdef public cython.list packets_data
    cdef public cython.dict names
    cdef public cython.list data
    cdef public unsigned int size
    cdef public bint allow_long
    cdef public unsigned int state
    cdef public cython.list questions
    cdef public cython.list answers
    cdef public cython.list authorities
    cdef public cython.list additionals

    cpdef void _reset_for_next_packet(self)

    cdef void _write_byte(self, cython.uint value)

    cdef void _insert_short_at_start(self, unsigned int value)

    cdef void _replace_short(self, cython.uint index, cython.uint value)

    cdef _get_short(self, cython.uint value)

    cdef void _write_int(self, object value)

    cdef cython.bint _write_question(self, DNSQuestion question)

    @cython.locals(
        d=cython.bytes,
        data_view=cython.list,
        index=cython.uint,
        length=cython.uint
    )
    cdef cython.bint _write_record(self, DNSRecord record, double now)

    @cython.locals(class_=cython.uint)
    cdef void _write_record_class(self, DNSEntry record)

    @cython.locals(
        start_size_int=object
    )
    cdef cython.bint _check_data_limit_or_rollback(self, cython.uint start_data_length, cython.uint start_size)

    @cython.locals(questions_written=cython.uint)
    cdef cython.uint _write_questions_from_offset(self, unsigned int questions_offset)

    @cython.locals(answers_written=cython.uint)
    cdef cython.uint _write_answers_from_offset(self, unsigned int answer_offset)

    @cython.locals(records_written=cython.uint)
    cdef cython.uint _write_records_from_offset(self, cython.list records, unsigned int offset)

    cdef bint _has_more_to_add(self, unsigned int questions_offset, unsigned int answer_offset, unsigned int authority_offset, unsigned int additional_offset)

    cdef void _write_ttl(self, DNSRecord record, double now)

    @cython.locals(
        labels=cython.list,
        label=cython.str,
        index=cython.uint,
        start_size=cython.uint,
        name_length=cython.uint,
    )
    cpdef void write_name(self, cython.str name)

    cdef void _write_link_to_name(self, unsigned int index)

    cpdef void write_short(self, cython.uint value)

    cpdef void write_string(self, cython.bytes value)

    cpdef void write_character_string(self, cython.bytes value)

    @cython.locals(utfstr=bytes)
    cdef void _write_utf(self, cython.str value)

    @cython.locals(
        debug_enable=bint,
        made_progress=bint,
        has_more_to_add=bint,
        questions_offset="unsigned int",
        answer_offset="unsigned int",
        authority_offset="unsigned int",
        additional_offset="unsigned int",
        questions_written="unsigned int",
        answers_written="unsigned int",
        authorities_written="unsigned int",
        additionals_written="unsigned int",
    )
    cpdef packets(self)

    cpdef void add_question(self, DNSQuestion question)

    cpdef void add_answer(self, DNSIncoming inp, DNSRecord record)

    @cython.locals(now_double=double)
    cpdef void add_answer_at_time(self, DNSRecord record, double now)

    cpdef void add_authorative_answer(self, DNSPointer record)

    cpdef void add_additional_answer(self, DNSRecord record)

    cpdef bint is_query(self)

    cpdef bint is_response(self)
07070100000046000081A400000000000000000000000167C7AD160000485E000000000000000000000000000000000000003B00000000python-zeroconf-0.146.0/src/zeroconf/_protocol/outgoing.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

import enum
import logging
from collections.abc import Sequence
from struct import Struct
from typing import TYPE_CHECKING

from .._dns import DNSPointer, DNSQuestion, DNSRecord
from .._exceptions import NamePartTooLongException
from .._logger import log
from ..const import (
    _CLASS_UNIQUE,
    _DNS_HOST_TTL,
    _DNS_OTHER_TTL,
    _DNS_PACKET_HEADER_LEN,
    _FLAGS_QR_MASK,
    _FLAGS_QR_QUERY,
    _FLAGS_QR_RESPONSE,
    _FLAGS_TC,
    _MAX_MSG_ABSOLUTE,
    _MAX_MSG_TYPICAL,
)
from .incoming import DNSIncoming

str_ = str
float_ = float
int_ = int
bytes_ = bytes
DNSQuestion_ = DNSQuestion
DNSRecord_ = DNSRecord


PACK_BYTE = Struct(">B").pack
PACK_SHORT = Struct(">H").pack
PACK_LONG = Struct(">L").pack

SHORT_CACHE_MAX = 128

BYTE_TABLE = tuple(PACK_BYTE(i) for i in range(256))
SHORT_LOOKUP = tuple(PACK_SHORT(i) for i in range(SHORT_CACHE_MAX))
LONG_LOOKUP = {i: PACK_LONG(i) for i in (_DNS_OTHER_TTL, _DNS_HOST_TTL, 0)}
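
# These lookup tables trade a little memory at import time for skipping
# repeated Struct.pack calls on the hot path: every possible byte value,
# the first SHORT_CACHE_MAX shorts, and the three most common TTL values
# are packed once and reused.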


class State(enum.Enum):
    init = 0
    finished = 1


STATE_INIT = State.init.value
STATE_FINISHED = State.finished.value

LOGGING_IS_ENABLED_FOR = log.isEnabledFor
LOGGING_DEBUG = logging.DEBUG


class DNSOutgoing:
    """Object representation of an outgoing packet"""

    __slots__ = (
        "additionals",
        "allow_long",
        "answers",
        "authorities",
        "data",
        "finished",
        "flags",
        "id",
        "multicast",
        "names",
        "packets_data",
        "questions",
        "size",
        "state",
    )

    def __init__(self, flags: int, multicast: bool = True, id_: int = 0) -> None:
        self.flags = flags
        self.finished = False
        self.id = id_
        self.multicast = multicast
        self.packets_data: list[bytes] = []

        # these four are per-packet -- see also _reset_for_next_packet()
        self.names: dict[str, int] = {}
        self.data: list[bytes] = []
        self.size: int = _DNS_PACKET_HEADER_LEN
        self.allow_long: bool = True

        self.state = STATE_INIT

        self.questions: list[DNSQuestion] = []
        self.answers: list[tuple[DNSRecord, float]] = []
        self.authorities: list[DNSPointer] = []
        self.additionals: list[DNSRecord] = []

    def is_query(self) -> bool:
        """Returns true if this is a query."""
        return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_QUERY

    def is_response(self) -> bool:
        """Returns true if this is a response."""
        return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_RESPONSE

    def _reset_for_next_packet(self) -> None:
        self.names = {}
        self.data = []
        self.size = _DNS_PACKET_HEADER_LEN
        self.allow_long = True

    def __repr__(self) -> str:
        return "<DNSOutgoing:{}>".format(
            ", ".join(
                [
                    f"multicast={self.multicast}",
                    f"flags={self.flags}",
                    f"questions={self.questions}",
                    f"answers={self.answers}",
                    f"authorities={self.authorities}",
                    f"additionals={self.additionals}",
                ]
            )
        )

    def add_question(self, record: DNSQuestion) -> None:
        """Adds a question"""
        self.questions.append(record)

    def add_answer(self, inp: DNSIncoming, record: DNSRecord) -> None:
        """Adds an answer"""
        if not record.suppressed_by(inp):
            self.add_answer_at_time(record, 0.0)

    def add_answer_at_time(self, record: DNSRecord | None, now: float_) -> None:
        """Adds an answer if it does not expire by a certain time"""
        now_double = now
        if record is not None and (now_double == 0 or not record.is_expired(now_double)):
            self.answers.append((record, now))

    def add_authorative_answer(self, record: DNSPointer) -> None:
        """Adds an authoritative answer"""
        self.authorities.append(record)

    def add_additional_answer(self, record: DNSRecord) -> None:
        """Adds an additional answer

        From: RFC 6763, DNS-Based Service Discovery, February 2013

        12.  DNS Additional Record Generation

           DNS has an efficiency feature whereby a DNS server may place
           additional records in the additional section of the DNS message.
           These additional records are records that the client did not
           explicitly request, but the server has reasonable grounds to expect
           that the client might request them shortly, so including them can
           save the client from having to issue additional queries.

           This section recommends which additional records SHOULD be generated
           to improve network efficiency, for both Unicast and Multicast DNS-SD
           responses.

        12.1.  PTR Records

           When including a DNS-SD Service Instance Enumeration or Selective
           Instance Enumeration (subtype) PTR record in a response packet, the
           server/responder SHOULD include the following additional records:

           o  The SRV record(s) named in the PTR rdata.
           o  The TXT record(s) named in the PTR rdata.
           o  All address records (type "A" and "AAAA") named in the SRV rdata.

        12.2.  SRV Records

           When including an SRV record in a response packet, the
           server/responder SHOULD include the following additional records:

           o  All address records (type "A" and "AAAA") named in the SRV rdata.

        """
        self.additionals.append(record)
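        # DNS-SD usage sketch (hypothetical records, following the RFC text
        # above): after adding a PTR answer, also attach the records it
        # points at, e.g.
        #   out.add_additional_answer(srv_record)
        #   out.add_additional_answer(txt_record)
        #   out.add_additional_answer(a_record)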

    def _write_byte(self, value: int_) -> None:
        """Writes a single byte to the packet"""
        self.data.append(BYTE_TABLE[value])
        self.size += 1

    def _get_short(self, value: int_) -> bytes:
        """Convert an unsigned short to 2 bytes."""
        return SHORT_LOOKUP[value] if value < SHORT_CACHE_MAX else PACK_SHORT(value)

    def _insert_short_at_start(self, value: int_) -> None:
        """Inserts an unsigned short at the start of the packet"""
        self.data.insert(0, self._get_short(value))

    def _replace_short(self, index: int_, value: int_) -> None:
        """Replaces an unsigned short in a certain position in the packet"""
        self.data[index] = self._get_short(value)

    def write_short(self, value: int_) -> None:
        """Writes an unsigned short to the packet"""
        self.data.append(self._get_short(value))
        self.size += 2

    def _write_int(self, value: float | int) -> None:
        """Writes an unsigned integer to the packet"""
        value_as_int = int(value)
        long_bytes = LONG_LOOKUP.get(value_as_int)
        if long_bytes is not None:
            self.data.append(long_bytes)
        else:
            self.data.append(PACK_LONG(value_as_int))
        self.size += 4

    def write_string(self, value: bytes_) -> None:
        """Writes a string to the packet"""
        if TYPE_CHECKING:
            assert isinstance(value, bytes)
        self.data.append(value)
        self.size += len(value)

    def _write_utf(self, s: str_) -> None:
        """Writes a UTF-8 string of a given length to the packet"""
        utfstr = s.encode("utf-8")
        length = len(utfstr)
        if length > 64:
            raise NamePartTooLongException
        self._write_byte(length)
        self.write_string(utfstr)

    def write_character_string(self, value: bytes) -> None:
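        """Writes a length-prefixed character string to the packet"""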
        if TYPE_CHECKING:
            assert isinstance(value, bytes)
        length = len(value)
        if length > 256:
            raise NamePartTooLongException
        self._write_byte(length)
        self.write_string(value)

    def write_name(self, name: str_) -> None:
        """
        Write names to packet

        18.14. Name Compression

        When generating Multicast DNS messages, implementations SHOULD use
        name compression wherever possible to compress the names of resource
        records, by replacing some or all of the resource record name with a
        compact two-byte reference to an appearance of that data somewhere
        earlier in the message [RFC1035].
        """

        # strip the trailing dot; the name is split into labels below
        if name and name[-1] == ".":
            name = name[:-1]

        index = self.names.get(name, 0)
        if index:
            self._write_link_to_name(index)
            return

        start_size = self.size
        labels = name.split(".")
        # Write each new label or a pointer to the existing one in the packet
        self.names[name] = start_size
        self._write_utf(labels[0])

        name_length = 0
        for count in range(1, len(labels)):
            partial_name = ".".join(labels[count:])
            index = self.names.get(partial_name, 0)
            if index:
                self._write_link_to_name(index)
                return
            if name_length == 0:
                name_length = len(name.encode("utf-8"))
            self.names[partial_name] = start_size + name_length - len(partial_name.encode("utf-8"))
            self._write_utf(labels[count])

        # this is the end of a name
        self._write_byte(0)
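        # Worked example (illustrative): writing "a.b.local." then
        # "c.b.local." emits the labels a, b, local and a terminating zero
        # byte; the second name then needs only the label c plus a two-byte
        # pointer back to the earlier "b.local" suffix.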

    def _write_link_to_name(self, index: int_) -> None:
        # If part of the name already exists in the packet,
        # create a pointer to it
        self._write_byte((index >> 8) | 0xC0)
        self._write_byte(index & 0xFF)

    def _write_question(self, question: DNSQuestion_) -> bool:
        """Writes a question to the packet"""
        start_data_length = len(self.data)
        start_size = self.size
        self.write_name(question.name)
        self.write_short(question.type)
        self._write_record_class(question)
        return self._check_data_limit_or_rollback(start_data_length, start_size)

    def _write_record_class(self, record: DNSQuestion_ | DNSRecord_) -> None:
        """Write out the record class including the unique/unicast (QU) bit."""
        class_ = record.class_
        if record.unique is True and self.multicast:
            self.write_short(class_ | _CLASS_UNIQUE)
        else:
            self.write_short(class_)

    def _write_ttl(self, record: DNSRecord_, now: float_) -> None:
        """Write out the record ttl."""
        self._write_int(record.ttl if now == 0 else record.get_remaining_ttl(now))

    def _write_record(self, record: DNSRecord_, now: float_) -> bool:
        """Writes a record (answer, authoritative answer, additional) to
        the packet.  Returns True on success, or False if the record was
        not written because it does not fit in the packet."""
        start_data_length = len(self.data)
        start_size = self.size
        self.write_name(record.name)
        self.write_short(record.type)
        self._write_record_class(record)
        self._write_ttl(record, now)
        index = len(self.data)
        self.write_short(0)  # Will get replaced with the actual size
        record.write(self)
        # Compute the length of the rdata written after the placeholder short
        length = 0
        for d in self.data[index + 1 :]:
            length += len(d)
        # Here we replace the 0 length short we wrote
        # before with the actual length
        self._replace_short(index, length)
        return self._check_data_limit_or_rollback(start_data_length, start_size)

    def _check_data_limit_or_rollback(self, start_data_length: int_, start_size: int_) -> bool:
        """Check data limit, if we go over, then rollback and return False."""
        len_limit = _MAX_MSG_ABSOLUTE if self.allow_long else _MAX_MSG_TYPICAL
        self.allow_long = False

        if self.size <= len_limit:
            return True

        if LOGGING_IS_ENABLED_FOR(LOGGING_DEBUG):  # pragma: no branch
            log.debug(
                "Reached data limit (size=%d) > (limit=%d) - rolling back",
                self.size,
                len_limit,
            )
        del self.data[start_data_length:]
        self.size = start_size

        start_size_int = start_size
        rollback_names = [name for name, idx in self.names.items() if idx >= start_size_int]
        for name in rollback_names:
            del self.names[name]
        return False

    def _write_questions_from_offset(self, questions_offset: int_) -> int:
        questions_written = 0
        for question in self.questions[questions_offset:]:
            if not self._write_question(question):
                break
            questions_written += 1
        return questions_written

    def _write_answers_from_offset(self, answer_offset: int_) -> int:
        answers_written = 0
        for answer, time_ in self.answers[answer_offset:]:
            if not self._write_record(answer, time_):
                break
            answers_written += 1
        return answers_written

    def _write_records_from_offset(self, records: Sequence[DNSRecord], offset: int_) -> int:
        records_written = 0
        for record in records[offset:]:
            if not self._write_record(record, 0):
                break
            records_written += 1
        return records_written

    def _has_more_to_add(
        self,
        questions_offset: int_,
        answer_offset: int_,
        authority_offset: int_,
        additional_offset: int_,
    ) -> bool:
        """Check if all questions, answers, authority, and additionals have been written to the packet."""
        return (
            questions_offset < len(self.questions)
            or answer_offset < len(self.answers)
            or authority_offset < len(self.authorities)
            or additional_offset < len(self.additionals)
        )

    def packets(self) -> list[bytes]:
        """Returns a list of bytestrings containing the packets' bytes

        No further parts should be added to the packet once this
        is done.  The packets are each restricted to _MAX_MSG_TYPICAL
        or less in length, except for the case of a single answer which
        will be written out to a single oversized packet no more than
        _MAX_MSG_ABSOLUTE in length (and hence may be subject to IP
        fragmentation)."""
        packets_data = self.packets_data

        if self.state == STATE_FINISHED:
            return packets_data

        questions_offset = 0
        answer_offset = 0
        authority_offset = 0
        additional_offset = 0
        # we have to at least write out the question
        debug_enable = LOGGING_IS_ENABLED_FOR(LOGGING_DEBUG) is True
        has_more_to_add = True

        while has_more_to_add:
            if debug_enable:
                log.debug(
                    "offsets = questions=%d, answers=%d, authorities=%d, additionals=%d",
                    questions_offset,
                    answer_offset,
                    authority_offset,
                    additional_offset,
                )
                log.debug(
                    "lengths = questions=%d, answers=%d, authorities=%d, additionals=%d",
                    len(self.questions),
                    len(self.answers),
                    len(self.authorities),
                    len(self.additionals),
                )

            questions_written = self._write_questions_from_offset(questions_offset)
            answers_written = self._write_answers_from_offset(answer_offset)
            authorities_written = self._write_records_from_offset(self.authorities, authority_offset)
            additionals_written = self._write_records_from_offset(self.additionals, additional_offset)

            made_progress = bool(self.data)

            self._insert_short_at_start(additionals_written)
            self._insert_short_at_start(authorities_written)
            self._insert_short_at_start(answers_written)
            self._insert_short_at_start(questions_written)

            questions_offset += questions_written
            answer_offset += answers_written
            authority_offset += authorities_written
            additional_offset += additionals_written
            if debug_enable:
                log.debug(
                    "now offsets = questions=%d, answers=%d, authorities=%d, additionals=%d",
                    questions_offset,
                    answer_offset,
                    authority_offset,
                    additional_offset,
                )

            has_more_to_add = self._has_more_to_add(
                questions_offset, answer_offset, authority_offset, additional_offset
            )

            if has_more_to_add and self.is_query():
                # https://datatracker.ietf.org/doc/html/rfc6762#section-7.2
                if debug_enable:  # pragma: no branch
                    log.debug("Setting TC flag")
                self._insert_short_at_start(self.flags | _FLAGS_TC)
            else:
                self._insert_short_at_start(self.flags)

            if self.multicast:
                self._insert_short_at_start(0)
            else:
                self._insert_short_at_start(self.id)

            packets_data.append(b"".join(self.data))

            if not made_progress:
                # Generating an empty packet is not a desirable outcome, but currently
                # too many internals rely on this behavior.  So, we'll just return an
                # empty packet and log a warning until this can be refactored at a later
                # date.
                log.warning("packets() made no progress adding records; returning")
                break

            if has_more_to_add:
                self._reset_for_next_packet()

        self.state = STATE_FINISHED
        return packets_data
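
# Minimal usage sketch (illustrative only; assumes an already-built DNSRecord
# `record` and a datagram transport `transport`):
#   out = DNSOutgoing(_FLAGS_QR_RESPONSE)
#   out.add_answer_at_time(record, 0.0)
#   for payload in out.packets():
#       transport.sendto(payload)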
07070100000047000081A400000000000000000000000167C7AD16000000BD000000000000000000000000000000000000003800000000python-zeroconf-0.146.0/src/zeroconf/_record_update.pxd
import cython

from ._dns cimport DNSRecord


cdef class RecordUpdate:

    cdef public DNSRecord new
    cdef public DNSRecord old

    cdef void _fast_init(self, object new, object old)
07070100000048000081A400000000000000000000000167C7AD160000065C000000000000000000000000000000000000003700000000python-zeroconf-0.146.0/src/zeroconf/_record_update.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

from ._dns import DNSRecord

_DNSRecord = DNSRecord


class RecordUpdate:
    __slots__ = ("new", "old")

    def __init__(self, new: DNSRecord, old: DNSRecord | None = None) -> None:
        """RecordUpdate represents a change in a DNS record."""
        self._fast_init(new, old)

    def _fast_init(self, new: _DNSRecord, old: _DNSRecord | None) -> None:
        """Fast init for RecordUpdate."""
        self.new = new
        self.old = old

    def __getitem__(self, index: int) -> DNSRecord | None:
        """Get the new or old record."""
        if index == 0:
            return self.new
        if index == 1:
            return self.old
        raise IndexError(index)
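
# Tuple-style access sketch: for update = RecordUpdate(new_rec, old_rec),
# update[0] is the new record and update[1] is the old record (or None).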
07070100000049000041ED00000000000000000000000267C7AD1600000000000000000000000000000000000000000000002F00000000python-zeroconf-0.146.0/src/zeroconf/_services0707010000004A000081A400000000000000000000000167C7AD160000007F000000000000000000000000000000000000003C00000000python-zeroconf-0.146.0/src/zeroconf/_services/__init__.pxd
import cython


cdef class Signal:

    cdef list _handlers

cdef class SignalRegistrationInterface:

    cdef list _handlers
0707010000004B000081A400000000000000000000000167C7AD1600000942000000000000000000000000000000000000003B00000000python-zeroconf-0.146.0/src/zeroconf/_services/__init__.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

import enum
from typing import TYPE_CHECKING, Any, Callable

if TYPE_CHECKING:
    from .._core import Zeroconf


@enum.unique
class ServiceStateChange(enum.Enum):
    Added = 1
    Removed = 2
    Updated = 3


class ServiceListener:
    def add_service(self, zc: Zeroconf, type_: str, name: str) -> None:
        raise NotImplementedError

    def remove_service(self, zc: Zeroconf, type_: str, name: str) -> None:
        raise NotImplementedError

    def update_service(self, zc: Zeroconf, type_: str, name: str) -> None:
        raise NotImplementedError


class Signal:
    __slots__ = ("_handlers",)

    def __init__(self) -> None:
        self._handlers: list[Callable[..., None]] = []

    def fire(self, **kwargs: Any) -> None:
        for h in self._handlers[:]:
            h(**kwargs)

    @property
    def registration_interface(self) -> SignalRegistrationInterface:
        return SignalRegistrationInterface(self._handlers)


class SignalRegistrationInterface:
    __slots__ = ("_handlers",)

    def __init__(self, handlers: list[Callable[..., None]]) -> None:
        self._handlers = handlers

    def register_handler(self, handler: Callable[..., None]) -> SignalRegistrationInterface:
        self._handlers.append(handler)
        return self

    def unregister_handler(self, handler: Callable[..., None]) -> SignalRegistrationInterface:
        self._handlers.remove(handler)
        return self
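
# Minimal usage sketch (hypothetical handler, for illustration):
#   signal = Signal()
#   interface = signal.registration_interface
#   interface.register_handler(lambda **kwargs: print(kwargs))
#   signal.fire(name="demo._http._tcp.local.")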
0707010000004C000081A400000000000000000000000167C7AD1600001037000000000000000000000000000000000000003B00000000python-zeroconf-0.146.0/src/zeroconf/_services/browser.pxd
import cython

from .._cache cimport DNSCache
from .._history cimport QuestionHistory
from .._protocol.outgoing cimport DNSOutgoing, DNSPointer, DNSQuestion, DNSRecord
from .._record_update cimport RecordUpdate
from .._updates cimport RecordUpdateListener
from .._utils.time cimport current_time_millis, millis_to_seconds
from . cimport Signal, SignalRegistrationInterface


cdef bint TYPE_CHECKING
cdef object cached_possible_types
cdef cython.uint _EXPIRE_REFRESH_TIME_PERCENT, _MAX_MSG_TYPICAL, _DNS_PACKET_HEADER_LEN
cdef cython.uint _TYPE_PTR
cdef object _CLASS_IN
cdef object SERVICE_STATE_CHANGE_ADDED, SERVICE_STATE_CHANGE_REMOVED, SERVICE_STATE_CHANGE_UPDATED
cdef cython.set _ADDRESS_RECORD_TYPES
cdef float RESCUE_RECORD_RETRY_TTL_PERCENTAGE

cdef object _MDNS_PORT, _BROWSER_TIME

cdef object QU_QUESTION

cdef object _FLAGS_QR_QUERY

cdef object heappop, heappush

cdef class _ScheduledPTRQuery:

    cdef public str alias
    cdef public str name
    cdef public unsigned int ttl
    cdef public bint cancelled
    cdef public double expire_time_millis
    cdef public double when_millis

cdef class _DNSPointerOutgoingBucket:

    cdef public double now_millis
    cdef public DNSOutgoing out
    cdef public cython.uint bytes

    cpdef add(self, cython.uint max_compressed_size, DNSQuestion question, cython.set answers)


@cython.locals(cache=DNSCache, question_history=QuestionHistory, record=DNSRecord, qu_question=bint)
cpdef list generate_service_query(
    object zc,
    double now_millis,
    set types_,
    bint multicast,
    object question_type
)


@cython.locals(answer=DNSPointer, query_buckets=list, question=DNSQuestion, max_compressed_size=cython.uint, max_bucket_size=cython.uint, query_bucket=_DNSPointerOutgoingBucket)
cdef list _group_ptr_queries_with_known_answers(double now_millis, bint multicast, cython.dict question_with_known_answers)


cdef class QueryScheduler:

    cdef object _zc
    cdef set _types
    cdef str _addr
    cdef int _port
    cdef bint _multicast
    cdef tuple _first_random_delay_interval
    cdef double _min_time_between_queries_millis
    cdef object _loop
    cdef unsigned int _startup_queries_sent
    cdef public dict _next_scheduled_for_alias
    cdef public list _query_heap
    cdef object _next_run
    cdef double _clock_resolution_millis
    cdef object _question_type

    cdef void _schedule_ptr_refresh(self, DNSPointer pointer, double expire_time_millis, double refresh_time_millis)

    cdef void _schedule_ptr_query(self, _ScheduledPTRQuery scheduled_query)

    @cython.locals(scheduled=_ScheduledPTRQuery)
    cpdef void cancel_ptr_refresh(self, DNSPointer pointer)

    @cython.locals(current=_ScheduledPTRQuery, expire_time=double)
    cpdef void reschedule_ptr_first_refresh(self, DNSPointer pointer)

    @cython.locals(ttl_millis="unsigned int", additional_wait=double, next_query_time=double)
    cpdef void schedule_rescue_query(self, _ScheduledPTRQuery query, double now_millis, float additional_percentage)

    cpdef void _process_startup_queries(self)

    @cython.locals(query=_ScheduledPTRQuery, next_scheduled=_ScheduledPTRQuery, next_when=double)
    cpdef void _process_ready_types(self)

    cpdef void async_send_ready_queries(self, bint first_request, double now_millis, set ready_types)


cdef class _ServiceBrowserBase(RecordUpdateListener):

    cdef public cython.set types
    cdef public object zc
    cdef DNSCache _cache
    cdef object _loop
    cdef public cython.dict _pending_handlers
    cdef public object _service_state_changed
    cdef public QueryScheduler query_scheduler
    cdef public bint done
    cdef public object _query_sender_task

    cpdef void _enqueue_callback(self, object state_change, object type_, object name)

    @cython.locals(record_update=RecordUpdate, record=DNSRecord, cache=DNSCache, service=DNSRecord, pointer=DNSPointer)
    cpdef void async_update_records(self, object zc, double now, cython.list records)

    cpdef cython.list _names_matching_types(self, object types)

    cpdef _fire_service_state_changed_event(self, cython.tuple event)

    cpdef void async_update_records_complete(self)
0707010000004D000081A400000000000000000000000167C7AD1600007C3B000000000000000000000000000000000000003A00000000python-zeroconf-0.146.0/src/zeroconf/_services/browser.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

import asyncio
import heapq
import queue
import random
import threading
import time
import warnings
from collections.abc import Iterable
from functools import partial
from types import TracebackType  # used in type hints
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    cast,
)

from .._dns import DNSPointer, DNSQuestion, DNSQuestionType
from .._logger import log
from .._protocol.outgoing import DNSOutgoing
from .._record_update import RecordUpdate
from .._services import (
    ServiceListener,
    ServiceStateChange,
    Signal,
    SignalRegistrationInterface,
)
from .._updates import RecordUpdateListener
from .._utils.name import cached_possible_types, service_type_name
from .._utils.time import current_time_millis, millis_to_seconds
from ..const import (
    _ADDRESS_RECORD_TYPES,
    _BROWSER_TIME,
    _CLASS_IN,
    _DNS_PACKET_HEADER_LEN,
    _EXPIRE_REFRESH_TIME_PERCENT,
    _FLAGS_QR_QUERY,
    _MAX_MSG_TYPICAL,
    _MDNS_ADDR,
    _MDNS_ADDR6,
    _MDNS_PORT,
    _TYPE_PTR,
)

# https://datatracker.ietf.org/doc/html/rfc6762#section-5.2
_FIRST_QUERY_DELAY_RANDOM_INTERVAL = (20, 120)  # ms

_ON_CHANGE_DISPATCH = {
    ServiceStateChange.Added: "add_service",
    ServiceStateChange.Removed: "remove_service",
    ServiceStateChange.Updated: "update_service",
}

SERVICE_STATE_CHANGE_ADDED = ServiceStateChange.Added
SERVICE_STATE_CHANGE_REMOVED = ServiceStateChange.Removed
SERVICE_STATE_CHANGE_UPDATED = ServiceStateChange.Updated

QU_QUESTION = DNSQuestionType.QU

STARTUP_QUERIES = 4

RESCUE_RECORD_RETRY_TTL_PERCENTAGE = 0.1

if TYPE_CHECKING:
    from .._core import Zeroconf

float_ = float
int_ = int
bool_ = bool
str_ = str

_QuestionWithKnownAnswers = dict[DNSQuestion, set[DNSPointer]]

heappop = heapq.heappop
heappush = heapq.heappush


class _ScheduledPTRQuery:
    __slots__ = (
        "alias",
        "cancelled",
        "expire_time_millis",
        "name",
        "ttl",
        "when_millis",
    )

    def __init__(
        self,
        alias: str,
        name: str,
        ttl: int,
        expire_time_millis: float,
        when_millis: float,
    ) -> None:
        """Create a scheduled query."""
        self.alias = alias
        self.name = name
        self.ttl = ttl
        # Since queries are stored in a heap we need to track whether they
        # are cancelled.  Searching the heap for the entry to remove would
        # be too expensive, so we just mark the query as cancelled and
        # ignore it when it is popped off the heap once it comes due.
        self.cancelled = False
        # Expire time millis is the actual millisecond time the record will expire
        self.expire_time_millis = expire_time_millis
        # When millis is the millisecond time the query should be sent
        # For the first query this is the refresh time which is 75% of the TTL
        #
        # For subsequent queries we increase the time by 10% of the TTL
        # until we reach the expire time and then we stop because it means
        # we failed to rescue the record.
        self.when_millis = when_millis
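        # Worked example (illustrative): for a PTR record with a 4500 second
        # TTL the first refresh fires at 3375s (75%), then retries at 3825s
        # and 4275s (steps of 10% of the TTL) before giving up at the 4500s
        # expire time.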

    def __repr__(self) -> str:
        """Return a string representation of the scheduled query."""
        return (
            f"<{self.__class__.__name__} "
            f"alias={self.alias} "
            f"name={self.name} "
            f"ttl={self.ttl} "
            f"cancelled={self.cancelled} "
            f"expire_time_millis={self.expire_time_millis} "
            f"when_millis={self.when_millis}"
            ">"
        )

    def __lt__(self, other: _ScheduledPTRQuery) -> bool:
        """Compare two scheduled queries."""
        if type(other) is _ScheduledPTRQuery:
            return self.when_millis < other.when_millis
        return NotImplemented

    def __le__(self, other: _ScheduledPTRQuery) -> bool:
        """Compare two scheduled queries."""
        if type(other) is _ScheduledPTRQuery:
            return self.when_millis < other.when_millis or self.__eq__(other)
        return NotImplemented

    def __eq__(self, other: Any) -> bool:
        """Compare two scheduled queries."""
        if type(other) is _ScheduledPTRQuery:
            return self.when_millis == other.when_millis
        return NotImplemented

    def __ge__(self, other: _ScheduledPTRQuery) -> bool:
        """Compare two scheduled queries."""
        if type(other) is _ScheduledPTRQuery:
            return self.when_millis > other.when_millis or self.__eq__(other)
        return NotImplemented

    def __gt__(self, other: _ScheduledPTRQuery) -> bool:
        """Compare two scheduled queries."""
        if type(other) is _ScheduledPTRQuery:
            return self.when_millis > other.when_millis
        return NotImplemented


class _DNSPointerOutgoingBucket:
    """A DNSOutgoing bucket."""

    __slots__ = ("bytes", "now_millis", "out")

    def __init__(self, now_millis: float, multicast: bool) -> None:
        """Create a bucket to wrap a DNSOutgoing."""
        self.now_millis = now_millis
        self.out = DNSOutgoing(_FLAGS_QR_QUERY, multicast)
        self.bytes = 0

    def add(self, max_compressed_size: int_, question: DNSQuestion, answers: set[DNSPointer]) -> None:
        """Add a new set of questions and known answers to the outgoing."""
        self.out.add_question(question)
        for answer in answers:
            self.out.add_answer_at_time(answer, self.now_millis)
        self.bytes += max_compressed_size


def group_ptr_queries_with_known_answers(
    now: float_,
    multicast: bool_,
    question_with_known_answers: _QuestionWithKnownAnswers,
) -> list[DNSOutgoing]:
    """Aggregate queries so that as many known answers as possible fit in the same packet
    without having known answers spill over into the next packet unless the
    question and known answers are always going to exceed the packet size.

    Some responders do not implement multi-packet known answer suppression
    so we try to keep all the known answers in the same packet as the
    questions.
    """
    return _group_ptr_queries_with_known_answers(now, multicast, question_with_known_answers)
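
# Minimal usage sketch (illustrative; assumes a Zeroconf instance `zc` with an
# async_send method and a set of cached DNSPointer records `known_ptrs`):
#   question = DNSQuestion("_http._tcp.local.", _TYPE_PTR, _CLASS_IN)
#   for out in group_ptr_queries_with_known_answers(
#       current_time_millis(), True, {question: known_ptrs}
#   ):
#       zc.async_send(out)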


def _group_ptr_queries_with_known_answers(
    now_millis: float_,
    multicast: bool_,
    question_with_known_answers: _QuestionWithKnownAnswers,
) -> list[DNSOutgoing]:
    """Inner wrapper for group_ptr_queries_with_known_answers."""
    # This is the maximum size the query + known answers can be with name compression.
    # The actual size of the query + known answers may be a bit smaller since other
    # parts may be shared when the final DNSOutgoing packets are constructed. The
    # goal of this algorithm is to quickly bucket the query + known answers without
    # the overhead of actually constructing the packets.
    query_by_size: dict[DNSQuestion, int] = {
        question: (question.max_size + sum(answer.max_size_compressed for answer in known_answers))
        for question, known_answers in question_with_known_answers.items()
    }
    max_bucket_size = _MAX_MSG_TYPICAL - _DNS_PACKET_HEADER_LEN
    query_buckets: list[_DNSPointerOutgoingBucket] = []
    for question in sorted(
        query_by_size,
        key=query_by_size.get,  # type: ignore
        reverse=True,
    ):
        max_compressed_size = query_by_size[question]
        answers = question_with_known_answers[question]
        for query_bucket in query_buckets:
            if query_bucket.bytes + max_compressed_size <= max_bucket_size:
                query_bucket.add(max_compressed_size, question, answers)
                break
        else:
            # If a single question and known answers won't fit in a packet
            # we will end up generating multiple packets, but there will never
            # be multiple questions
            query_bucket = _DNSPointerOutgoingBucket(now_millis, multicast)
            query_bucket.add(max_compressed_size, question, answers)
            query_buckets.append(query_bucket)

    return [query_bucket.out for query_bucket in query_buckets]


def generate_service_query(
    zc: Zeroconf,
    now_millis: float_,
    types_: set[str],
    multicast: bool,
    question_type: DNSQuestionType | None,
) -> list[DNSOutgoing]:
    """Generate a service query for sending with zeroconf.send."""
    questions_with_known_answers: _QuestionWithKnownAnswers = {}
    qu_question = not multicast if question_type is None else question_type is QU_QUESTION
    question_history = zc.question_history
    cache = zc.cache
    for type_ in types_:
        question = DNSQuestion(type_, _TYPE_PTR, _CLASS_IN)
        question.unicast = qu_question
        known_answers = {
            record
            for record in cache.get_all_by_details(type_, _TYPE_PTR, _CLASS_IN)
            if not record.is_stale(now_millis)
        }
        if not qu_question and question_history.suppresses(question, now_millis, known_answers):
            log.debug("Asking %s was suppressed by the question history", question)
            continue
        if TYPE_CHECKING:
            pointer_known_answers = cast(set[DNSPointer], known_answers)
        else:
            pointer_known_answers = known_answers
        questions_with_known_answers[question] = pointer_known_answers
        if not qu_question:
            question_history.add_question_at_time(question, now_millis, known_answers)

    return _group_ptr_queries_with_known_answers(now_millis, multicast, questions_with_known_answers)
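
# Minimal usage sketch (illustrative; assumes a running Zeroconf instance `zc`):
#   outs = generate_service_query(
#       zc, current_time_millis(), {"_http._tcp.local."}, True, None
#   )
#   for out in outs:
#       zc.async_send(out)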


def _on_change_dispatcher(
    listener: ServiceListener,
    zeroconf: Zeroconf,
    service_type: str,
    name: str,
    state_change: ServiceStateChange,
) -> None:
    """Dispatch a service state change to a listener."""
    getattr(listener, _ON_CHANGE_DISPATCH[state_change])(zeroconf, service_type, name)


def _service_state_changed_from_listener(
    listener: ServiceListener,
) -> Callable[..., None]:
    """Generate a service_state_changed handlers from a listener."""
    assert listener is not None
    if not hasattr(listener, "update_service"):
        warnings.warn(
            f"{listener!r} has no update_service method. Provide one (it can be empty if you "
            "don't care about the updates), it'll become mandatory.",
            FutureWarning,
            stacklevel=1,
        )
    return partial(_on_change_dispatcher, listener)


class QueryScheduler:
    """Schedule outgoing PTR queries for Continuous Multicast DNS Querying

    https://datatracker.ietf.org/doc/html/rfc6762#section-5.2

    """

    __slots__ = (
        "_addr",
        "_clock_resolution_millis",
        "_first_random_delay_interval",
        "_loop",
        "_min_time_between_queries_millis",
        "_multicast",
        "_next_run",
        "_next_scheduled_for_alias",
        "_port",
        "_query_heap",
        "_question_type",
        "_startup_queries_sent",
        "_types",
        "_zc",
    )

    def __init__(
        self,
        zc: Zeroconf,
        types: set[str],
        addr: str | None,
        port: int,
        multicast: bool,
        delay: int,
        first_random_delay_interval: tuple[int, int],
        question_type: DNSQuestionType | None,
    ) -> None:
        self._zc = zc
        self._types = types
        self._addr = addr
        self._port = port
        self._multicast = multicast
        self._first_random_delay_interval = first_random_delay_interval
        self._min_time_between_queries_millis = delay
        self._loop: asyncio.AbstractEventLoop | None = None
        self._startup_queries_sent = 0
        self._next_scheduled_for_alias: dict[str, _ScheduledPTRQuery] = {}
        self._query_heap: list[_ScheduledPTRQuery] = []
        self._next_run: asyncio.TimerHandle | None = None
        self._clock_resolution_millis = time.get_clock_info("monotonic").resolution * 1000
        self._question_type = question_type

    def start(self, loop: asyncio.AbstractEventLoop) -> None:
        """Start the scheduler.

        https://datatracker.ietf.org/doc/html/rfc6762#section-5.2
        To avoid accidental synchronization when, for some reason, multiple
        clients begin querying at exactly the same moment (e.g., because of
        some common external trigger event), a Multicast DNS querier SHOULD
        also delay the first query of the series by a randomly chosen amount
        in the range 20-120 ms.
        """
        start_delay = millis_to_seconds(random.randint(*self._first_random_delay_interval))  # noqa: S311
        self._loop = loop
        self._next_run = loop.call_later(start_delay, self._process_startup_queries)

    def stop(self) -> None:
        """Stop the scheduler."""
        if self._next_run is not None:
            self._next_run.cancel()
            self._next_run = None
        self._next_scheduled_for_alias.clear()
        self._query_heap.clear()

    def _schedule_ptr_refresh(
        self,
        pointer: DNSPointer,
        expire_time_millis: float_,
        refresh_time_millis: float_,
    ) -> None:
        """Schedule a query for a pointer."""
        ttl = int(pointer.ttl) if isinstance(pointer.ttl, float) else pointer.ttl
        scheduled_ptr_query = _ScheduledPTRQuery(
            pointer.alias, pointer.name, ttl, expire_time_millis, refresh_time_millis
        )
        self._schedule_ptr_query(scheduled_ptr_query)

    def _schedule_ptr_query(self, scheduled_query: _ScheduledPTRQuery) -> None:
        """Schedule a query for a pointer."""
        self._next_scheduled_for_alias[scheduled_query.alias] = scheduled_query
        heappush(self._query_heap, scheduled_query)

    def cancel_ptr_refresh(self, pointer: DNSPointer) -> None:
        """Cancel a query for a pointer."""
        scheduled = self._next_scheduled_for_alias.pop(pointer.alias, None)
        if scheduled:
            scheduled.cancelled = True

    def reschedule_ptr_first_refresh(self, pointer: DNSPointer) -> None:
        """Reschedule a query for a pointer."""
        current = self._next_scheduled_for_alias.get(pointer.alias)
        refresh_time_millis = pointer.get_expiration_time(_EXPIRE_REFRESH_TIME_PERCENT)
        if current is not None:
            # If the new refresh time is within self._min_time_between_queries_millis
            # of the currently scheduled time, avoid churn by not rescheduling
            if (
                -self._min_time_between_queries_millis
                <= refresh_time_millis - current.when_millis
                <= self._min_time_between_queries_millis
            ):
                return
            current.cancelled = True
            del self._next_scheduled_for_alias[pointer.alias]
        expire_time_millis = pointer.get_expiration_time(100)
        self._schedule_ptr_refresh(pointer, expire_time_millis, refresh_time_millis)

    def schedule_rescue_query(
        self,
        query: _ScheduledPTRQuery,
        now_millis: float_,
        additional_percentage: float_,
    ) -> None:
        """Reschedule a query for a pointer at an additional percentage of expiration."""
        ttl_millis = query.ttl * 1000
        additional_wait = ttl_millis * additional_percentage
        next_query_time = now_millis + additional_wait
        if next_query_time >= query.expire_time_millis:
            # If we would schedule past the expire time
            # there is no point in scheduling as we already
            # tried to rescue the record and failed
            return
        scheduled_ptr_query = _ScheduledPTRQuery(
            query.alias,
            query.name,
            query.ttl,
            query.expire_time_millis,
            next_query_time,
        )
        self._schedule_ptr_query(scheduled_ptr_query)
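        # Worked example (illustrative): with a 4500 s TTL and
        # additional_percentage 0.1, additional_wait is 450_000 ms, so after
        # the initial refresh attempt at 75% of the TTL, rescue queries land
        # at roughly 85% and 95%, stopping once they would pass expiration.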

    def _process_startup_queries(self) -> None:
        if TYPE_CHECKING:
            assert self._loop is not None
        # This is a safety net to ensure we stop sending queries if the
        # Zeroconf instance is stopped without the browser being cancelled
        if self._zc.done:
            return

        now_millis = current_time_millis()

        # At first we will send STARTUP_QUERIES queries to get the cache populated
        self.async_send_ready_queries(self._startup_queries_sent == 0, now_millis, self._types)
        self._startup_queries_sent += 1

        # Once we finish sending the initial queries we will
        # switch to a strategy of sending queries only when we
        # need to refresh records that are about to expire
        if self._startup_queries_sent >= STARTUP_QUERIES:
            self._next_run = self._loop.call_at(
                millis_to_seconds(now_millis + self._min_time_between_queries_millis),
                self._process_ready_types,
            )
            return

        # Back off between startup queries: with N queries already sent,
        # the next run is scheduled N**2 seconds later (1 s, 4 s, 9 s, ...).
        self._next_run = self._loop.call_later(self._startup_queries_sent**2, self._process_startup_queries)

    def _process_ready_types(self) -> None:
        """Generate a list of ready types that is due and schedule the next time."""
        if TYPE_CHECKING:
            assert self._loop is not None
        # This is a safety net to ensure we stop sending queries if the
        # Zeroconf instance is stopped without the browser being cancelled
        if self._zc.done:
            return

        now_millis = current_time_millis()
        # Refresh records that are about to expire (at _EXPIRE_REFRESH_TIME_PERCENT,
        # currently 75% of the TTL) and send additional rescue queries if the
        # 75% query failed to refresh the record, with a minimum time between
        # queries of _min_time_between_queries_millis, which defaults to 10s.

        ready_types: set[str] = set()
        next_scheduled: _ScheduledPTRQuery | None = None
        end_time_millis = now_millis + self._clock_resolution_millis
        schedule_rescue: list[_ScheduledPTRQuery] = []

        while self._query_heap:
            query = self._query_heap[0]
            if query.cancelled:
                heappop(self._query_heap)
                continue
            if query.when_millis > end_time_millis:
                next_scheduled = query
                break
            query = heappop(self._query_heap)
            ready_types.add(query.name)
            del self._next_scheduled_for_alias[query.alias]
            # If there is still more than 10% of the TTL remaining
            # schedule a query again to try to rescue the record
            # from expiring. If the record is refreshed before
            # the query, the query will get cancelled.
            schedule_rescue.append(query)

        for query in schedule_rescue:
            self.schedule_rescue_query(query, now_millis, RESCUE_RECORD_RETRY_TTL_PERCENTAGE)

        if ready_types:
            self.async_send_ready_queries(False, now_millis, ready_types)

        next_time_millis = now_millis + self._min_time_between_queries_millis

        if next_scheduled is not None and next_scheduled.when_millis > next_time_millis:
            next_when_millis = next_scheduled.when_millis
        else:
            next_when_millis = next_time_millis

        self._next_run = self._loop.call_at(millis_to_seconds(next_when_millis), self._process_ready_types)

    def async_send_ready_queries(
        self, first_request: bool, now_millis: float_, ready_types: set[str]
    ) -> None:
        """Send any ready queries."""
        # If no question type was specified and this is the first request, ask
        # QU questions (https://datatracker.ietf.org/doc/html/rfc6762#section-5.4)
        # since we are just starting up and our cache is likely empty. This
        # ensures the next outgoing query will be sent with the known answers list.
        question_type = QU_QUESTION if self._question_type is None and first_request else self._question_type
        outs = generate_service_query(self._zc, now_millis, ready_types, self._multicast, question_type)
        if outs:
            for out in outs:
                self._zc.async_send(out, self._addr, self._port)


class _ServiceBrowserBase(RecordUpdateListener):
    """Base class for ServiceBrowser."""

    __slots__ = (
        "_cache",
        "_loop",
        "_pending_handlers",
        "_query_sender_task",
        "_service_state_changed",
        "done",
        "query_scheduler",
        "types",
        "zc",
    )

    def __init__(
        self,
        zc: Zeroconf,
        type_: str | list,
        handlers: ServiceListener | list[Callable[..., None]] | None = None,
        listener: ServiceListener | None = None,
        addr: str | None = None,
        port: int = _MDNS_PORT,
        delay: int = _BROWSER_TIME,
        question_type: DNSQuestionType | None = None,
    ) -> None:
        """Used to browse for a service for specific type(s).

        Constructor parameters are as follows:

        * `zc`: A Zeroconf instance
        * `type_`: fully qualified service type name
        * `handlers`: ServiceListener or list of Callables that process ServiceStateChange events
        * `listener`: ServiceListener
        * `addr`: address to send queries (will default to multicast)
        * `port`: port to send queries (will default to mdns 5353)
        * `delay`: The initial delay between answering questions
        * `question_type`: The type of questions to ask (DNSQuestionType.QM or DNSQuestionType.QU)

        The listener object will have its add_service() and
        remove_service() methods called when this browser
        discovers changes in the services' availability.
        """
        assert handlers or listener, "You need to specify at least one handler"
        self.types: set[str] = set(type_ if isinstance(type_, list) else [type_])
        for check_type_ in self.types:
            # Will generate BadTypeInNameException on a bad name
            service_type_name(check_type_, strict=False)
        self.zc = zc
        self._cache = zc.cache
        assert zc.loop is not None
        self._loop = zc.loop
        self._pending_handlers: dict[tuple[str, str], ServiceStateChange] = {}
        self._service_state_changed = Signal()
        self.query_scheduler = QueryScheduler(
            zc,
            self.types,
            addr,
            port,
            addr in (None, _MDNS_ADDR, _MDNS_ADDR6),
            delay,
            _FIRST_QUERY_DELAY_RANDOM_INTERVAL,
            question_type,
        )
        self.done = False
        self._query_sender_task: asyncio.Task | None = None

        if hasattr(handlers, "add_service"):
            listener = cast(ServiceListener, handlers)
            handlers = None

        handlers = cast(list[Callable[..., None]], handlers or [])

        if listener:
            handlers.append(_service_state_changed_from_listener(listener))

        for h in handlers:
            self.service_state_changed.register_handler(h)

    def _async_start(self) -> None:
        """Generate the next time and setup listeners.

        Must be called by uses of this base class after they
        have finished setting their properties.
        """
        self.zc.async_add_listener(self, [DNSQuestion(type_, _TYPE_PTR, _CLASS_IN) for type_ in self.types])
        # Only start queries after the listener is installed
        self._query_sender_task = asyncio.ensure_future(self._async_start_query_sender())

    @property
    def service_state_changed(self) -> SignalRegistrationInterface:
        return self._service_state_changed.registration_interface

    def _names_matching_types(self, names: Iterable[str]) -> list[tuple[str, str]]:
        """Return the type and name for records matching the types we are browsing."""
        return [
            (type_, name) for name in names for type_ in self.types.intersection(cached_possible_types(name))
        ]

    def _enqueue_callback(
        self,
        state_change: ServiceStateChange,
        type_: str_,
        name: str_,
    ) -> None:
        # Ensure we only queue a single state change per (name, type) key.
        # Precedence is: Added, Removed, Updated
        key = (name, type_)
        if (
            state_change is SERVICE_STATE_CHANGE_ADDED
            or (
                state_change is SERVICE_STATE_CHANGE_REMOVED
                and self._pending_handlers.get(key) is not SERVICE_STATE_CHANGE_ADDED
            )
            or (state_change is SERVICE_STATE_CHANGE_UPDATED and key not in self._pending_handlers)
        ):
            self._pending_handlers[key] = state_change

    def async_update_records(self, zc: Zeroconf, now: float_, records: list[RecordUpdate]) -> None:
        """Callback invoked by Zeroconf when new information arrives.

        Updates information required by the browser in the Zeroconf cache.

        Ensures that there are no unnecessary duplicates in the list.

        This method will be run in the event loop.
        """
        for record_update in records:
            record = record_update.new
            old_record = record_update.old
            record_type = record.type

            if record_type is _TYPE_PTR:
                if TYPE_CHECKING:
                    record = cast(DNSPointer, record)
                pointer = record
                for type_ in self.types.intersection(cached_possible_types(pointer.name)):
                    if old_record is None:
                        self._enqueue_callback(SERVICE_STATE_CHANGE_ADDED, type_, pointer.alias)
                        self.query_scheduler.reschedule_ptr_first_refresh(pointer)
                    elif pointer.is_expired(now):
                        self._enqueue_callback(SERVICE_STATE_CHANGE_REMOVED, type_, pointer.alias)
                        self.query_scheduler.cancel_ptr_refresh(pointer)
                    else:
                        self.query_scheduler.reschedule_ptr_first_refresh(pointer)
                continue

            # If it's expired or already exists in the cache it cannot be updated.
            if old_record is not None or record.is_expired(now):
                continue

            if record_type in _ADDRESS_RECORD_TYPES:
                cache = self._cache
                names = {service.name for service in cache.async_entries_with_server(record.name)}
                # Iterate through the DNSCache and callback any services that use this address
                for type_, name in self._names_matching_types(names):
                    self._enqueue_callback(SERVICE_STATE_CHANGE_UPDATED, type_, name)
                continue

            for type_, name in self._names_matching_types((record.name,)):
                self._enqueue_callback(SERVICE_STATE_CHANGE_UPDATED, type_, name)

    def async_update_records_complete(self) -> None:
        """Called when a record update has completed for all handlers.

        At this point the cache will have the new records.

        This method will be run in the event loop.

        This method is expected to be overridden by subclasses.
        """
        for pending in self._pending_handlers.items():
            self._fire_service_state_changed_event(pending)
        self._pending_handlers.clear()

    def _fire_service_state_changed_event(self, event: tuple[tuple[str, str], ServiceStateChange]) -> None:
        """Fire a service state changed event.

        When running with ServiceBrowser, this will happen in the dedicated
        thread.

        When running with AsyncServiceBrowser, this will happen in the event loop.
        """
        name_type = event[0]
        state_change = event[1]
        self._service_state_changed.fire(
            zeroconf=self.zc,
            service_type=name_type[1],
            name=name_type[0],
            state_change=state_change,
        )

    def _async_cancel(self) -> None:
        """Cancel the browser."""
        self.done = True
        self.query_scheduler.stop()
        self.zc.async_remove_listener(self)
        assert self._query_sender_task is not None, "Attempted to cancel a browser that was not started"
        self._query_sender_task.cancel()
        self._query_sender_task = None

    async def _async_start_query_sender(self) -> None:
        """Start scheduling queries."""
        if not self.zc.started:
            await self.zc.async_wait_for_start()
        self.query_scheduler.start(self._loop)


class ServiceBrowser(_ServiceBrowserBase, threading.Thread):
    """Used to browse for a service of a specific type.

    The listener object will have its add_service() and
    remove_service() methods called when this browser
    discovers changes in the services' availability."""

    def __init__(
        self,
        zc: Zeroconf,
        type_: str | list,
        handlers: ServiceListener | list[Callable[..., None]] | None = None,
        listener: ServiceListener | None = None,
        addr: str | None = None,
        port: int = _MDNS_PORT,
        delay: int = _BROWSER_TIME,
        question_type: DNSQuestionType | None = None,
    ) -> None:
        assert zc.loop is not None
        if not zc.loop.is_running():
            raise RuntimeError("The event loop is not running")
        threading.Thread.__init__(self)
        super().__init__(zc, type_, handlers, listener, addr, port, delay, question_type)
        # Add the queue before the listener is installed in _setup
        # to ensure that events run in the dedicated thread and do
        # not block the event loop
        self.queue: queue.SimpleQueue = queue.SimpleQueue()
        self.daemon = True
        self.start()
        zc.loop.call_soon_threadsafe(self._async_start)
        self.name = "zeroconf-ServiceBrowser-{}-{}".format(
            "-".join([type_[:-7] for type_ in self.types]),
            getattr(self, "native_id", self.ident),
        )

    def cancel(self) -> None:
        """Cancel the browser."""
        assert self.zc.loop is not None
        self.queue.put(None)
        self.zc.loop.call_soon_threadsafe(self._async_cancel)
        self.join()

    def run(self) -> None:
        """Run the browser thread."""
        while True:
            event = self.queue.get()
            if event is None:
                return
            self._fire_service_state_changed_event(event)

    def async_update_records_complete(self) -> None:
        """Called when a record update has completed for all handlers.

        At this point the cache will have the new records.

        This method will be run in the event loop.
        """
        for pending in self._pending_handlers.items():
            self.queue.put(pending)
        self._pending_handlers.clear()

    def __enter__(self) -> ServiceBrowser:
        return self

    def __exit__(  # pylint: disable=useless-return
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        self.cancel()
        return None
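
# Usage sketch (illustrative; assumes an mDNS-capable network and a listener
# like the MyListener sketch earlier in this module):
#
#     zc = Zeroconf()
#     with ServiceBrowser(zc, "_http._tcp.local.", listener=MyListener()):
#         input("Press enter to exit...\n")
#     zc.close()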
0707010000004E000081A400000000000000000000000167C7AD16000013B0000000000000000000000000000000000000003800000000python-zeroconf-0.146.0/src/zeroconf/_services/info.pxd
import cython

from .._cache cimport DNSCache
from .._dns cimport (
    DNSAddress,
    DNSNsec,
    DNSPointer,
    DNSQuestion,
    DNSRecord,
    DNSService,
    DNSText,
)
from .._history cimport QuestionHistory
from .._protocol.outgoing cimport DNSOutgoing
from .._record_update cimport RecordUpdate
from .._updates cimport RecordUpdateListener
from .._utils.ipaddress cimport (
    get_ip_address_object_from_record,
    ip_bytes_and_scope_to_address,
    str_without_scope_id,
)
from .._utils.time cimport current_time_millis

cdef cython.set _TYPE_AAAA_RECORDS
cdef cython.set _TYPE_A_RECORDS
cdef cython.set _TYPE_A_AAAA_RECORDS

cdef object _resolve_all_futures_to_none

cdef object _TYPE_SRV
cdef object _TYPE_TXT
cdef object _TYPE_A
cdef object _TYPE_AAAA
cdef object _TYPE_PTR
cdef object _TYPE_NSEC
cdef object _CLASS_IN
cdef object _FLAGS_QR_QUERY

cdef object service_type_name

cdef object QU_QUESTION
cdef object QM_QUESTION

cdef object _IPVersion_All_value
cdef object _IPVersion_V4Only_value

cdef cython.set _ADDRESS_RECORD_TYPES

cdef unsigned int _DUPLICATE_QUESTION_INTERVAL

cdef bint TYPE_CHECKING
cdef object cached_ip_addresses

cdef object randint

cdef class ServiceInfo(RecordUpdateListener):

    cdef public cython.bytes text
    cdef public str type
    cdef str _name
    cdef public str key
    cdef public cython.list _ipv4_addresses
    cdef public cython.list _ipv6_addresses
    cdef public object port
    cdef public object weight
    cdef public object priority
    cdef public str server
    cdef public str server_key
    cdef public cython.dict _properties
    cdef public cython.dict _decoded_properties
    cdef public object host_ttl
    cdef public object other_ttl
    cdef public object interface_index
    cdef public cython.set _new_records_futures
    cdef public DNSPointer _dns_pointer_cache
    cdef public DNSService _dns_service_cache
    cdef public DNSText _dns_text_cache
    cdef public cython.list _dns_address_cache
    cdef public cython.set _get_address_and_nsec_records_cache
    cdef public cython.set _query_record_types

    @cython.locals(record_update=RecordUpdate, update=bint, cache=DNSCache)
    cpdef void async_update_records(self, object zc, double now, cython.list records)

    @cython.locals(cache=DNSCache)
    cpdef bint _load_from_cache(self, object zc, double now)

    @cython.locals(length="unsigned char", index="unsigned int", key_value=bytes, key_sep_value=tuple)
    cdef void _unpack_text_into_properties(self)

    @cython.locals(k=bytes, v=bytes)
    cdef void _generate_decoded_properties(self)

    @cython.locals(properties_contain_str=bint)
    cpdef void _set_properties(self, cython.dict properties)

    cdef void _set_text(self, cython.bytes text)

    @cython.locals(record=DNSAddress)
    cdef _get_ip_addresses_from_cache_lifo(self, object zc, double now, object type)

    @cython.locals(
        dns_service_record=DNSService,
        dns_text_record=DNSText,
        dns_address_record=DNSAddress
    )
    cdef bint _process_record_threadsafe(self, object zc, DNSRecord record, double now)

    @cython.locals(cache=DNSCache)
    cdef cython.list _get_address_records_from_cache_by_type(self, object zc, object _type)

    cdef void _set_ipv4_addresses_from_cache(self, object zc, double now)

    cdef void _set_ipv6_addresses_from_cache(self, object zc, double now)

    cdef cython.list _ip_addresses_by_version_value(self, object version_value)

    cpdef addresses_by_version(self, object version)

    cpdef ip_addresses_by_version(self, object version)

    @cython.locals(cacheable=cython.bint)
    cdef cython.list _dns_addresses(self, object override_ttls, object version)

    @cython.locals(cacheable=cython.bint)
    cdef DNSPointer _dns_pointer(self, object override_ttl)

    @cython.locals(cacheable=cython.bint)
    cdef DNSService _dns_service(self, object override_ttl)

    @cython.locals(cacheable=cython.bint)
    cdef DNSText _dns_text(self, object override_ttl)

    cdef DNSNsec _dns_nsec(self, cython.list missing_types, object override_ttl)

    @cython.locals(cacheable=cython.bint)
    cdef cython.set _get_address_and_nsec_records(self, object override_ttl)

    cpdef void async_clear_cache(self)

    @cython.locals(cache=DNSCache, history=QuestionHistory, out=DNSOutgoing, qu_question=bint)
    cdef DNSOutgoing _generate_request_query(self, object zc, double now, object question_type)

    @cython.locals(question=DNSQuestion, answer=DNSRecord)
    cdef void _add_question_with_known_answers(
        self,
        DNSOutgoing out,
        bint qu_question,
        QuestionHistory question_history,
        DNSCache cache,
        double now,
        str name,
        object type_,
        object class_,
        bint skip_if_known_answers
    )

    cdef double _get_initial_delay(self)

    cdef double _get_random_delay(self)

cdef class AddressResolver(ServiceInfo):
    pass

cdef class AddressResolverIPv6(ServiceInfo):
    pass

cdef class AddressResolverIPv4(ServiceInfo):
    pass
0707010000004F000081A400000000000000000000000167C7AD1600009792000000000000000000000000000000000000003700000000python-zeroconf-0.146.0/src/zeroconf/_services/info.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

import asyncio
import random
from typing import TYPE_CHECKING, cast

from .._cache import DNSCache
from .._dns import (
    DNSAddress,
    DNSNsec,
    DNSPointer,
    DNSQuestion,
    DNSQuestionType,
    DNSRecord,
    DNSService,
    DNSText,
)
from .._exceptions import BadTypeInNameException
from .._history import QuestionHistory
from .._logger import log
from .._protocol.outgoing import DNSOutgoing
from .._record_update import RecordUpdate
from .._updates import RecordUpdateListener
from .._utils.asyncio import (
    _resolve_all_futures_to_none,
    get_running_loop,
    run_coro_with_timeout,
    wait_for_future_set_or_timeout,
)
from .._utils.ipaddress import (
    ZeroconfIPv4Address,
    ZeroconfIPv6Address,
    cached_ip_addresses,
    get_ip_address_object_from_record,
    ip_bytes_and_scope_to_address,
    str_without_scope_id,
)
from .._utils.name import service_type_name
from .._utils.net import IPVersion, _encode_address
from .._utils.time import current_time_millis
from ..const import (
    _ADDRESS_RECORD_TYPES,
    _CLASS_IN,
    _CLASS_IN_UNIQUE,
    _DNS_HOST_TTL,
    _DNS_OTHER_TTL,
    _DUPLICATE_QUESTION_INTERVAL,
    _FLAGS_QR_QUERY,
    _LISTENER_TIME,
    _MDNS_PORT,
    _TYPE_A,
    _TYPE_AAAA,
    _TYPE_NSEC,
    _TYPE_PTR,
    _TYPE_SRV,
    _TYPE_TXT,
)

_IPVersion_All_value = IPVersion.All.value
_IPVersion_V4Only_value = IPVersion.V4Only.value
# https://datatracker.ietf.org/doc/html/rfc6762#section-5.2
# The most common case for calling ServiceInfo is from a
# ServiceBrowser. After the first request we add a few random
# milliseconds to the delay between requests to reduce the chance
# that there are multiple ServiceBrowser callbacks running on
# the network that are firing at the same time when they
# see the same multicast response and decide to refresh
# the A/AAAA/SRV records for a host.
_AVOID_SYNC_DELAY_RANDOM_INTERVAL = (20, 120)

_TYPE_AAAA_RECORDS = {_TYPE_AAAA}
_TYPE_A_RECORDS = {_TYPE_A}
_TYPE_A_AAAA_RECORDS = {_TYPE_A, _TYPE_AAAA}

bytes_ = bytes
float_ = float
int_ = int
str_ = str

QU_QUESTION = DNSQuestionType.QU
QM_QUESTION = DNSQuestionType.QM

randint = random.randint

if TYPE_CHECKING:
    from .._core import Zeroconf


def instance_name_from_service_info(info: ServiceInfo, strict: bool = True) -> str:
    """Calculate the instance name from the ServiceInfo."""
    # This is kind of funky because of the subtype-based tests;
    # we need to make subtypes a first-class citizen.
    service_name = service_type_name(info.name, strict=strict)
    if not info.type.endswith(service_name):
        raise BadTypeInNameException
    return info.name[: -len(service_name) - 1]
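
# Example (illustrative): for a ServiceInfo with name
# "Kitchen Printer._http._tcp.local." and type "_http._tcp.local.",
# the instance name is "Kitchen Printer".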


class ServiceInfo(RecordUpdateListener):
    """Service information.

    Constructor parameters are as follows:

    * `type_`: fully qualified service type name
    * `name`: fully qualified service name
    * `port`: port that the service runs on
    * `weight`: weight of the service
    * `priority`: priority of the service
    * `properties`: dictionary of properties (or a bytes object holding the contents of the `text` field);
      non-bytes keys and values are converted to str and then encoded to bytes using UTF-8. Keys with
      `None` values are converted to value-less attributes.
    * `server`: fully qualified name for service host (defaults to name)
    * `host_ttl`: ttl used for A/SRV records
    * `other_ttl`: ttl used for PTR/TXT records
    * `addresses` and `parsed_addresses`: List of IP addresses (either as bytes, network byte order,
      or in parsed form as text; at most one of those parameters can be provided)
    * interface_index: scope_id or zone_id for IPv6 link-local addresses i.e. an identifier of the interface
      where the peer is connected to
    """

    __slots__ = (
        "_decoded_properties",
        "_dns_address_cache",
        "_dns_pointer_cache",
        "_dns_service_cache",
        "_dns_text_cache",
        "_get_address_and_nsec_records_cache",
        "_ipv4_addresses",
        "_ipv6_addresses",
        "_name",
        "_new_records_futures",
        "_properties",
        "_query_record_types",
        "host_ttl",
        "interface_index",
        "key",
        "other_ttl",
        "port",
        "priority",
        "server",
        "server_key",
        "text",
        "type",
        "weight",
    )

    def __init__(
        self,
        type_: str,
        name: str,
        port: int | None = None,
        weight: int = 0,
        priority: int = 0,
        properties: bytes | dict = b"",
        server: str | None = None,
        host_ttl: int = _DNS_HOST_TTL,
        other_ttl: int = _DNS_OTHER_TTL,
        *,
        addresses: list[bytes] | None = None,
        parsed_addresses: list[str] | None = None,
        interface_index: int | None = None,
    ) -> None:
        # Accept none or one of addresses/parsed_addresses, but not both.
        if addresses is not None and parsed_addresses is not None:
            raise TypeError("addresses and parsed_addresses cannot be provided together")
        if not type_.endswith(service_type_name(name, strict=False)):
            raise BadTypeInNameException
        self.interface_index = interface_index
        self.text = b""
        self.type = type_
        self._name = name
        self.key = name.lower()
        self._ipv4_addresses: list[ZeroconfIPv4Address] = []
        self._ipv6_addresses: list[ZeroconfIPv6Address] = []
        if addresses is not None:
            self.addresses = addresses
        elif parsed_addresses is not None:
            self.addresses = [_encode_address(a) for a in parsed_addresses]
        self.port = port
        self.weight = weight
        self.priority = priority
        self.server = server if server else None
        self.server_key = server.lower() if server else None
        self._properties: dict[bytes, bytes | None] | None = None
        self._decoded_properties: dict[str, str | None] | None = None
        if isinstance(properties, bytes):
            self._set_text(properties)
        else:
            self._set_properties(properties)
        self.host_ttl = host_ttl
        self.other_ttl = other_ttl
        self._new_records_futures: set[asyncio.Future] | None = None
        self._dns_address_cache: list[DNSAddress] | None = None
        self._dns_pointer_cache: DNSPointer | None = None
        self._dns_service_cache: DNSService | None = None
        self._dns_text_cache: DNSText | None = None
        self._get_address_and_nsec_records_cache: set[DNSRecord] | None = None
        self._query_record_types = {_TYPE_SRV, _TYPE_TXT, _TYPE_A, _TYPE_AAAA}

    @property
    def name(self) -> str:
        """The name of the service."""
        return self._name

    @name.setter
    def name(self, name: str) -> None:
        """Replace the name and reset the key."""
        self._name = name
        self.key = name.lower()
        self._dns_service_cache = None
        self._dns_pointer_cache = None
        self._dns_text_cache = None

    @property
    def addresses(self) -> list[bytes]:
        """IPv4 addresses of this service.

        Only IPv4 addresses are returned for backward compatibility.
        Use :meth:`addresses_by_version` or :meth:`parsed_addresses` to
        include IPv6 addresses as well.
        """
        return self.addresses_by_version(IPVersion.V4Only)

    @addresses.setter
    def addresses(self, value: list[bytes]) -> None:
        """Replace the addresses list.

        This replaces all currently stored addresses, both IPv4 and IPv6.
        """
        self._ipv4_addresses.clear()
        self._ipv6_addresses.clear()
        self._dns_address_cache = None
        self._get_address_and_nsec_records_cache = None

        for address in value:
            if len(address) == 16 and self.interface_index is not None:
                addr = ip_bytes_and_scope_to_address(address, self.interface_index)
            else:
                addr = cached_ip_addresses(address)
            if addr is None:
                raise TypeError(
                    "Addresses must either be IPv4 or IPv6 strings, bytes, or integers;"
                    f" got {address!r}. Hint: convert string addresses with socket.inet_pton"
                )
            if addr.version == 4:
                if TYPE_CHECKING:
                    assert isinstance(addr, ZeroconfIPv4Address)
                self._ipv4_addresses.append(addr)
            else:
                if TYPE_CHECKING:
                    assert isinstance(addr, ZeroconfIPv6Address)
                self._ipv6_addresses.append(addr)

    @property
    def properties(self) -> dict[bytes, bytes | None]:
        """Return properties as bytes."""
        if self._properties is None:
            self._unpack_text_into_properties()
        if TYPE_CHECKING:
            assert self._properties is not None
        return self._properties

    @property
    def decoded_properties(self) -> dict[str, str | None]:
        """Return properties as strings."""
        if self._decoded_properties is None:
            self._generate_decoded_properties()
        if TYPE_CHECKING:
            assert self._decoded_properties is not None
        return self._decoded_properties

    def async_clear_cache(self) -> None:
        """Clear the cache for this service info."""
        self._dns_address_cache = None
        self._dns_pointer_cache = None
        self._dns_service_cache = None
        self._dns_text_cache = None
        self._get_address_and_nsec_records_cache = None

    async def async_wait(self, timeout: float, loop: asyncio.AbstractEventLoop | None = None) -> None:
        """Calling task waits for a given number of milliseconds or until notified."""
        if not self._new_records_futures:
            self._new_records_futures = set()
        await wait_for_future_set_or_timeout(
            loop or asyncio.get_running_loop(), self._new_records_futures, timeout
        )

    def addresses_by_version(self, version: IPVersion) -> list[bytes]:
        """List addresses matching IP version.

        Addresses are guaranteed to be returned in LIFO (last in, first out)
        order with IPv4 addresses first and IPv6 addresses second.

        This means the first address will always be the most recently added
        address of the given IP version.
        """
        version_value = version.value
        if version_value == _IPVersion_All_value:
            ip_v4_packed = [addr.packed for addr in self._ipv4_addresses]
            ip_v6_packed = [addr.packed for addr in self._ipv6_addresses]
            return [*ip_v4_packed, *ip_v6_packed]
        if version_value == _IPVersion_V4Only_value:
            return [addr.packed for addr in self._ipv4_addresses]
        return [addr.packed for addr in self._ipv6_addresses]

    def ip_addresses_by_version(
        self, version: IPVersion
    ) -> list[ZeroconfIPv4Address] | list[ZeroconfIPv6Address]:
        """List ip_address objects matching IP version.

        Addresses are guaranteed to be returned in LIFO (last in, first out)
        order with IPv4 addresses first and IPv6 addresses second.

        This means the first address will always be the most recently added
        address of the given IP version.
        """
        return self._ip_addresses_by_version_value(version.value)

    def _ip_addresses_by_version_value(
        self, version_value: int_
    ) -> list[ZeroconfIPv4Address] | list[ZeroconfIPv6Address]:
        """Backend for addresses_by_version that uses the raw value."""
        if version_value == _IPVersion_All_value:
            return [*self._ipv4_addresses, *self._ipv6_addresses]  # type: ignore[return-value]
        if version_value == _IPVersion_V4Only_value:
            return self._ipv4_addresses
        return self._ipv6_addresses

    def parsed_addresses(self, version: IPVersion = IPVersion.All) -> list[str]:
        """List addresses in their parsed string form.

        Addresses are guaranteed to be returned in LIFO (last in, first out)
        order with IPv4 addresses first and IPv6 addresses second.

        This means the first address will always be the most recently added
        address of the given IP version.
        """
        return [str_without_scope_id(addr) for addr in self._ip_addresses_by_version_value(version.value)]

    def parsed_scoped_addresses(self, version: IPVersion = IPVersion.All) -> list[str]:
        """Equivalent to parsed_addresses, with the exception that IPv6 Link-Local
        addresses are qualified with %<interface_index> when available.

        Addresses are guaranteed to be returned in LIFO (last in, first out)
        order with IPv4 addresses first and IPv6 addresses second.

        This means the first address will always be the most recently added
        address of the given IP version.
        """
        return [str(addr) for addr in self._ip_addresses_by_version_value(version.value)]

    def _set_properties(self, properties: dict[str | bytes, str | bytes | None]) -> None:
        """Sets properties and text of this info from a dictionary"""
        list_: list[bytes] = []
        properties_contain_str = False
        result = b""
        for key, value in properties.items():
            if isinstance(key, str):
                key = key.encode("utf-8")  # noqa: PLW2901
                properties_contain_str = True

            record = key
            if value is not None:
                if not isinstance(value, bytes):
                    value = str(value).encode("utf-8")  # noqa: PLW2901
                    properties_contain_str = True
                record += b"=" + value
            list_.append(record)
        for item in list_:
            result = b"".join((result, bytes((len(item),)), item))
        if not properties_contain_str:
            # If there are no str keys or values, we can use the properties
            # as-is, without decoding them, otherwise calling
            # self.properties will lazy decode them, which is expensive.
            if TYPE_CHECKING:
                self._properties = cast(dict[bytes, bytes | None], properties)
            else:
                self._properties = properties
        self.text = result
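        # Worked example (illustrative): {b"path": b"/", b"bool": None} packs
        # to b"\x06path=/\x04bool" -- each entry is a length byte followed by
        # "key=value", or just "key" for a value-less (None) attribute.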

    def _set_text(self, text: bytes) -> None:
        """Sets properties and text given a text field"""
        if text == self.text:
            return
        self.text = text
        # Clear the properties cache
        self._properties = None
        self._decoded_properties = None

    def _generate_decoded_properties(self) -> None:
        """Generates decoded properties from the properties"""
        self._decoded_properties = {
            k.decode("ascii", "replace"): None if v is None else v.decode("utf-8", "replace")
            for k, v in self.properties.items()
        }

    def _unpack_text_into_properties(self) -> None:
        """Unpacks the text field into properties"""
        text = self.text
        end = len(text)
        if end == 0:
            # Properties should be set atomically
            # in case another thread is reading them
            self._properties = {}
            return

        index = 0
        properties: dict[bytes, bytes | None] = {}
        while index < end:
            length = text[index]
            index += 1
            key_value = text[index : index + length]
            key_sep_value = key_value.partition(b"=")
            key = key_sep_value[0]
            if key not in properties:
                properties[key] = key_sep_value[2] or None
            index += length

        self._properties = properties
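        # Worked example (illustrative): b"\x06path=/\x04bool" unpacks to
        # {b"path": b"/", b"bool": None}; duplicate keys keep the first
        # occurrence, per the `key not in properties` check above.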

    def get_name(self) -> str:
        """Name accessor"""
        return self._name[: len(self._name) - len(self.type) - 1]

    def _get_ip_addresses_from_cache_lifo(
        self, zc: Zeroconf, now: float_, type: int_
    ) -> list[ZeroconfIPv4Address | ZeroconfIPv6Address]:
        """Set IPv6 addresses from the cache."""
        address_list: list[ZeroconfIPv4Address | ZeroconfIPv6Address] = []
        for record in self._get_address_records_from_cache_by_type(zc, type):
            if record.is_expired(now):
                continue
            ip_addr = get_ip_address_object_from_record(record)
            if ip_addr is not None and ip_addr not in address_list:
                address_list.append(ip_addr)
        address_list.reverse()  # Reverse to get LIFO order
        return address_list

    def _set_ipv6_addresses_from_cache(self, zc: Zeroconf, now: float_) -> None:
        """Set IPv6 addresses from the cache."""
        if TYPE_CHECKING:
            self._ipv6_addresses = cast(
                list[ZeroconfIPv6Address],
                self._get_ip_addresses_from_cache_lifo(zc, now, _TYPE_AAAA),
            )
        else:
            self._ipv6_addresses = self._get_ip_addresses_from_cache_lifo(zc, now, _TYPE_AAAA)

    def _set_ipv4_addresses_from_cache(self, zc: Zeroconf, now: float_) -> None:
        """Set IPv4 addresses from the cache."""
        if TYPE_CHECKING:
            self._ipv4_addresses = cast(
                list[ZeroconfIPv4Address],
                self._get_ip_addresses_from_cache_lifo(zc, now, _TYPE_A),
            )
        else:
            self._ipv4_addresses = self._get_ip_addresses_from_cache_lifo(zc, now, _TYPE_A)

    def async_update_records(self, zc: Zeroconf, now: float_, records: list[RecordUpdate]) -> None:
        """Updates service information from a DNS record.

        This method will be run in the event loop.
        """
        new_records_futures = self._new_records_futures
        updated: bool = False
        for record_update in records:
            updated |= self._process_record_threadsafe(zc, record_update.new, now)
        if updated and new_records_futures:
            _resolve_all_futures_to_none(new_records_futures)

    def _process_record_threadsafe(self, zc: Zeroconf, record: DNSRecord, now: float_) -> bool:
        """Thread safe record updating.

        Returns True if a new record was added.
        """
        if record.is_expired(now):
            return False

        record_key = record.key
        record_type = type(record)
        if record_type is DNSAddress and record_key == self.server_key:
            dns_address_record = record
            if TYPE_CHECKING:
                assert isinstance(dns_address_record, DNSAddress)
            ip_addr = get_ip_address_object_from_record(dns_address_record)
            if ip_addr is None:
                log.warning(
                    "Encountered invalid address while processing %s: %s",
                    dns_address_record,
                    dns_address_record.address,
                )
                return False

            if ip_addr.version == 4:
                if TYPE_CHECKING:
                    assert isinstance(ip_addr, ZeroconfIPv4Address)
                ipv4_addresses = self._ipv4_addresses
                if ip_addr not in ipv4_addresses:
                    ipv4_addresses.insert(0, ip_addr)
                    return True
                # Compare the addresses as integers via zc_integer, since
                # by default IPv4Address.__eq__ compares both version and
                # integer value, which is more than we need here because
                # we know the version is 4.
                if ip_addr.zc_integer != ipv4_addresses[0].zc_integer:
                    ipv4_addresses.remove(ip_addr)
                    ipv4_addresses.insert(0, ip_addr)

                return False

            if TYPE_CHECKING:
                assert isinstance(ip_addr, ZeroconfIPv6Address)
            ipv6_addresses = self._ipv6_addresses
            if ip_addr not in self._ipv6_addresses:
                ipv6_addresses.insert(0, ip_addr)
                return True
            # Compare the addresses as integers via zc_integer, since
            # by default IPv6Address.__eq__ compares both version and
            # integer value, which is more than we need here because
            # we know the version is 6.
            if ip_addr.zc_integer != self._ipv6_addresses[0].zc_integer:
                ipv6_addresses.remove(ip_addr)
                ipv6_addresses.insert(0, ip_addr)

            return False

        if record_key != self.key:
            return False

        if record_type is DNSText:
            dns_text_record = record
            if TYPE_CHECKING:
                assert isinstance(dns_text_record, DNSText)
            self._set_text(dns_text_record.text)
            return True

        if record_type is DNSService:
            dns_service_record = record
            if TYPE_CHECKING:
                assert isinstance(dns_service_record, DNSService)
            old_server_key = self.server_key
            self._name = dns_service_record.name
            self.key = dns_service_record.key
            self.server = dns_service_record.server
            self.server_key = dns_service_record.server_key
            self.port = dns_service_record.port
            self.weight = dns_service_record.weight
            self.priority = dns_service_record.priority
            if old_server_key != self.server_key:
                self._set_ipv4_addresses_from_cache(zc, now)
                self._set_ipv6_addresses_from_cache(zc, now)
            return True

        return False

    def dns_addresses(
        self,
        override_ttl: int | None = None,
        version: IPVersion = IPVersion.All,
    ) -> list[DNSAddress]:
        """Return matching DNSAddress from ServiceInfo."""
        return self._dns_addresses(override_ttl, version)

    def _dns_addresses(
        self,
        override_ttl: int | None,
        version: IPVersion,
    ) -> list[DNSAddress]:
        """Return matching DNSAddress from ServiceInfo."""
        cacheable = version is IPVersion.All and override_ttl is None
        if self._dns_address_cache is not None and cacheable:
            return self._dns_address_cache
        name = self.server or self._name
        ttl = override_ttl if override_ttl is not None else self.host_ttl
        class_ = _CLASS_IN_UNIQUE
        version_value = version.value
        records = [
            DNSAddress(
                name,
                _TYPE_AAAA if ip_addr.version == 6 else _TYPE_A,
                class_,
                ttl,
                ip_addr.packed,
                created=0.0,
            )
            for ip_addr in self._ip_addresses_by_version_value(version_value)
        ]
        if cacheable:
            self._dns_address_cache = records
        return records

    def dns_pointer(self, override_ttl: int | None = None) -> DNSPointer:
        """Return DNSPointer from ServiceInfo."""
        return self._dns_pointer(override_ttl)

    def _dns_pointer(self, override_ttl: int | None) -> DNSPointer:
        """Return DNSPointer from ServiceInfo."""
        cacheable = override_ttl is None
        if self._dns_pointer_cache is not None and cacheable:
            return self._dns_pointer_cache
        record = DNSPointer(
            self.type,
            _TYPE_PTR,
            _CLASS_IN,
            override_ttl if override_ttl is not None else self.other_ttl,
            self._name,
            0.0,
        )
        if cacheable:
            self._dns_pointer_cache = record
        return record

    def dns_service(self, override_ttl: int | None = None) -> DNSService:
        """Return DNSService from ServiceInfo."""
        return self._dns_service(override_ttl)

    def _dns_service(self, override_ttl: int | None) -> DNSService:
        """Return DNSService from ServiceInfo."""
        cacheable = override_ttl is None
        if self._dns_service_cache is not None and cacheable:
            return self._dns_service_cache
        port = self.port
        if TYPE_CHECKING:
            assert isinstance(port, int)
        record = DNSService(
            self._name,
            _TYPE_SRV,
            _CLASS_IN_UNIQUE,
            override_ttl if override_ttl is not None else self.host_ttl,
            self.priority,
            self.weight,
            port,
            self.server or self._name,
            0.0,
        )
        if cacheable:
            self._dns_service_cache = record
        return record

    def dns_text(self, override_ttl: int | None = None) -> DNSText:
        """Return DNSText from ServiceInfo."""
        return self._dns_text(override_ttl)

    def _dns_text(self, override_ttl: int | None) -> DNSText:
        """Return DNSText from ServiceInfo."""
        cacheable = override_ttl is None
        if self._dns_text_cache is not None and cacheable:
            return self._dns_text_cache
        record = DNSText(
            self._name,
            _TYPE_TXT,
            _CLASS_IN_UNIQUE,
            override_ttl if override_ttl is not None else self.other_ttl,
            self.text,
            0.0,
        )
        if cacheable:
            self._dns_text_cache = record
        return record

    def dns_nsec(self, missing_types: list[int], override_ttl: int | None = None) -> DNSNsec:
        """Return DNSNsec from ServiceInfo."""
        return self._dns_nsec(missing_types, override_ttl)

    def _dns_nsec(self, missing_types: list[int], override_ttl: int | None) -> DNSNsec:
        """Return DNSNsec from ServiceInfo."""
        return DNSNsec(
            self._name,
            _TYPE_NSEC,
            _CLASS_IN_UNIQUE,
            override_ttl if override_ttl is not None else self.host_ttl,
            self._name,
            missing_types,
            0.0,
        )

    def get_address_and_nsec_records(self, override_ttl: int | None = None) -> set[DNSRecord]:
        """Build a set of address records and NSEC records for non-present record types."""
        return self._get_address_and_nsec_records(override_ttl)

    def _get_address_and_nsec_records(self, override_ttl: int | None) -> set[DNSRecord]:
        """Build a set of address records and NSEC records for non-present record types."""
        cacheable = override_ttl is None
        if self._get_address_and_nsec_records_cache is not None and cacheable:
            return self._get_address_and_nsec_records_cache
        missing_types: set[int] = _ADDRESS_RECORD_TYPES.copy()
        records: set[DNSRecord] = set()
        for dns_address in self._dns_addresses(override_ttl, IPVersion.All):
            missing_types.discard(dns_address.type)
            records.add(dns_address)
        if missing_types:
            assert self.server is not None, "Service server must be set for NSEC record."
            records.add(self._dns_nsec(list(missing_types), override_ttl))
        if cacheable:
            self._get_address_and_nsec_records_cache = records
        return records

    def _get_address_records_from_cache_by_type(self, zc: Zeroconf, _type: int_) -> list[DNSAddress]:
        """Get the addresses from the cache."""
        if self.server_key is None:
            return []
        cache = zc.cache
        if TYPE_CHECKING:
            records = cast(
                list[DNSAddress],
                cache.get_all_by_details(self.server_key, _type, _CLASS_IN),
            )
        else:
            records = cache.get_all_by_details(self.server_key, _type, _CLASS_IN)
        return records

    def set_server_if_missing(self) -> None:
        """Set the server if it is missing.

        This function is for backwards compatibility.
        """
        if self.server is None:
            self.server = self._name
            self.server_key = self.key

    def load_from_cache(self, zc: Zeroconf, now: float_ | None = None) -> bool:
        """Populate the service info from the cache.

        This method is designed to be threadsafe.
        """
        return self._load_from_cache(zc, now or current_time_millis())

    def _load_from_cache(self, zc: Zeroconf, now: float_) -> bool:
        """Populate the service info from the cache.

        This method is designed to be threadsafe.
        """
        cache = zc.cache
        original_server_key = self.server_key
        cached_srv_record = cache.get_by_details(self._name, _TYPE_SRV, _CLASS_IN)
        if cached_srv_record:
            self._process_record_threadsafe(zc, cached_srv_record, now)
        cached_txt_record = cache.get_by_details(self._name, _TYPE_TXT, _CLASS_IN)
        if cached_txt_record:
            self._process_record_threadsafe(zc, cached_txt_record, now)
        if original_server_key == self.server_key:
            # If there is a srv which changes the server_key,
            # A and AAAA will already be loaded from the cache
            # and we do not want to do it twice
            for record in self._get_address_records_from_cache_by_type(zc, _TYPE_A):
                self._process_record_threadsafe(zc, record, now)
            for record in self._get_address_records_from_cache_by_type(zc, _TYPE_AAAA):
                self._process_record_threadsafe(zc, record, now)
        return self._is_complete

    @property
    def _is_complete(self) -> bool:
        """The ServiceInfo has all expected properties."""
        return bool(self.text is not None and (self._ipv4_addresses or self._ipv6_addresses))

    def request(
        self,
        zc: Zeroconf,
        timeout: float,
        question_type: DNSQuestionType | None = None,
        addr: str | None = None,
        port: int = _MDNS_PORT,
    ) -> bool:
        """Returns true if the service could be discovered on the
        network, and updates this object with details discovered.

        While it is not expected during normal operation,
        this function may raise EventLoopBlocked if the underlying
        call to `async_request` cannot be completed.

        :param zc: Zeroconf instance
        :param timeout: time in milliseconds to wait for a response
        :param question_type: question type to ask
        :param addr: address to send the request to
        :param port: port to send the request to
        """
        assert zc.loop is not None, "Zeroconf instance must have a loop, was it not started?"
        assert zc.loop.is_running(), "Zeroconf instance loop must be running, was it already stopped?"
        if zc.loop == get_running_loop():
            raise RuntimeError("Use AsyncServiceInfo.async_request from the event loop")
        return bool(
            run_coro_with_timeout(
                self.async_request(zc, timeout, question_type, addr, port),
                zc.loop,
                timeout,
            )
        )

    def _get_initial_delay(self) -> float_:
        return _LISTENER_TIME

    def _get_random_delay(self) -> int_:
        return randint(*_AVOID_SYNC_DELAY_RANDOM_INTERVAL)

    async def async_request(
        self,
        zc: Zeroconf,
        timeout: float,
        question_type: DNSQuestionType | None = None,
        addr: str | None = None,
        port: int = _MDNS_PORT,
    ) -> bool:
        """Returns true if the service could be discovered on the
        network, and updates this object with details discovered.

        This method will be run in the event loop.

        Passing addr and port is optional, and will default to the
        mDNS multicast address and port. This is useful for directing
        requests to a specific host that may be able to respond across
        subnets.

        :param zc: Zeroconf instance
        :param timeout: time in milliseconds to wait for a response
        :param question_type: question type to ask
        :param addr: address to send the request to
        :param port: port to send the request to
        """
        if not zc.started:
            await zc.async_wait_for_start()

        now = current_time_millis()

        if self._load_from_cache(zc, now):
            return True

        if TYPE_CHECKING:
            assert zc.loop is not None

        first_request = True
        delay = self._get_initial_delay()
        next_ = now
        last = now + timeout
        try:
            zc.async_add_listener(self, None)
            while not self._is_complete:
                if last <= now:
                    return False
                if next_ <= now:
                    this_question_type = (
                        question_type or QU_QUESTION if first_request else question_type or QM_QUESTION
                    )
                    out = self._generate_request_query(zc, now, this_question_type)
                    first_request = False
                    # All questions may have been suppressed by the
                    # question history, in which case there is nothing
                    # to send, but we keep waiting for answers in case
                    # another client on the network is asking the same
                    # question or the answers have not arrived yet.
                    if out.questions:
                        zc.async_send(out, addr, port)
                    next_ = now + delay
                    next_ += self._get_random_delay()
                    if this_question_type is QM_QUESTION and delay < _DUPLICATE_QUESTION_INTERVAL:
                        # If we just asked a QM question, we need to
                        # wait at least the duplicate question interval
                        # before asking another QM question, otherwise
                        # it is likely to be suppressed by the question
                        # history of the remote responder.
                        delay = _DUPLICATE_QUESTION_INTERVAL

                await self.async_wait(min(next_, last) - now, zc.loop)
                now = current_time_millis()
        finally:
            zc.async_remove_listener(self)

        return True
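
    # Usage sketch from within a running event loop (illustrative only; the
    # AsyncZeroconf wiring and the names below are assumptions):
    #
    #     from zeroconf.asyncio import AsyncZeroconf
    #     aiozc = AsyncZeroconf()
    #     info = AsyncServiceInfo("_http._tcp.local.", "myserver._http._tcp.local.")
    #     if await info.async_request(aiozc.zeroconf, 3000.0):
    #         print(info.parsed_addresses(), info.port)
    #     await aiozc.async_close()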

    def _add_question_with_known_answers(
        self,
        out: DNSOutgoing,
        qu_question: bool,
        question_history: QuestionHistory,
        cache: DNSCache,
        now: float_,
        name: str_,
        type_: int_,
        class_: int_,
        skip_if_known_answers: bool,
    ) -> None:
        """Add a question with known answers if its not suppressed."""
        known_answers = {
            answer for answer in cache.get_all_by_details(name, type_, class_) if not answer.is_stale(now)
        }
        if skip_if_known_answers and known_answers:
            return
        question = DNSQuestion(name, type_, class_)
        if qu_question:
            question.unicast = True
        elif question_history.suppresses(question, now, known_answers):
            return
        else:
            question_history.add_question_at_time(question, now, known_answers)
        out.add_question(question)
        for answer in known_answers:
            out.add_answer_at_time(answer, now)

    def _generate_request_query(
        self, zc: Zeroconf, now: float_, question_type: DNSQuestionType
    ) -> DNSOutgoing:
        """Generate the request query."""
        out = DNSOutgoing(_FLAGS_QR_QUERY)
        name = self._name
        server = self.server or name
        cache = zc.cache
        history = zc.question_history
        qu_question = question_type is QU_QUESTION
        if _TYPE_SRV in self._query_record_types:
            self._add_question_with_known_answers(
                out, qu_question, history, cache, now, name, _TYPE_SRV, _CLASS_IN, True
            )
        if _TYPE_TXT in self._query_record_types:
            self._add_question_with_known_answers(
                out, qu_question, history, cache, now, name, _TYPE_TXT, _CLASS_IN, True
            )
        if _TYPE_A in self._query_record_types:
            self._add_question_with_known_answers(
                out, qu_question, history, cache, now, server, _TYPE_A, _CLASS_IN, False
            )
        if _TYPE_AAAA in self._query_record_types:
            self._add_question_with_known_answers(
                out, qu_question, history, cache, now, server, _TYPE_AAAA, _CLASS_IN, False
            )
        return out

    def __repr__(self) -> str:
        """String representation"""
        return "{}({})".format(
            type(self).__name__,
            ", ".join(
                f"{name}={getattr(self, name)!r}"
                for name in (
                    "type",
                    "name",
                    "addresses",
                    "port",
                    "weight",
                    "priority",
                    "server",
                    "properties",
                    "interface_index",
                )
            ),
        )


class AsyncServiceInfo(ServiceInfo):
    """An async version of ServiceInfo."""


class AddressResolver(ServiceInfo):
    """Resolve a host name to an IP address."""

    def __init__(self, server: str) -> None:
        """Initialize the AddressResolver."""
        super().__init__(server, server, server=server)
        self._query_record_types = _TYPE_A_AAAA_RECORDS

    @property
    def _is_complete(self) -> bool:
        """The ServiceInfo has all expected properties."""
        return bool(self._ipv4_addresses) or bool(self._ipv6_addresses)


class AddressResolverIPv6(ServiceInfo):
    """Resolve a host name to an IPv6 address."""

    def __init__(self, server: str) -> None:
        """Initialize the AddressResolver."""
        super().__init__(server, server, server=server)
        self._query_record_types = _TYPE_AAAA_RECORDS

    @property
    def _is_complete(self) -> bool:
        """The ServiceInfo has all expected properties."""
        return bool(self._ipv6_addresses)


class AddressResolverIPv4(ServiceInfo):
    """Resolve a host name to an IPv4 address."""

    def __init__(self, server: str) -> None:
        """Initialize the AddressResolver."""
        super().__init__(server, server, server=server)
        self._query_record_types = _TYPE_A_RECORDS

    @property
    def _is_complete(self) -> bool:
        """The ServiceInfo has all expected properties."""
        return bool(self._ipv4_addresses)
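

# Usage sketch (illustrative only; "myhost.local." and ``zc`` are assumptions).
# The resolvers reuse the ServiceInfo request machinery to fetch A/AAAA records
# for a bare host name:
#
#     resolver = AddressResolverIPv4("myhost.local.")
#     if resolver.request(zc, 3000.0):
#         print(resolver.parsed_addresses())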
07070100000050000081A400000000000000000000000167C7AD16000002ED000000000000000000000000000000000000003C00000000python-zeroconf-0.146.0/src/zeroconf/_services/registry.pxd
import cython

from .info cimport ServiceInfo


cdef class ServiceRegistry:

    cdef cython.dict _services
    cdef public cython.dict types
    cdef public cython.dict servers
    cdef public bint has_entries

    @cython.locals(
        record_list=cython.list,
    )
    cdef cython.list _async_get_by_index(self, cython.dict records, str key)

    cdef _add(self, ServiceInfo info)

    @cython.locals(
        info=ServiceInfo,
        old_service_info=ServiceInfo
    )
    cdef _remove(self, cython.list infos)

    cpdef ServiceInfo async_get_info_name(self, str name)

    cpdef cython.list async_get_types(self)

    cpdef cython.list async_get_infos_type(self, str type_)

    cpdef cython.list async_get_infos_server(self, str server)
07070100000051000081A400000000000000000000000167C7AD160000104D000000000000000000000000000000000000003B00000000python-zeroconf-0.146.0/src/zeroconf/_services/registry.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

from .._exceptions import ServiceNameAlreadyRegistered
from .info import ServiceInfo

_str = str


class ServiceRegistry:
    """A registry to keep track of services.

    The registry must only be accessed from
    the event loop as it is not thread safe.
    """

    __slots__ = ("_services", "has_entries", "servers", "types")

    def __init__(
        self,
    ) -> None:
        """Create the ServiceRegistry class."""
        self._services: dict[str, ServiceInfo] = {}
        self.types: dict[str, list] = {}
        self.servers: dict[str, list] = {}
        self.has_entries: bool = False

    def async_add(self, info: ServiceInfo) -> None:
        """Add a new service to the registry."""
        self._add(info)

    def async_remove(self, info: list[ServiceInfo] | ServiceInfo) -> None:
        """Remove a new service from the registry."""
        self._remove(info if isinstance(info, list) else [info])

    def async_update(self, info: ServiceInfo) -> None:
        """Update new service in the registry."""
        self._remove([info])
        self._add(info)

    def async_get_service_infos(self) -> list[ServiceInfo]:
        """Return all ServiceInfo."""
        return list(self._services.values())

    def async_get_info_name(self, name: str) -> ServiceInfo | None:
        """Return all ServiceInfo for the name."""
        return self._services.get(name)

    def async_get_types(self) -> list[str]:
        """Return all types."""
        return list(self.types)

    def async_get_infos_type(self, type_: str) -> list[ServiceInfo]:
        """Return all ServiceInfo matching type."""
        return self._async_get_by_index(self.types, type_)

    def async_get_infos_server(self, server: str) -> list[ServiceInfo]:
        """Return all ServiceInfo matching server."""
        return self._async_get_by_index(self.servers, server)

    def _async_get_by_index(self, records: dict[str, list], key: _str) -> list[ServiceInfo]:
        """Return all ServiceInfo matching the index."""
        record_list = records.get(key)
        if record_list is None:
            return []
        return [self._services[name] for name in record_list]

    def _add(self, info: ServiceInfo) -> None:
        """Add a new service under the lock."""
        assert info.server_key is not None, "ServiceInfo must have a server"
        if info.key in self._services:
            raise ServiceNameAlreadyRegistered

        info.async_clear_cache()
        self._services[info.key] = info
        self.types.setdefault(info.type.lower(), []).append(info.key)
        self.servers.setdefault(info.server_key, []).append(info.key)
        self.has_entries = True

    def _remove(self, infos: list[ServiceInfo]) -> None:
        """Remove a services under the lock."""
        for info in infos:
            old_service_info = self._services.get(info.key)
            if old_service_info is None:
                continue
            assert old_service_info.server_key is not None
            self.types[old_service_info.type.lower()].remove(info.key)
            self.servers[old_service_info.server_key].remove(info.key)
            del self._services[info.key]

        self.has_entries = bool(self._services)
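

# Usage sketch (illustrative only; ``info`` is a registered ServiceInfo and all
# calls must run in the event loop thread):
#
#     registry = ServiceRegistry()
#     registry.async_add(info)
#     registry.async_get_infos_type("_http._tcp.local.")  # -> [info]
#     registry.async_remove(info)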
07070100000052000081A400000000000000000000000167C7AD1600000B67000000000000000000000000000000000000003800000000python-zeroconf-0.146.0/src/zeroconf/_services/types.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

import time

from .._core import Zeroconf
from .._services import ServiceListener
from .._utils.net import InterfaceChoice, InterfacesType, IPVersion
from ..const import _SERVICE_TYPE_ENUMERATION_NAME
from .browser import ServiceBrowser


class ZeroconfServiceTypes(ServiceListener):
    """
    Return all of the advertised services on any local networks.
    """

    def __init__(self) -> None:
        """Keep track of found services in a set."""
        self.found_services: set[str] = set()

    def add_service(self, zc: Zeroconf, type_: str, name: str) -> None:
        """Service added."""
        self.found_services.add(name)

    def update_service(self, zc: Zeroconf, type_: str, name: str) -> None:
        """Service updated."""

    def remove_service(self, zc: Zeroconf, type_: str, name: str) -> None:
        """Service removed."""

    @classmethod
    def find(
        cls,
        zc: Zeroconf | None = None,
        timeout: int | float = 5,
        interfaces: InterfacesType = InterfaceChoice.All,
        ip_version: IPVersion | None = None,
    ) -> tuple[str, ...]:
        """
        Return all of the advertised services on any local networks.

        :param zc: Zeroconf() instance.  Pass one in if you already have an
                instance running or if non-default interfaces are needed
        :param timeout: seconds to wait for any responses
        :param interfaces: interfaces to listen on.
        :param ip_version: IP protocol version to use.
        :return: tuple of service type strings
        """
        local_zc = zc or Zeroconf(interfaces=interfaces, ip_version=ip_version)
        listener = cls()
        browser = ServiceBrowser(local_zc, _SERVICE_TYPE_ENUMERATION_NAME, listener=listener)

        # wait for responses
        time.sleep(timeout)

        browser.cancel()

        # close down anything we opened
        if zc is None:
            local_zc.close()

        return tuple(sorted(listener.found_services))
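

# Usage sketch (illustrative only). ``find`` blocks the calling thread while a
# ServiceBrowser collects "_services._dns-sd._udp.local." enumeration answers:
#
#     types = ZeroconfServiceTypes.find(timeout=5)
#     print(types)  # e.g. ("_http._tcp.local.", "_ipp._tcp.local.")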
07070100000053000081A400000000000000000000000167C7AD16000007D7000000000000000000000000000000000000003300000000python-zeroconf-0.146.0/src/zeroconf/_transport.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

import asyncio
import socket


class _WrappedTransport:
    """A wrapper for transports."""

    __slots__ = (
        "fileno",
        "is_ipv6",
        "sock",
        "sock_name",
        "transport",
    )

    def __init__(
        self,
        transport: asyncio.DatagramTransport,
        is_ipv6: bool,
        sock: socket.socket,
        fileno: int,
        sock_name: tuple,
    ) -> None:
        """Initialize the wrapped transport.

        These attributes are used when sending packets.
        """
        self.transport = transport
        self.is_ipv6 = is_ipv6
        self.sock = sock
        self.fileno = fileno
        self.sock_name = sock_name


def make_wrapped_transport(transport: asyncio.DatagramTransport) -> _WrappedTransport:
    """Make a wrapped transport."""
    sock: socket.socket = transport.get_extra_info("socket")
    return _WrappedTransport(
        transport=transport,
        is_ipv6=sock.family == socket.AF_INET6,
        sock=sock,
        fileno=sock.fileno(),
        sock_name=sock.getsockname(),
    )
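

# Usage sketch (illustrative only; ``loop`` and ``sock`` are assumptions). The
# wrapper caches socket attributes so the hot send path avoids repeated
# get_extra_info()/getsockname() calls:
#
#     transport, _ = await loop.create_datagram_endpoint(
#         asyncio.DatagramProtocol, sock=sock
#     )
#     wrapped = make_wrapped_transport(transport)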
07070100000054000081A400000000000000000000000167C7AD16000000BD000000000000000000000000000000000000003200000000python-zeroconf-0.146.0/src/zeroconf/_updates.pxd
import cython


cdef class RecordUpdateListener:

    cpdef void async_update_records(self, object zc, double now, cython.list records)

    cpdef void async_update_records_complete(self)
07070100000055000081A400000000000000000000000167C7AD1600000B0F000000000000000000000000000000000000003100000000python-zeroconf-0.146.0/src/zeroconf/_updates.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

from typing import TYPE_CHECKING

from ._dns import DNSRecord
from ._record_update import RecordUpdate

if TYPE_CHECKING:
    from ._core import Zeroconf


float_ = float


class RecordUpdateListener:
    """Base call for all record listeners.

    All listeners passed to async_add_listener should use RecordUpdateListener
    as a base class. In the future it will be required.
    """

    def update_record(  # pylint: disable=no-self-use
        self, zc: Zeroconf, now: float, record: DNSRecord
    ) -> None:
        """Update a single record.

        This method is deprecated and will be removed in a future version.
        update_records should be implemented instead.
        """
        raise RuntimeError("update_record is deprecated and will be removed in a future version.")

    def async_update_records(self, zc: Zeroconf, now: float_, records: list[RecordUpdate]) -> None:
        """Update multiple records in one shot.

        All records that are received in a single packet are passed
        to update_records.

        This implementation is a compatibility shim to ensure older code
        that uses RecordUpdateListener as a base class will continue to
        get calls to update_record. This method will raise
        NotImplementedError in a future version.

        At this point the cache will not have the new records.

        Records are passed as a list of RecordUpdate.  This
        allows consumers of async_update_records to avoid cache lookups.

        This method will be run in the event loop.
        """
        for record in records:
            self.update_record(zc, now, record.new)

    def async_update_records_complete(self) -> None:
        """Called when a record update has completed for all handlers.

        At this point the cache will have the new records.

        This method will be run in the event loop.
        """
07070100000056000041ED00000000000000000000000267C7AD1600000000000000000000000000000000000000000000002C00000000python-zeroconf-0.146.0/src/zeroconf/_utils07070100000057000081A400000000000000000000000167C7AD16000003B2000000000000000000000000000000000000003800000000python-zeroconf-0.146.0/src/zeroconf/_utils/__init__.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations
07070100000058000081A400000000000000000000000167C7AD16000013B2000000000000000000000000000000000000003700000000python-zeroconf-0.146.0/src/zeroconf/_utils/asyncio.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

import asyncio
import concurrent.futures
import contextlib
import sys
from collections.abc import Awaitable, Coroutine
from typing import Any

from .._exceptions import EventLoopBlocked
from ..const import _LOADED_SYSTEM_TIMEOUT
from .time import millis_to_seconds

# The combined timeouts should be lower than _CLOSE_TIMEOUT + _WAIT_FOR_LOOP_TASKS_TIMEOUT
_TASK_AWAIT_TIMEOUT = 1
_GET_ALL_TASKS_TIMEOUT = 3
_WAIT_FOR_LOOP_TASKS_TIMEOUT = 3  # Must be larger than _TASK_AWAIT_TIMEOUT


def _set_future_none_if_not_done(fut: asyncio.Future) -> None:
    """Set a future to None if it is not done."""
    if not fut.done():  # pragma: no branch
        fut.set_result(None)


def _resolve_all_futures_to_none(futures: set[asyncio.Future]) -> None:
    """Resolve all futures to None."""
    for fut in futures:
        _set_future_none_if_not_done(fut)
    futures.clear()


async def wait_for_future_set_or_timeout(
    loop: asyncio.AbstractEventLoop, future_set: set[asyncio.Future], timeout: float
) -> None:
    """Wait for a future or timeout (in milliseconds)."""
    future = loop.create_future()
    future_set.add(future)
    handle = loop.call_later(millis_to_seconds(timeout), _set_future_none_if_not_done, future)
    try:
        await future
    finally:
        handle.cancel()
        future_set.discard(future)


async def wait_future_or_timeout(future: asyncio.Future[bool | None], timeout: float) -> None:
    """Wait for a future or timeout."""
    loop = asyncio.get_running_loop()
    handle = loop.call_later(timeout, _set_future_none_if_not_done, future)
    try:
        await future
    except asyncio.CancelledError:
        if sys.version_info >= (3, 11) and (task := asyncio.current_task()) and task.cancelling():
            raise
    finally:
        handle.cancel()


async def _async_get_all_tasks(loop: asyncio.AbstractEventLoop) -> set[asyncio.Task]:
    """Return all tasks running."""
    await asyncio.sleep(0)  # flush out any call_soon_threadsafe
    # If there are multiple event loops running, all_tasks is not
    # safe EVEN WHEN CALLED FROM THE EVENTLOOP
    # under PyPy so we have to try a few times.
    for _ in range(3):
        with contextlib.suppress(RuntimeError):
            return asyncio.all_tasks(loop)
    return set()


async def _wait_for_loop_tasks(wait_tasks: set[asyncio.Task]) -> None:
    """Wait for the event loop thread we started to shutdown."""
    await asyncio.wait(wait_tasks, timeout=_TASK_AWAIT_TIMEOUT)


async def await_awaitable(aw: Awaitable) -> None:
    """Wait on an awaitable and the task it returns."""
    task = await aw
    await task


def run_coro_with_timeout(aw: Coroutine, loop: asyncio.AbstractEventLoop, timeout: float) -> Any:
    """Run a coroutine with a timeout.

    The timeout should only be used as a safeguard to prevent
    the program from blocking forever. The timeout should
    never be expected to be reached during normal operation.

    While not expected during normal operations, the
    function raises `EventLoopBlocked` if the coroutine takes
    longer to complete than the timeout.
    """
    try:
        return asyncio.run_coroutine_threadsafe(aw, loop).result(
            millis_to_seconds(timeout) + _LOADED_SYSTEM_TIMEOUT
        )
    except concurrent.futures.TimeoutError as ex:
        raise EventLoopBlocked from ex
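

# Usage sketch (illustrative only; ``some_coroutine`` and ``loop`` are
# assumptions). The timeout is in milliseconds, matching current_time_millis:
#
#     result = run_coro_with_timeout(some_coroutine(), loop, 3000.0)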


def shutdown_loop(loop: asyncio.AbstractEventLoop) -> None:
    """Wait for pending tasks and stop an event loop."""
    pending_tasks = set(
        asyncio.run_coroutine_threadsafe(_async_get_all_tasks(loop), loop).result(_GET_ALL_TASKS_TIMEOUT)
    )
    pending_tasks -= {task for task in pending_tasks if task.done()}
    if pending_tasks:
        asyncio.run_coroutine_threadsafe(_wait_for_loop_tasks(pending_tasks), loop).result(
            _WAIT_FOR_LOOP_TASKS_TIMEOUT
        )
    loop.call_soon_threadsafe(loop.stop)


def get_running_loop() -> asyncio.AbstractEventLoop | None:
    """Check if an event loop is already running."""
    with contextlib.suppress(RuntimeError):
        return asyncio.get_running_loop()
    return None
07070100000059000081A400000000000000000000000167C7AD160000012D000000000000000000000000000000000000003A00000000python-zeroconf-0.146.0/src/zeroconf/_utils/ipaddress.pxdimport cython

from .._dns cimport DNSAddress

cdef bint TYPE_CHECKING


cpdef get_ip_address_object_from_record(DNSAddress record)


@cython.locals(address_str=str)
cpdef str_without_scope_id(object addr)


cpdef ip_bytes_and_scope_to_address(object addr, object scope_id)


cdef object cached_ip_addresses_wrapper
0707010000005A000081A400000000000000000000000167C7AD16000014CC000000000000000000000000000000000000003900000000python-zeroconf-0.146.0/src/zeroconf/_utils/ipaddress.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

from functools import cache, lru_cache
from ipaddress import AddressValueError, IPv4Address, IPv6Address, NetmaskValueError
from typing import Any

from .._dns import DNSAddress
from ..const import _TYPE_AAAA

bytes_ = bytes
int_ = int


class ZeroconfIPv4Address(IPv4Address):
    __slots__ = ("__hash__", "_is_link_local", "_is_loopback", "_is_unspecified", "_str", "zc_integer")

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """Initialize a new IPv4 address."""
        super().__init__(*args, **kwargs)
        self._str = super().__str__()
        self._is_link_local = super().is_link_local
        self._is_unspecified = super().is_unspecified
        self._is_loopback = super().is_loopback
        self.__hash__ = cache(lambda: IPv4Address.__hash__(self))  # type: ignore[method-assign]
        self.zc_integer = int(self)

    def __str__(self) -> str:
        """Return the string representation of the IPv4 address."""
        return self._str

    @property
    def is_link_local(self) -> bool:
        """Return True if this is a link-local address."""
        return self._is_link_local

    @property
    def is_unspecified(self) -> bool:
        """Return True if this is an unspecified address."""
        return self._is_unspecified

    @property
    def is_loopback(self) -> bool:
        """Return True if this is a loop back."""
        return self._is_loopback


class ZeroconfIPv6Address(IPv6Address):
    __slots__ = ("__hash__", "_is_link_local", "_is_loopback", "_is_unspecified", "_str", "zc_integer")

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """Initialize a new IPv6 address."""
        super().__init__(*args, **kwargs)
        self._str = super().__str__()
        self._is_link_local = super().is_link_local
        self._is_unspecified = super().is_unspecified
        self._is_loopback = super().is_loopback
        self.__hash__ = cache(lambda: IPv6Address.__hash__(self))  # type: ignore[method-assign]
        self.zc_integer = int(self)

    def __str__(self) -> str:
        """Return the string representation of the IPv6 address."""
        return self._str

    @property
    def is_link_local(self) -> bool:
        """Return True if this is a link-local address."""
        return self._is_link_local

    @property
    def is_unspecified(self) -> bool:
        """Return True if this is an unspecified address."""
        return self._is_unspecified

    @property
    def is_loopback(self) -> bool:
        """Return True if this is a loop back."""
        return self._is_loopback


@lru_cache(maxsize=512)
def _cached_ip_addresses(
    address: str | bytes | int,
) -> ZeroconfIPv4Address | ZeroconfIPv6Address | None:
    """Cache IP addresses."""
    try:
        return ZeroconfIPv4Address(address)
    except (AddressValueError, NetmaskValueError):
        pass

    try:
        return ZeroconfIPv6Address(address)
    except (AddressValueError, NetmaskValueError):
        return None


cached_ip_addresses_wrapper = _cached_ip_addresses
cached_ip_addresses = cached_ip_addresses_wrapper


def get_ip_address_object_from_record(
    record: DNSAddress,
) -> ZeroconfIPv4Address | ZeroconfIPv6Address | None:
    """Get the IP address object from the record."""
    if record.type == _TYPE_AAAA and record.scope_id:
        return ip_bytes_and_scope_to_address(record.address, record.scope_id)
    return cached_ip_addresses_wrapper(record.address)


def ip_bytes_and_scope_to_address(
    address: bytes_, scope: int_
) -> ZeroconfIPv4Address | ZeroconfIPv6Address | None:
    """Convert the bytes and scope to an IP address object."""
    base_address = cached_ip_addresses_wrapper(address)
    if base_address is not None and base_address.is_link_local:
        # Avoid expensive __format__ call by using PyUnicode_Join
        return cached_ip_addresses_wrapper("".join((str(base_address), "%", str(scope))))
    return base_address


def str_without_scope_id(addr: ZeroconfIPv4Address | ZeroconfIPv6Address) -> str:
    """Return the string representation of the address without the scope id."""
    if addr.version == 6:
        address_str = str(addr)
        return address_str.partition("%")[0]
    return str(addr)
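
# Usage sketch (illustrative only; the address is an assumption):
#
#     addr = cached_ip_addresses("fe80::1%3")  # ZeroconfIPv6Address or None
#     if addr is not None:
#         str_without_scope_id(addr)  # -> "fe80::1"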


__all__ = (
    "cached_ip_addresses",
    "get_ip_address_object_from_record",
    "ip_bytes_and_scope_to_address",
    "str_without_scope_id",
)
0707010000005B000081A400000000000000000000000167C7AD1600001AEC000000000000000000000000000000000000003400000000python-zeroconf-0.146.0/src/zeroconf/_utils/name.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

from functools import lru_cache

from .._exceptions import BadTypeInNameException
from ..const import (
    _HAS_A_TO_Z,
    _HAS_ASCII_CONTROL_CHARS,
    _HAS_ONLY_A_TO_Z_NUM_HYPHEN,
    _HAS_ONLY_A_TO_Z_NUM_HYPHEN_UNDERSCORE,
    _LOCAL_TRAILER,
    _NONTCP_PROTOCOL_LOCAL_TRAILER,
    _TCP_PROTOCOL_LOCAL_TRAILER,
)


@lru_cache(maxsize=512)
def service_type_name(type_: str, *, strict: bool = True) -> str:  # pylint: disable=too-many-branches
    """
    Validate a fully qualified service name, instance or subtype. [rfc6763]

    Returns fully qualified service name.

    Domain names used by mDNS-SD take the following forms:

                   <sn> . <_tcp|_udp> . local.
      <Instance> . <sn> . <_tcp|_udp> . local.
      <sub>._sub . <sn> . <_tcp|_udp> . local.

    1) must end with 'local.'

      This is true because we are implementing mDNS and since the 'm' means
      multi-cast, the 'local.' domain is mandatory.

    2) local is preceded with either '_udp.' or '_tcp.' unless
       strict is False

    3) service name <sn> precedes <_tcp|_udp> unless
       strict is False

      The rules for Service Names [RFC6335] state that they may be no more
      than fifteen characters long (not counting the mandatory underscore),
      consisting of only letters, digits, and hyphens, must begin and end
      with a letter or digit, must not contain consecutive hyphens, and
      must contain at least one letter.

    The instance name <Instance> and sub type <sub> may be up to 63 bytes.

    The <Instance> portion of the Service Instance Name is a user-
    friendly name consisting of arbitrary Net-Unicode text [RFC5198]. It
    MUST NOT contain ASCII control characters (byte values 0x00-0x1F and
    0x7F) [RFC20] but otherwise is allowed to contain any characters,
    without restriction, including spaces, uppercase, lowercase,
    punctuation -- including dots -- accented characters, non-Roman text,
    and anything else that may be represented using Net-Unicode.

    :param type_: Type, SubType or service name to validate
    :return: fully qualified service name (eg: _http._tcp.local.)
    """
    if len(type_) > 256:
        # https://datatracker.ietf.org/doc/html/rfc6763#section-7.2
        raise BadTypeInNameException(f"Full name ({type_}) must be > 256 bytes")

    if type_.endswith((_TCP_PROTOCOL_LOCAL_TRAILER, _NONTCP_PROTOCOL_LOCAL_TRAILER)):
        remaining = type_[: -len(_TCP_PROTOCOL_LOCAL_TRAILER)].split(".")
        trailer = type_[-len(_TCP_PROTOCOL_LOCAL_TRAILER) :]
        has_protocol = True
    elif strict:
        raise BadTypeInNameException(
            f"Type '{type_}' must end with "
            f"'{_TCP_PROTOCOL_LOCAL_TRAILER}' or '{_NONTCP_PROTOCOL_LOCAL_TRAILER}'"
        )
    elif type_.endswith(_LOCAL_TRAILER):
        remaining = type_[: -len(_LOCAL_TRAILER)].split(".")
        trailer = type_[-len(_LOCAL_TRAILER) + 1 :]
        has_protocol = False
    else:
        raise BadTypeInNameException(f"Type '{type_}' must end with '{_LOCAL_TRAILER}'")

    if strict or has_protocol:
        service_name = remaining.pop()
        if not service_name:
            raise BadTypeInNameException("No Service name found")

        if len(remaining) == 1 and len(remaining[0]) == 0:
            raise BadTypeInNameException(f"Type '{type_}' must not start with '.'")

        if service_name[0] != "_":
            raise BadTypeInNameException(f"Service name ({service_name}) must start with '_'")

        test_service_name = service_name[1:]

        if strict and len(test_service_name) > 15:
            # https://datatracker.ietf.org/doc/html/rfc6763#section-7.2
            raise BadTypeInNameException(f"Service name ({test_service_name}) must be <= 15 bytes")

        if "--" in test_service_name:
            raise BadTypeInNameException(f"Service name ({test_service_name}) must not contain '--'")

        if "-" in (test_service_name[0], test_service_name[-1]):
            raise BadTypeInNameException(f"Service name ({test_service_name}) may not start or end with '-'")

        if not _HAS_A_TO_Z.search(test_service_name):
            raise BadTypeInNameException(
                f"Service name ({test_service_name}) must contain at least one letter (eg: 'A-Z')"
            )

        allowed_characters_re = (
            _HAS_ONLY_A_TO_Z_NUM_HYPHEN if strict else _HAS_ONLY_A_TO_Z_NUM_HYPHEN_UNDERSCORE
        )

        if not allowed_characters_re.search(test_service_name):
            raise BadTypeInNameException(
                f"Service name ({test_service_name}) "
                "must contain only these characters: "
                "A-Z, a-z, 0-9, hyphen ('-')" + ("" if strict else ", underscore ('_')")
            )
    else:
        service_name = ""

    if remaining and remaining[-1] == "_sub":
        remaining.pop()
        if len(remaining) == 0 or len(remaining[0]) == 0:
            raise BadTypeInNameException("_sub requires a subtype name")

    if len(remaining) > 1:
        remaining = [".".join(remaining)]

    if remaining:
        length = len(remaining[0].encode("utf-8"))
        if length > 63:
            raise BadTypeInNameException(f"Too long: '{remaining[0]}'")

        if _HAS_ASCII_CONTROL_CHARS.search(remaining[0]):
            raise BadTypeInNameException(
                f"Ascii control character 0x00-0x1F and 0x7F illegal in '{remaining[0]}'"
            )

    return service_name + trailer
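

# Behavior sketch (illustrative; the inputs are assumptions):
#
#     service_type_name("_http._tcp.local.")             # "_http._tcp.local."
#     service_type_name("My Server._http._tcp.local.")   # "_http._tcp.local."
#     service_type_name("name.local.", strict=False)     # "local."
#     service_type_name("name.local.")                   # raises BadTypeInNameException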


def possible_types(name: str) -> set[str]:
    """Build a set of all possible types from a fully qualified name."""
    labels = name.split(".")
    label_count = len(labels)
    types = set()
    for count in range(label_count):
        parts = labels[label_count - count - 4 :]
        if not parts[0].startswith("_"):
            break
        types.add(".".join(parts))
    return types
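

# Behavior sketch (illustrative; the name is an assumption):
#
#     possible_types("Kitchen Printer._sub._http._tcp.local.")
#     # -> {"_sub._http._tcp.local.", "_http._tcp.local."}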


cached_possible_types = lru_cache(maxsize=256)(possible_types)
0707010000005C000081A400000000000000000000000167C7AD160000421C000000000000000000000000000000000000003300000000python-zeroconf-0.146.0/src/zeroconf/_utils/net.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

import enum
import errno
import ipaddress
import socket
import struct
import sys
from collections.abc import Sequence
from typing import Any, Union, cast

import ifaddr

from .._logger import log
from ..const import _IPPROTO_IPV6, _MDNS_ADDR, _MDNS_ADDR6, _MDNS_PORT


@enum.unique
class InterfaceChoice(enum.Enum):
    Default = 1
    All = 2


InterfacesType = Union[Sequence[Union[str, int, tuple[tuple[str, int, int], int]]], InterfaceChoice]


@enum.unique
class ServiceStateChange(enum.Enum):
    Added = 1
    Removed = 2
    Updated = 3


@enum.unique
class IPVersion(enum.Enum):
    V4Only = 1
    V6Only = 2
    All = 3


# utility functions


def _is_v6_address(addr: bytes) -> bool:
    return len(addr) == 16


def _encode_address(address: str) -> bytes:
    is_ipv6 = ":" in address
    address_family = socket.AF_INET6 if is_ipv6 else socket.AF_INET
    return socket.inet_pton(address_family, address)


def get_all_addresses() -> list[str]:
    return list({addr.ip for iface in ifaddr.get_adapters() for addr in iface.ips if addr.is_IPv4})  # type: ignore[misc]


def get_all_addresses_v6() -> list[tuple[tuple[str, int, int], int]]:
    # IPv6 multicast uses positive indexes for interfaces
    # TODO: What about multi-address interfaces?
    return list(
        {(addr.ip, iface.index) for iface in ifaddr.get_adapters() for addr in iface.ips if addr.is_IPv6}  # type: ignore[misc]
    )


def ip6_to_address_and_index(adapters: list[ifaddr.Adapter], ip: str) -> tuple[tuple[str, int, int], int]:
    if "%" in ip:
        ip = ip[: ip.index("%")]  # Strip scope_id.
    ipaddr = ipaddress.ip_address(ip)
    for adapter in adapters:
        for adapter_ip in adapter.ips:
            # IPv6 addresses are represented as tuples
            if (
                adapter.index is not None
                and isinstance(adapter_ip.ip, tuple)
                and ipaddress.ip_address(adapter_ip.ip[0]) == ipaddr
            ):
                return (adapter_ip.ip, adapter.index)

    raise RuntimeError(f"No adapter found for IP address {ip}")


def interface_index_to_ip6_address(adapters: list[ifaddr.Adapter], index: int) -> tuple[str, int, int]:
    for adapter in adapters:
        if adapter.index == index:
            for adapter_ip in adapter.ips:
                # IPv6 addresses are represented as tuples
                if isinstance(adapter_ip.ip, tuple):
                    return adapter_ip.ip

    raise RuntimeError(f"No adapter found for index {index}")


def ip6_addresses_to_indexes(
    interfaces: Sequence[str | int | tuple[tuple[str, int, int], int]],
) -> list[tuple[tuple[str, int, int], int]]:
    """Convert IPv6 interface addresses to interface indexes.

    IPv4 addresses are ignored.

    :param interfaces: List of IP addresses and indexes.
    :returns: List of indexes.
    """
    result = []
    adapters = ifaddr.get_adapters()

    for iface in interfaces:
        if isinstance(iface, int):
            result.append((interface_index_to_ip6_address(adapters, iface), iface))  # type: ignore[arg-type]
        elif isinstance(iface, str) and ipaddress.ip_address(iface).version == 6:
            result.append(ip6_to_address_and_index(adapters, iface))  # type: ignore[arg-type]

    return result


def normalize_interface_choice(
    choice: InterfacesType, ip_version: IPVersion = IPVersion.V4Only
) -> list[str | tuple[tuple[str, int, int], int]]:
    """Convert the interfaces choice into internal representation.

    :param choice: `InterfaceChoice` or list of interface addresses or indexes (IPv6 only).
    :param ip_version: IP version to use (ignored if `choice` is a list).
    :returns: List of IP addresses (for IPv4) and indexes (for IPv6).
    """
    result: list[str | tuple[tuple[str, int, int], int]] = []
    if choice is InterfaceChoice.Default:
        if ip_version != IPVersion.V4Only:
            # IPv6 multicast uses interface 0 to mean the default
            result.append((("", 0, 0), 0))
        if ip_version != IPVersion.V6Only:
            result.append("0.0.0.0")
    elif choice is InterfaceChoice.All:
        if ip_version != IPVersion.V4Only:
            result.extend(get_all_addresses_v6())
        if ip_version != IPVersion.V6Only:
            result.extend(get_all_addresses())
        if not result:
            raise RuntimeError(
                f"No interfaces to listen on, check that any interfaces have IP version {ip_version}"
            )
    elif isinstance(choice, list):
        # First, take IPv4 addresses.
        result = [i for i in choice if isinstance(i, str) and ipaddress.ip_address(i).version == 4]
        # Unlike IP_ADD_MEMBERSHIP, IPV6_JOIN_GROUP requires interface indexes.
        result += ip6_addresses_to_indexes(choice)
    else:
        raise TypeError(f"choice must be a list or InterfaceChoice, got {choice!r}")
    return result
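

# Behavior sketch (illustrative; the address is an assumption):
#
#     normalize_interface_choice(InterfaceChoice.Default, IPVersion.All)
#     # -> [(("", 0, 0), 0), "0.0.0.0"]
#     normalize_interface_choice(["192.168.1.5"])
#     # -> ["192.168.1.5"]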


def disable_ipv6_only_or_raise(s: socket.socket) -> None:
    """Make V6 sockets work for both V4 and V6 (required for Windows)."""
    try:
        s.setsockopt(_IPPROTO_IPV6, socket.IPV6_V6ONLY, False)
    except OSError:
        log.error("Support for dual V4-V6 sockets is not present, use IPVersion.V4 or IPVersion.V6")
        raise


def set_so_reuseport_if_available(s: socket.socket) -> None:
    """Set SO_REUSEADDR on a socket if available."""
    # SO_REUSEADDR should be equivalent to SO_REUSEPORT for
    # multicast UDP sockets (p 731, "TCP/IP Illustrated,
    # Volume 2"), but some BSD-derived systems require
    # SO_REUSEPORT to be specified explicitly.  Also, not all
    # versions of Python have SO_REUSEPORT available.
    # Catch OSError (socket.error is an alias of it) on kernels older
    # than 3.9, which lack SO_REUSEPORT support.
    if not hasattr(socket, "SO_REUSEPORT"):
        return

    try:
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)  # pylint: disable=no-member
    except OSError as err:
        if err.errno != errno.ENOPROTOOPT:
            raise


def set_mdns_port_socket_options_for_ip_version(
    s: socket.socket,
    bind_addr: tuple[str] | tuple[str, int, int],
    ip_version: IPVersion,
) -> None:
    """Set ttl/hops and loop for mdns port."""
    if ip_version != IPVersion.V6Only:
        ttl = struct.pack(b"B", 255)
        loop = struct.pack(b"B", 1)
        # OpenBSD needs the ttl and loop values for the IP_MULTICAST_TTL and
        # IP_MULTICAST_LOOP socket options as an unsigned char.
        try:
            s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
            s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, loop)
        except OSError as e:
            if bind_addr[0] != "" or get_errno(e) != errno.EINVAL:  # Fails to set on MacOS
                raise

    if ip_version != IPVersion.V4Only:
        # However, char doesn't work here (at least on Linux)
        s.setsockopt(_IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, 255)
        s.setsockopt(_IPPROTO_IPV6, socket.IPV6_MULTICAST_LOOP, True)


def new_socket(
    bind_addr: tuple[str] | tuple[str, int, int],
    port: int = _MDNS_PORT,
    ip_version: IPVersion = IPVersion.V4Only,
    apple_p2p: bool = False,
) -> socket.socket | None:
    log.debug(
        "Creating new socket with port %s, ip_version %s, apple_p2p %s and bind_addr %r",
        port,
        ip_version,
        apple_p2p,
        bind_addr,
    )
    socket_family = socket.AF_INET if ip_version == IPVersion.V4Only else socket.AF_INET6
    s = socket.socket(socket_family, socket.SOCK_DGRAM)

    if ip_version == IPVersion.All:
        disable_ipv6_only_or_raise(s)

    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    set_so_reuseport_if_available(s)

    if port == _MDNS_PORT:
        set_mdns_port_socket_options_for_ip_version(s, bind_addr, ip_version)

    if apple_p2p:
        # SO_RECV_ANYIF = 0x1104
        # https://opensource.apple.com/source/xnu/xnu-4570.41.2/bsd/sys/socket.h
        s.setsockopt(socket.SOL_SOCKET, 0x1104, 1)

    bind_tup = (bind_addr[0], port, *bind_addr[1:])
    try:
        s.bind(bind_tup)
    except OSError as ex:
        if ex.errno == errno.EADDRNOTAVAIL:
            log.warning(
                "Address not available when binding to %s, it is expected to happen on some systems",
                bind_tup,
            )
            return None
        if ex.errno == errno.EADDRINUSE:
            if sys.platform.startswith("darwin") or sys.platform.startswith("freebsd"):
                log.error(
                    "Address in use when binding to %s; "
                    "On BSD based systems sharing the same port with another "
                    "stack may require processes to run with the same UID; "
                    "When using avahi, make sure disallow-other-stacks is set"
                    " to no in avahi-daemon.conf",
                    bind_tup,
                )
            else:
                log.error(
                    "Address in use when binding to %s; "
                    "When using avahi, make sure disallow-other-stacks is set"
                    " to no in avahi-daemon.conf",
                    bind_tup,
                )
            # This is still a fatal error as it is not going to work
            # if we can't hear the traffic coming in.
        raise
    log.debug("Created socket %s", s)
    return s


def add_multicast_member(
    listen_socket: socket.socket,
    interface: str | tuple[tuple[str, int, int], int],
) -> bool:
    # This is based on assumptions in normalize_interface_choice
    is_v6 = isinstance(interface, tuple)
    err_einval = {errno.EINVAL}
    if sys.platform == "win32":
        # No WSAEINVAL definition in typeshed
        err_einval |= {cast(Any, errno).WSAEINVAL}  # pylint: disable=no-member
    log.debug("Adding %r (socket %d) to multicast group", interface, listen_socket.fileno())
    try:
        if is_v6:
            try:
                mdns_addr6_bytes = socket.inet_pton(socket.AF_INET6, _MDNS_ADDR6)
            except OSError:
                log.info(
                    "Unable to translate IPv6 address when adding %s to multicast group, "
                    "this can happen if IPv6 is disabled on the system",
                    interface,
                )
                return False
            iface_bin = struct.pack("@I", cast(int, interface[1]))
            _value = mdns_addr6_bytes + iface_bin
            listen_socket.setsockopt(_IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, _value)
        else:
            _value = socket.inet_aton(_MDNS_ADDR) + socket.inet_aton(cast(str, interface))
            listen_socket.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, _value)
    except OSError as e:
        _errno = get_errno(e)
        if _errno == errno.EADDRINUSE:
            log.info(
                "Address in use when adding %s to multicast group, it is expected to happen on some systems",
                interface,
            )
            return False
        if _errno == errno.ENOBUFS:
            # https://github.com/python-zeroconf/python-zeroconf/issues/1510
            if not is_v6 and sys.platform.startswith("linux"):
                log.warning(
                    "No buffer space available when adding %s to multicast group, "
                    "try increasing `net.ipv4.igmp_max_memberships` to `1024` in sysctl.conf",
                    interface,
                )
            else:
                log.warning(
                    "No buffer space available when adding %s to multicast group.",
                    interface,
                )
            return False
        if _errno == errno.EADDRNOTAVAIL:
            log.info(
                "Address not available when adding %s to multicast "
                "group, it is expected to happen on some systems",
                interface,
            )
            return False
        if _errno in err_einval:
            log.info(
                "Interface of %s does not support multicast, it is expected in WSL",
                interface,
            )
            return False
        if _errno == errno.ENOPROTOOPT:
            log.info(
                "Failed to set socket option on %s, this can happen if "
                "the network adapter is in a disconnected state",
                interface,
            )
            return False
        if is_v6 and _errno == errno.ENODEV:
            log.info(
                "Address in use when adding %s to multicast group, "
                "it is expected to happen when the device does not have ipv6",
                interface,
            )
            return False
        raise
    return True


def new_respond_socket(
    interface: str | tuple[tuple[str, int, int], int],
    apple_p2p: bool = False,
) -> socket.socket | None:
    is_v6 = isinstance(interface, tuple)
    respond_socket = new_socket(
        ip_version=(IPVersion.V6Only if is_v6 else IPVersion.V4Only),
        apple_p2p=apple_p2p,
        bind_addr=cast(tuple[tuple[str, int, int], int], interface)[0] if is_v6 else (cast(str, interface),),
    )
    if not respond_socket:
        return None
    log.debug("Configuring socket %s with multicast interface %s", respond_socket, interface)
    if is_v6:
        iface_bin = struct.pack("@I", cast(int, interface[1]))
        respond_socket.setsockopt(_IPPROTO_IPV6, socket.IPV6_MULTICAST_IF, iface_bin)
    else:
        respond_socket.setsockopt(
            socket.IPPROTO_IP,
            socket.IP_MULTICAST_IF,
            socket.inet_aton(cast(str, interface)),
        )
    return respond_socket


def create_sockets(
    interfaces: InterfacesType = InterfaceChoice.All,
    unicast: bool = False,
    ip_version: IPVersion = IPVersion.V4Only,
    apple_p2p: bool = False,
) -> tuple[socket.socket | None, list[socket.socket]]:
    if unicast:
        listen_socket = None
    else:
        listen_socket = new_socket(ip_version=ip_version, apple_p2p=apple_p2p, bind_addr=("",))

    normalized_interfaces = normalize_interface_choice(interfaces, ip_version)

    # If we are using InterfaceChoice.Default we can use
    # a single socket to listen and respond.
    if not unicast and interfaces is InterfaceChoice.Default:
        for i in normalized_interfaces:
            add_multicast_member(cast(socket.socket, listen_socket), i)
        return listen_socket, [cast(socket.socket, listen_socket)]

    respond_sockets = []

    for i in normalized_interfaces:
        if not unicast:
            if add_multicast_member(cast(socket.socket, listen_socket), i):
                respond_socket = new_respond_socket(i, apple_p2p=apple_p2p)
            else:
                respond_socket = None
        else:
            respond_socket = new_socket(
                port=0,
                ip_version=ip_version,
                apple_p2p=apple_p2p,
                bind_addr=i[0] if isinstance(i, tuple) else (i,),
            )

        if respond_socket is not None:
            respond_sockets.append(respond_socket)

    return listen_socket, respond_sockets


def get_errno(e: OSError) -> int:
    return cast(int, e.args[0])


def can_send_to(ipv6_socket: bool, address: str) -> bool:
    """Check if the address type matches the socket type.

    This function does not validate if the address is a valid
    ipv6 or ipv4 address.
    """
    return ":" in address if ipv6_socket else ":" not in address


def autodetect_ip_version(interfaces: InterfacesType) -> IPVersion:
    """Auto detect the IP version when it is not provided."""
    if isinstance(interfaces, list):
        has_v6 = any(
            isinstance(i, int) or (isinstance(i, str) and ipaddress.ip_address(i).version == 6)
            for i in interfaces
        )
        has_v4 = any(isinstance(i, str) and ipaddress.ip_address(i).version == 4 for i in interfaces)
        if has_v4 and has_v6:
            return IPVersion.All
        if has_v6:
            return IPVersion.V6Only

    return IPVersion.V4Only
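

# A minimal usage sketch for create_sockets (the address below is
# illustrative):
#
#   listen_socket, respond_sockets = create_sockets(
#       interfaces=["192.168.1.5"], ip_version=IPVersion.V4Only
#   )
#   # listen_socket is the shared multicast listener (or None when
#   # unicast=True); each respond socket has its multicast interface set.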
0707010000005D000081A400000000000000000000000167C7AD160000004C000000000000000000000000000000000000003500000000python-zeroconf-0.146.0/src/zeroconf/_utils/time.pxd
cpdef double current_time_millis()

cpdef millis_to_seconds(double millis)
0707010000005E000081A400000000000000000000000167C7AD1600000556000000000000000000000000000000000000003400000000python-zeroconf-0.146.0/src/zeroconf/_utils/time.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

import time

_float = float


def current_time_millis() -> _float:
    """Current time in milliseconds.

    The current implementation uses `time.monotonic`
    but may change in the future.

    The design requires the time to match asyncio.loop.time()
    """
    return time.monotonic() * 1000


def millis_to_seconds(millis: _float) -> _float:
    """Convert milliseconds to seconds."""
    return millis / 1000.0
0707010000005F000081A400000000000000000000000167C7AD1600002B6C000000000000000000000000000000000000003000000000python-zeroconf-0.146.0/src/zeroconf/asyncio.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

import asyncio
import contextlib
from collections.abc import Awaitable
from types import TracebackType  # used in type hints
from typing import Callable

from ._core import Zeroconf
from ._dns import DNSQuestionType
from ._exceptions import NotRunningException
from ._services import ServiceListener
from ._services.browser import _ServiceBrowserBase
from ._services.info import AsyncServiceInfo, ServiceInfo
from ._services.types import ZeroconfServiceTypes
from ._utils.net import InterfaceChoice, InterfacesType, IPVersion
from .const import _BROWSER_TIME, _MDNS_PORT, _SERVICE_TYPE_ENUMERATION_NAME

__all__ = [
    "AsyncServiceBrowser",
    "AsyncServiceInfo",
    "AsyncZeroconf",
    "AsyncZeroconfServiceTypes",
]


class AsyncServiceBrowser(_ServiceBrowserBase):
    """Used to browse for a service for specific type(s).

    Constructor parameters are as follows:

    * `zeroconf`: A Zeroconf instance
    * `type_`: fully qualified service type name
    * `handlers`: ServiceListener or list of Callables that know how to process ServiceStateChange events
    * `listener`: ServiceListener
    * `addr`: address to send queries (will default to multicast)
    * `port`: port to send queries (will default to mdns 5353)
    * `delay`: The initial delay between queries, in milliseconds
    * `question_type`: The type of questions to ask (DNSQuestionType.QM or DNSQuestionType.QU)

    The listener object will have its add_service() and
    remove_service() methods called when this browser
    discovers changes in the services' availability.
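
    A minimal usage sketch (assumes a running event loop; ``MyListener``
    is an illustrative class implementing ServiceListener)::

        aiozc = AsyncZeroconf()
        browser = AsyncServiceBrowser(
            aiozc.zeroconf, "_http._tcp.local.", listener=MyListener()
        )
        # ... later ...
        await browser.async_cancel()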
    """

    def __init__(
        self,
        zeroconf: Zeroconf,
        type_: str | list,
        handlers: ServiceListener | list[Callable[..., None]] | None = None,
        listener: ServiceListener | None = None,
        addr: str | None = None,
        port: int = _MDNS_PORT,
        delay: int = _BROWSER_TIME,
        question_type: DNSQuestionType | None = None,
    ) -> None:
        super().__init__(zeroconf, type_, handlers, listener, addr, port, delay, question_type)
        self._async_start()

    async def async_cancel(self) -> None:
        """Cancel the browser."""
        self._async_cancel()

    async def __aenter__(self) -> AsyncServiceBrowser:
        return self

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        await self.async_cancel()
        return None


class AsyncZeroconfServiceTypes(ZeroconfServiceTypes):
    """An async version of ZeroconfServiceTypes."""

    @classmethod
    async def async_find(
        cls,
        aiozc: AsyncZeroconf | None = None,
        timeout: int | float = 5,
        interfaces: InterfacesType = InterfaceChoice.All,
        ip_version: IPVersion | None = None,
    ) -> tuple[str, ...]:
        """
        Return all of the advertised services on any local networks.

        :param aiozc: AsyncZeroconf() instance.  Pass one in if you already have an
                instance running or if non-default interfaces are needed
        :param timeout: seconds to wait for any responses
        :param interfaces: interfaces to listen on.
        :param ip_version: IP protocol version to use.
        :return: tuple of service type strings
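
        A minimal sketch (creates and closes its own AsyncZeroconf
        instance when ``aiozc`` is not passed)::

            type_names = await AsyncZeroconfServiceTypes.async_find(timeout=3)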
        """
        local_zc = aiozc or AsyncZeroconf(interfaces=interfaces, ip_version=ip_version)
        listener = cls()
        async_browser = AsyncServiceBrowser(
            local_zc.zeroconf, _SERVICE_TYPE_ENUMERATION_NAME, listener=listener
        )

        # wait for responses
        await asyncio.sleep(timeout)

        await async_browser.async_cancel()

        # close down anything we opened
        if aiozc is None:
            await local_zc.async_close()

        return tuple(sorted(listener.found_services))


class AsyncZeroconf:
    """Implementation of Zeroconf Multicast DNS Service Discovery

    Supports registration, unregistration, queries and browsing.

    The async version is currently a wrapper around Zeroconf which
    is now also async. It is expected that an asyncio event loop
    is already running before creating the AsyncZeroconf object.
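
    A minimal usage sketch (the service type and name are illustrative)::

        async with AsyncZeroconf() as aiozc:
            info = await aiozc.async_get_service_info(
                "_http._tcp.local.", "myserver._http._tcp.local."
            )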
    """

    def __init__(
        self,
        interfaces: InterfacesType = InterfaceChoice.All,
        unicast: bool = False,
        ip_version: IPVersion | None = None,
        apple_p2p: bool = False,
        zc: Zeroconf | None = None,
    ) -> None:
        """Creates an instance of the Zeroconf class, establishing
        multicast communications, and listening.

        :param interfaces: :class:`InterfaceChoice` or a list of IP addresses
            (IPv4 and IPv6) and interface indexes (IPv6 only).

            IPv6 notes for non-POSIX systems:
            * `InterfaceChoice.All` is an alias for `InterfaceChoice.Default`
              on Python versions before 3.8.

            Also, listening on loopback (``::1``) doesn't work; use a real address.
        :param ip_version: IP versions to support. If `interfaces` is a list, the default is detected
            from it. Otherwise defaults to V4 only for backward compatibility.
        :param apple_p2p: use AWDL interface (only macOS)
        """
        self.zeroconf = zc or Zeroconf(
            interfaces=interfaces,
            unicast=unicast,
            ip_version=ip_version,
            apple_p2p=apple_p2p,
        )
        self.async_browsers: dict[ServiceListener, AsyncServiceBrowser] = {}

    async def async_register_service(
        self,
        info: ServiceInfo,
        ttl: int | None = None,
        allow_name_change: bool = False,
        cooperating_responders: bool = False,
        strict: bool = True,
    ) -> Awaitable:
        """Registers service information to the network with a default TTL.
        Zeroconf will then respond to requests for information for that
        service.  The name of the service may be changed if needed to make
        it unique on the network. Additionally, multiple cooperating responders
        can register the same service on the network for resilience
        (if you want this behavior set `cooperating_responders` to `True`).

        The service will be broadcast in a task. This task is returned
        and therefore can be awaited if necessary.
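
        A minimal sketch (the type, name, address and port are illustrative;
        ``socket`` is the stdlib module)::

            info = ServiceInfo(
                "_http._tcp.local.",
                "myserver._http._tcp.local.",
                addresses=[socket.inet_aton("192.168.1.5")],
                port=8080,
            )
            await aiozc.async_register_service(info)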
        """
        return await self.zeroconf.async_register_service(
            info, ttl, allow_name_change, cooperating_responders, strict
        )

    async def async_unregister_all_services(self) -> None:
        """Unregister all registered services.

        Unlike async_register_service and async_unregister_service, this
        method does not return a future and is always expected to be
        awaited since it is only called at shutdown.
        """
        await self.zeroconf.async_unregister_all_services()

    async def async_unregister_service(self, info: ServiceInfo) -> Awaitable:
        """Unregister a service.

        The service will be broadcast in a task. This task is returned
        and therefore can be awaited if necessary.
        """
        return await self.zeroconf.async_unregister_service(info)

    async def async_update_service(self, info: ServiceInfo) -> Awaitable:
        """Registers service information to the network with a default TTL.
        Zeroconf will then respond to requests for information for that
        service.

        The service will be broadcast in a task. This task is returned
        and therefore can be awaited if necessary.
        """
        return await self.zeroconf.async_update_service(info)

    async def async_close(self) -> None:
        """Ends the background threads, and prevent this instance from
        servicing further queries."""
        if not self.zeroconf.done:
            with contextlib.suppress(NotRunningException):
                await self.zeroconf.async_wait_for_start(timeout=1.0)
        await self.async_remove_all_service_listeners()
        await self.async_unregister_all_services()
        await self.zeroconf._async_close()  # pylint: disable=protected-access

    async def async_get_service_info(
        self,
        type_: str,
        name: str,
        timeout: int = 3000,
        question_type: DNSQuestionType | None = None,
    ) -> AsyncServiceInfo | None:
        """Returns network's service information for a particular
        name and type, or None if no service matches by the timeout,
        which defaults to 3 seconds.

        :param type_: fully qualified service type name
        :param name: the name of the service
        :param timeout: milliseconds to wait for a response
        :param question_type: The type of questions to ask (DNSQuestionType.QM or DNSQuestionType.QU)
        """
        return await self.zeroconf.async_get_service_info(type_, name, timeout, question_type)

    async def async_add_service_listener(self, type_: str, listener: ServiceListener) -> None:
        """Adds a listener for a particular service type.  This object
        will then have its add_service and remove_service methods called when
        services of that type become available and unavailable."""
        await self.async_remove_service_listener(listener)
        self.async_browsers[listener] = AsyncServiceBrowser(self.zeroconf, type_, listener)

    async def async_remove_service_listener(self, listener: ServiceListener) -> None:
        """Removes a listener from the set that is currently listening."""
        if listener in self.async_browsers:
            await self.async_browsers[listener].async_cancel()
            del self.async_browsers[listener]

    async def async_remove_all_service_listeners(self) -> None:
        """Removes a listener from the set that is currently listening."""
        await asyncio.gather(
            *(self.async_remove_service_listener(listener) for listener in list(self.async_browsers))
        )

    async def __aenter__(self) -> AsyncZeroconf:
        return self

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        await self.async_close()
        return None
07070100000060000081A400000000000000000000000167C7AD1600001166000000000000000000000000000000000000002E00000000python-zeroconf-0.146.0/src/zeroconf/const.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

import re
import socket

# Some timing constants

_UNREGISTER_TIME = 125  # ms
_CHECK_TIME = 175  # ms
_REGISTER_TIME = 225  # ms
_LISTENER_TIME = 200  # ms
_BROWSER_TIME = 10000  # ms
_DUPLICATE_PACKET_SUPPRESSION_INTERVAL = 1000  # ms
_DUPLICATE_QUESTION_INTERVAL = 999  # ms # Must be 1ms less than _DUPLICATE_PACKET_SUPPRESSION_INTERVAL
_CACHE_CLEANUP_INTERVAL = 10  # s
_LOADED_SYSTEM_TIMEOUT = 10  # s
_STARTUP_TIMEOUT = 9  # s must be lower than _LOADED_SYSTEM_TIMEOUT
_ONE_SECOND = 1000  # ms

# If the system is loaded, or the event loop was blocked by another
# task doing I/O in the loop (this shouldn't happen, but it does in
# practice), we need a buffer timeout to ensure a coroutine can finish
# before the future times out.

# Some DNS constants

_MDNS_ADDR = "224.0.0.251"
_MDNS_ADDR6 = "ff02::fb"
_MDNS_PORT = 5353
_DNS_PORT = 53
_DNS_HOST_TTL = 120  # two minutes for host records (A, SRV etc) as-per RFC6762
_DNS_OTHER_TTL = 4500  # 75 minutes for non-host records (PTR, TXT etc) as-per RFC6762
# Currently we enforce a minimum TTL for PTR records to avoid
# ServiceBrowsers generating excessive refresh queries.
# Apple uses a 15s minimum TTL, however we do not have the same
# level of rate limiting and safeguards, so we use 1/4 of the recommended value
_DNS_PTR_MIN_TTL = _DNS_OTHER_TTL / 4
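# With the defaults above, this works out to 4500 / 4 = 1125 seconds.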

_DNS_PACKET_HEADER_LEN = 12

_MAX_MSG_TYPICAL = 1460  # unused
_MAX_MSG_ABSOLUTE = 8966

_FLAGS_QR_MASK = 0x8000  # query response mask
_FLAGS_QR_QUERY = 0x0000  # query
_FLAGS_QR_RESPONSE = 0x8000  # response

_FLAGS_AA = 0x0400  # Authoritative answer
_FLAGS_TC = 0x0200  # Truncated
_FLAGS_RD = 0x0100  # Recursion desired
_FLAGS_RA = 0x0080  # Recursion available

_FLAGS_Z = 0x0040  # Zero
_FLAGS_AD = 0x0020  # Authentic data
_FLAGS_CD = 0x0010  # Checking disabled
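
# These flags combine with bitwise OR; e.g. a typical mDNS response
# header in this codebase is built as _FLAGS_QR_RESPONSE | _FLAGS_AA
# (0x8400).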

_CLASS_IN = 1
_CLASS_CS = 2
_CLASS_CH = 3
_CLASS_HS = 4
_CLASS_NONE = 254
_CLASS_ANY = 255
_CLASS_MASK = 0x7FFF
_CLASS_UNIQUE = 0x8000
_CLASS_IN_UNIQUE = _CLASS_IN | _CLASS_UNIQUE
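# In mDNS, the top bit of the class field (_CLASS_UNIQUE, 0x8000) doubles
# as the cache-flush bit on answer records and the unicast-response bit
# on questions (RFC 6762).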

_TYPE_A = 1
_TYPE_NS = 2
_TYPE_MD = 3
_TYPE_MF = 4
_TYPE_CNAME = 5
_TYPE_SOA = 6
_TYPE_MB = 7
_TYPE_MG = 8
_TYPE_MR = 9
_TYPE_NULL = 10
_TYPE_WKS = 11
_TYPE_PTR = 12
_TYPE_HINFO = 13
_TYPE_MINFO = 14
_TYPE_MX = 15
_TYPE_TXT = 16
_TYPE_AAAA = 28
_TYPE_SRV = 33
_TYPE_NSEC = 47
_TYPE_ANY = 255

# Mapping constants to names

_CLASSES = {
    _CLASS_IN: "in",
    _CLASS_CS: "cs",
    _CLASS_CH: "ch",
    _CLASS_HS: "hs",
    _CLASS_NONE: "none",
    _CLASS_ANY: "any",
}

_TYPES = {
    _TYPE_A: "a",
    _TYPE_NS: "ns",
    _TYPE_MD: "md",
    _TYPE_MF: "mf",
    _TYPE_CNAME: "cname",
    _TYPE_SOA: "soa",
    _TYPE_MB: "mb",
    _TYPE_MG: "mg",
    _TYPE_MR: "mr",
    _TYPE_NULL: "null",
    _TYPE_WKS: "wks",
    _TYPE_PTR: "ptr",
    _TYPE_HINFO: "hinfo",
    _TYPE_MINFO: "minfo",
    _TYPE_MX: "mx",
    _TYPE_TXT: "txt",
    _TYPE_AAAA: "quada",
    _TYPE_SRV: "srv",
    _TYPE_ANY: "any",
    _TYPE_NSEC: "nsec",
}

_ADDRESS_RECORD_TYPES = {_TYPE_A, _TYPE_AAAA}

_HAS_A_TO_Z = re.compile(r"[A-Za-z]")
_HAS_ONLY_A_TO_Z_NUM_HYPHEN = re.compile(r"^[A-Za-z0-9\-]+$")
_HAS_ONLY_A_TO_Z_NUM_HYPHEN_UNDERSCORE = re.compile(r"^[A-Za-z0-9\-\_]+$")
_HAS_ASCII_CONTROL_CHARS = re.compile(r"[\x00-\x1f\x7f]")

_EXPIRE_REFRESH_TIME_PERCENT = 75

_LOCAL_TRAILER = ".local."
_TCP_PROTOCOL_LOCAL_TRAILER = "._tcp.local."
_NONTCP_PROTOCOL_LOCAL_TRAILER = "._udp.local."

# https://datatracker.ietf.org/doc/html/rfc6763#section-9
_SERVICE_TYPE_ENUMERATION_NAME = "_services._dns-sd._udp.local."

_IPPROTO_IPV6 = socket.IPPROTO_IPV6
07070100000061000081A400000000000000000000000167C7AD1600000000000000000000000000000000000000000000002E00000000python-zeroconf-0.146.0/src/zeroconf/py.typed07070100000062000041ED00000000000000000000000267C7AD1600000000000000000000000000000000000000000000001E00000000python-zeroconf-0.146.0/tests07070100000063000081A400000000000000000000000167C7AD1600000DD0000000000000000000000000000000000000002A00000000python-zeroconf-0.146.0/tests/__init__.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations

import asyncio
import socket
import time
from functools import cache
from unittest import mock

import ifaddr

from zeroconf import DNSIncoming, DNSQuestion, DNSRecord, Zeroconf
from zeroconf._history import QuestionHistory

_MONOTONIC_RESOLUTION = time.get_clock_info("monotonic").resolution


class QuestionHistoryWithoutSuppression(QuestionHistory):
    def suppresses(self, question: DNSQuestion, now: float, known_answers: set[DNSRecord]) -> bool:
        return False


def _inject_responses(zc: Zeroconf, msgs: list[DNSIncoming]) -> None:
    """Inject a DNSIncoming response."""
    assert zc.loop is not None

    async def _wait_for_response():
        for msg in msgs:
            zc.record_manager.async_updates_from_response(msg)

    asyncio.run_coroutine_threadsafe(_wait_for_response(), zc.loop).result()


def _inject_response(zc: Zeroconf, msg: DNSIncoming) -> None:
    """Inject a DNSIncoming response."""
    _inject_responses(zc, [msg])


def _wait_for_start(zc: Zeroconf) -> None:
    """Wait for all sockets to be up and running."""
    assert zc.loop is not None
    asyncio.run_coroutine_threadsafe(zc.async_wait_for_start(), zc.loop).result()


@cache
def has_working_ipv6():
    """Return True if the system can bind an IPv6 address."""
    if not socket.has_ipv6:
        return False

    sock = None
    try:
        sock = socket.socket(socket.AF_INET6)
        sock.bind(("::1", 0))
    except Exception:
        return False
    finally:
        if sock:
            sock.close()

    for iface in ifaddr.get_adapters():
        for addr in iface.ips:
            if addr.is_IPv6 and iface.index is not None:
                return True
    return False


def _clear_cache(zc: Zeroconf) -> None:
    zc.cache.cache.clear()
    zc.question_history.clear()


def time_changed_millis(millis: float | None = None) -> None:
    """Call all scheduled events for a time."""
    loop = asyncio.get_running_loop()
    loop_time = loop.time()
    if millis is not None:
        mock_seconds_into_future = millis / 1000
    else:
        mock_seconds_into_future = loop_time

    with mock.patch("time.monotonic", return_value=mock_seconds_into_future):
        for task in list(loop._scheduled):  # type: ignore[attr-defined]
            if not isinstance(task, asyncio.TimerHandle):
                continue
            if task.cancelled():
                continue

            future_seconds = task.when() - (loop_time + _MONOTONIC_RESOLUTION)

            if mock_seconds_into_future >= future_seconds:
                task._run()
                task.cancel()
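

# A usage sketch (hypothetical, inside a test with a running event loop):
# advance the mocked clock 10 seconds so timers due within that window fire:
#
#   time_changed_millis(zeroconf.current_time_millis() + 10_000)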
07070100000064000041ED00000000000000000000000267C7AD1600000000000000000000000000000000000000000000002900000000python-zeroconf-0.146.0/tests/benchmarks07070100000065000081A400000000000000000000000167C7AD1600000023000000000000000000000000000000000000003500000000python-zeroconf-0.146.0/tests/benchmarks/__init__.pyfrom __future__ import annotations
07070100000066000081A400000000000000000000000167C7AD160000158B000000000000000000000000000000000000003400000000python-zeroconf-0.146.0/tests/benchmarks/helpers.py"""Benchmark helpers."""

from __future__ import annotations

import socket

from zeroconf import DNSAddress, DNSOutgoing, DNSService, DNSText, const


def generate_packets() -> DNSOutgoing:
    out = DNSOutgoing(const._FLAGS_QR_RESPONSE | const._FLAGS_AA)
    address = socket.inet_pton(socket.AF_INET, "192.168.208.5")

    additionals = [
        {
            "name": "HASS Bridge ZJWH FF5137._hap._tcp.local.",
            "address": address,
            "port": 51832,
            "text": b"\x13md=HASS Bridge"
            b" ZJWH\x06pv=1.0\x14id=01:6B:30:FF:51:37\x05c#=12\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=L0m/aQ==",
        },
        {
            "name": "HASS Bridge 3K9A C2582A._hap._tcp.local.",
            "address": address,
            "port": 51834,
            "text": b"\x13md=HASS Bridge"
            b" 3K9A\x06pv=1.0\x14id=E2:AA:5B:C2:58:2A\x05c#=12\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=b2CnzQ==",
        },
        {
            "name": "Master Bed TV CEDB27._hap._tcp.local.",
            "address": address,
            "port": 51830,
            "text": b"\x10md=Master Bed"
            b" TV\x06pv=1.0\x14id=9E:B7:44:CE:DB:27\x05c#=18\x04s#=1\x04ff=0\x05"
            b"ci=31\x04sf=0\x0bsh=CVj1kw==",
        },
        {
            "name": "Living Room TV 921B77._hap._tcp.local.",
            "address": address,
            "port": 51833,
            "text": b"\x11md=Living Room"
            b" TV\x06pv=1.0\x14id=11:61:E7:92:1B:77\x05c#=17\x04s#=1\x04ff=0\x05"
            b"ci=31\x04sf=0\x0bsh=qU77SQ==",
        },
        {
            "name": "HASS Bridge ZC8X FF413D._hap._tcp.local.",
            "address": address,
            "port": 51829,
            "text": b"\x13md=HASS Bridge"
            b" ZC8X\x06pv=1.0\x14id=96:14:45:FF:41:3D\x05c#=12\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=b0QZlg==",
        },
        {
            "name": "HASS Bridge WLTF 4BE61F._hap._tcp.local.",
            "address": address,
            "port": 51837,
            "text": b"\x13md=HASS Bridge"
            b" WLTF\x06pv=1.0\x14id=E0:E7:98:4B:E6:1F\x04c#=2\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=ahAISA==",
        },
        {
            "name": "FrontdoorCamera 8941D1._hap._tcp.local.",
            "address": address,
            "port": 54898,
            "text": b"\x12md=FrontdoorCamera\x06pv=1.0\x14id=9F:B7:DC:89:41:D1\x04c#=2\x04"
            b"s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=0+MXmA==",
        },
        {
            "name": "HASS Bridge W9DN 5B5CC5._hap._tcp.local.",
            "address": address,
            "port": 51836,
            "text": b"\x13md=HASS Bridge"
            b" W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=6fLM5A==",
        },
        {
            "name": "HASS Bridge Y9OO EFF0A7._hap._tcp.local.",
            "address": address,
            "port": 51838,
            "text": b"\x13md=HASS Bridge"
            b" Y9OO\x06pv=1.0\x14id=D3:FE:98:EF:F0:A7\x04c#=2\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=u3bdfw==",
        },
        {
            "name": "Snooze Room TV 6B89B0._hap._tcp.local.",
            "address": address,
            "port": 51835,
            "text": b"\x11md=Snooze Room"
            b" TV\x06pv=1.0\x14id=5F:D5:70:6B:89:B0\x05c#=17\x04s#=1\x04ff=0\x05"
            b"ci=31\x04sf=0\x0bsh=xNTqsg==",
        },
        {
            "name": "AlexanderHomeAssistant 74651D._hap._tcp.local.",
            "address": address,
            "port": 54811,
            "text": b"\x19md=AlexanderHomeAssistant\x06pv=1.0\x14id=59:8A:0B:74:65:1D\x05"
            b"c#=14\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=ccZLPA==",
        },
        {
            "name": "HASS Bridge OS95 39C053._hap._tcp.local.",
            "address": address,
            "port": 51831,
            "text": b"\x13md=HASS Bridge"
            b" OS95\x06pv=1.0\x14id=7E:8C:E6:39:C0:53\x05c#=12\x04s#=1\x04ff=0\x04ci=2"
            b"\x04sf=0\x0bsh=Xfe5LQ==",
        },
    ]

    out.add_answer_at_time(
        DNSText(
            "HASS Bridge W9DN 5B5CC5._hap._tcp.local.",
            const._TYPE_TXT,
            const._CLASS_IN | const._CLASS_UNIQUE,
            const._DNS_OTHER_TTL,
            b"\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1"
            b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==",
        ),
        0,
    )

    for record in additionals:
        out.add_additional_answer(
            DNSService(
                record["name"],  # type: ignore
                const._TYPE_SRV,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_HOST_TTL,
                0,
                0,
                record["port"],  # type: ignore
                record["name"],  # type: ignore
            )
        )
        out.add_additional_answer(
            DNSText(
                record["name"],  # type: ignore
                const._TYPE_TXT,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_OTHER_TTL,
                record["text"],  # type: ignore
            )
        )
        out.add_additional_answer(
            DNSAddress(
                record["name"],  # type: ignore
                const._TYPE_A,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_HOST_TTL,
                record["address"],  # type: ignore
            )
        )

    return out
07070100000067000081A400000000000000000000000167C7AD1600000558000000000000000000000000000000000000003700000000python-zeroconf-0.146.0/tests/benchmarks/test_cache.pyfrom __future__ import annotations

from pytest_codspeed import BenchmarkFixture

from zeroconf import DNSCache, DNSPointer, current_time_millis
from zeroconf.const import _CLASS_IN, _TYPE_PTR


def test_add_expire_1000_records(benchmark: BenchmarkFixture) -> None:
    """Benchmark for DNSCache to expire 10000 records."""
    cache = DNSCache()
    now = current_time_millis()
    records = [
        DNSPointer(
            name=f"test{id}.local.",
            type_=_TYPE_PTR,
            class_=_CLASS_IN,
            ttl=60,
            alias=f"test{id}.local.",
            created=now + id,
        )
        for id in range(1000)
    ]

    @benchmark
    def _expire_records() -> None:
        cache.async_add_records(records)
        cache.async_expire(now + 100_000)


def test_expire_no_records_to_expire(benchmark: BenchmarkFixture) -> None:
    """Benchmark for DNSCache with 1000 records none to expire."""
    cache = DNSCache()
    now = current_time_millis()
    cache.async_add_records(
        DNSPointer(
            name=f"test{id}.local.",
            type_=_TYPE_PTR,
            class_=_CLASS_IN,
            ttl=60,
            alias=f"test{id}.local.",
            created=now + id,
        )
        for id in range(1000)
    )
    cache.async_expire(now)

    @benchmark
    def _expire_records() -> None:
        cache.async_expire(now)
07070100000068000081A400000000000000000000000167C7AD160000186C000000000000000000000000000000000000003A00000000python-zeroconf-0.146.0/tests/benchmarks/test_incoming.py"""Benchmark for DNSIncoming."""

from __future__ import annotations

import socket

from pytest_codspeed import BenchmarkFixture

from zeroconf import (
    DNSAddress,
    DNSIncoming,
    DNSNsec,
    DNSOutgoing,
    DNSService,
    DNSText,
    const,
)


def generate_packets() -> list[bytes]:
    out = DNSOutgoing(const._FLAGS_QR_RESPONSE | const._FLAGS_AA)
    address = socket.inet_pton(socket.AF_INET, "192.168.208.5")

    additionals = [
        {
            "name": "HASS Bridge ZJWH FF5137._hap._tcp.local.",
            "address": address,
            "port": 51832,
            "text": b"\x13md=HASS Bridge"
            b" ZJWH\x06pv=1.0\x14id=01:6B:30:FF:51:37\x05c#=12\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=L0m/aQ==",
        },
        {
            "name": "HASS Bridge 3K9A C2582A._hap._tcp.local.",
            "address": address,
            "port": 51834,
            "text": b"\x13md=HASS Bridge"
            b" 3K9A\x06pv=1.0\x14id=E2:AA:5B:C2:58:2A\x05c#=12\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=b2CnzQ==",
        },
        {
            "name": "Master Bed TV CEDB27._hap._tcp.local.",
            "address": address,
            "port": 51830,
            "text": b"\x10md=Master Bed"
            b" TV\x06pv=1.0\x14id=9E:B7:44:CE:DB:27\x05c#=18\x04s#=1\x04ff=0\x05"
            b"ci=31\x04sf=0\x0bsh=CVj1kw==",
        },
        {
            "name": "Living Room TV 921B77._hap._tcp.local.",
            "address": address,
            "port": 51833,
            "text": b"\x11md=Living Room"
            b" TV\x06pv=1.0\x14id=11:61:E7:92:1B:77\x05c#=17\x04s#=1\x04ff=0\x05"
            b"ci=31\x04sf=0\x0bsh=qU77SQ==",
        },
        {
            "name": "HASS Bridge ZC8X FF413D._hap._tcp.local.",
            "address": address,
            "port": 51829,
            "text": b"\x13md=HASS Bridge"
            b" ZC8X\x06pv=1.0\x14id=96:14:45:FF:41:3D\x05c#=12\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=b0QZlg==",
        },
        {
            "name": "HASS Bridge WLTF 4BE61F._hap._tcp.local.",
            "address": address,
            "port": 51837,
            "text": b"\x13md=HASS Bridge"
            b" WLTF\x06pv=1.0\x14id=E0:E7:98:4B:E6:1F\x04c#=2\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=ahAISA==",
        },
        {
            "name": "FrontdoorCamera 8941D1._hap._tcp.local.",
            "address": address,
            "port": 54898,
            "text": b"\x12md=FrontdoorCamera\x06pv=1.0\x14id=9F:B7:DC:89:41:D1\x04c#=2\x04"
            b"s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=0+MXmA==",
        },
        {
            "name": "HASS Bridge W9DN 5B5CC5._hap._tcp.local.",
            "address": address,
            "port": 51836,
            "text": b"\x13md=HASS Bridge"
            b" W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=6fLM5A==",
        },
        {
            "name": "HASS Bridge Y9OO EFF0A7._hap._tcp.local.",
            "address": address,
            "port": 51838,
            "text": b"\x13md=HASS Bridge"
            b" Y9OO\x06pv=1.0\x14id=D3:FE:98:EF:F0:A7\x04c#=2\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=u3bdfw==",
        },
        {
            "name": "Snooze Room TV 6B89B0._hap._tcp.local.",
            "address": address,
            "port": 51835,
            "text": b"\x11md=Snooze Room"
            b" TV\x06pv=1.0\x14id=5F:D5:70:6B:89:B0\x05c#=17\x04s#=1\x04ff=0\x05"
            b"ci=31\x04sf=0\x0bsh=xNTqsg==",
        },
        {
            "name": "AlexanderHomeAssistant 74651D._hap._tcp.local.",
            "address": address,
            "port": 54811,
            "text": b"\x19md=AlexanderHomeAssistant\x06pv=1.0\x14id=59:8A:0B:74:65:1D\x05"
            b"c#=14\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=ccZLPA==",
        },
        {
            "name": "HASS Bridge OS95 39C053._hap._tcp.local.",
            "address": address,
            "port": 51831,
            "text": b"\x13md=HASS Bridge"
            b" OS95\x06pv=1.0\x14id=7E:8C:E6:39:C0:53\x05c#=12\x04s#=1\x04ff=0\x04ci=2"
            b"\x04sf=0\x0bsh=Xfe5LQ==",
        },
    ]

    out.add_answer_at_time(
        DNSText(
            "HASS Bridge W9DN 5B5CC5._hap._tcp.local.",
            const._TYPE_TXT,
            const._CLASS_IN | const._CLASS_UNIQUE,
            const._DNS_OTHER_TTL,
            b"\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1"
            b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==",
        ),
        0,
    )

    for record in additionals:
        out.add_additional_answer(
            DNSService(
                record["name"],  # type: ignore
                const._TYPE_SRV,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_HOST_TTL,
                0,
                0,
                record["port"],  # type: ignore
                record["name"],  # type: ignore
            )
        )
        out.add_additional_answer(
            DNSText(
                record["name"],  # type: ignore
                const._TYPE_TXT,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_OTHER_TTL,
                record["text"],  # type: ignore
            )
        )
        out.add_additional_answer(
            DNSAddress(
                record["name"],  # type: ignore
                const._TYPE_A,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_HOST_TTL,
                record["address"],  # type: ignore
            )
        )
        out.add_additional_answer(
            DNSNsec(
                record["name"],  # type: ignore
                const._TYPE_NSEC,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_OTHER_TTL,
                record["name"],  # type: ignore
                [const._TYPE_TXT, const._TYPE_SRV],
            )
        )

    return out.packets()


packets = generate_packets()


def test_parse_incoming_message(benchmark: BenchmarkFixture) -> None:
    @benchmark
    def parse_incoming_message() -> None:
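        # Only the first packet is parsed each round; the break keeps the
        # benchmark focused on a single DNSIncoming parse per iteration.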
        for packet in packets:
            DNSIncoming(packet).answers  # noqa: B018
            break
07070100000069000081A400000000000000000000000167C7AD16000001E4000000000000000000000000000000000000003A00000000python-zeroconf-0.146.0/tests/benchmarks/test_outgoing.py"""Benchmark for DNSOutgoing."""

from __future__ import annotations

from pytest_codspeed import BenchmarkFixture

from zeroconf._protocol.outgoing import State

from .helpers import generate_packets


def test_parse_outgoing_message(benchmark: BenchmarkFixture) -> None:
    out = generate_packets()

    @benchmark
    def make_outgoing_message() -> None:
        out.packets()
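        # Reset the outgoing message so the next benchmark round
        # re-serializes from scratch instead of reusing the finished state.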
        out.state = State.init.value
        out.finished = False
        out._reset_for_next_packet()
0707010000006A000081A400000000000000000000000167C7AD1600000251000000000000000000000000000000000000003600000000python-zeroconf-0.146.0/tests/benchmarks/test_send.py"""Benchmark for sending packets."""

from __future__ import annotations

import pytest
from pytest_codspeed import BenchmarkFixture

from zeroconf.asyncio import AsyncZeroconf

from .helpers import generate_packets


@pytest.mark.asyncio
async def test_sending_packets(benchmark: BenchmarkFixture) -> None:
    """Benchmark sending packets."""
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    await aiozc.zeroconf.async_wait_for_start()
    out = generate_packets()

    @benchmark
    def _send_packets() -> None:
        aiozc.zeroconf.async_send(out)

    await aiozc.async_close()
0707010000006B000081A400000000000000000000000167C7AD1600000223000000000000000000000000000000000000004000000000python-zeroconf-0.146.0/tests/benchmarks/test_txt_properties.pyfrom __future__ import annotations

from pytest_codspeed import BenchmarkFixture

from zeroconf import ServiceInfo

info = ServiceInfo(
    "_test._tcp.local.",
    "test._test._tcp.local.",
    properties=(
        b"\x19md=AlexanderHomeAssistant\x06pv=1.0\x14id=59:8A:0B:74:65:1D\x05"
        b"c#=14\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=ccZLPA=="
    ),
)


def test_txt_properties(benchmark: BenchmarkFixture) -> None:
    @benchmark
    def process_properties() -> None:
        info._properties = None
        info.properties  # noqa: B018
0707010000006C000081A400000000000000000000000167C7AD1600000424000000000000000000000000000000000000002A00000000python-zeroconf-0.146.0/tests/conftest.py"""conftest for zeroconf tests."""

from __future__ import annotations

import threading
from unittest.mock import patch

import pytest

from zeroconf import _core, const
from zeroconf._handlers import query_handler


@pytest.fixture(autouse=True)
def verify_threads_ended():
    """Verify that the threads are not running after the test."""
    threads_before = frozenset(threading.enumerate())
    yield
    threads = frozenset(threading.enumerate()) - threads_before
    assert not threads


@pytest.fixture
def run_isolated():
    """Change the mDNS port to run the test in isolation."""
    with (
        patch.object(query_handler, "_MDNS_PORT", 5454),
        patch.object(_core, "_MDNS_PORT", 5454),
        patch.object(const, "_MDNS_PORT", 5454),
    ):
        yield
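
# A usage sketch (hypothetical test): request the fixture so the test runs
# against the alternate port:
#
#   @pytest.mark.usefixtures("run_isolated")
#   def test_in_isolation() -> None:
#       ...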


@pytest.fixture
def disable_duplicate_packet_suppression():
    """Disable duplicate packet suppress.

    Some tests run too slowly because of the duplicate
    packet suppression.
    """
    with patch.object(const, "_DUPLICATE_PACKET_SUPPRESSION_INTERVAL", 0):
        yield
0707010000006D000041ED00000000000000000000000267C7AD1600000000000000000000000000000000000000000000002700000000python-zeroconf-0.146.0/tests/services0707010000006E000081A400000000000000000000000167C7AD16000003B2000000000000000000000000000000000000003300000000python-zeroconf-0.146.0/tests/services/__init__.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations
0707010000006F000081A400000000000000000000000167C7AD160000EA6A000000000000000000000000000000000000003700000000python-zeroconf-0.146.0/tests/services/test_browser.py"""Unit tests for zeroconf._services.browser."""

from __future__ import annotations

import asyncio
import logging
import os
import socket
import time
import unittest
from collections.abc import Iterable
from threading import Event
from typing import cast
from unittest.mock import patch

import pytest

import zeroconf as r
import zeroconf._services.browser as _services_browser
from zeroconf import (
    DNSPointer,
    DNSQuestion,
    Zeroconf,
    _engine,
    const,
    current_time_millis,
    millis_to_seconds,
)
from zeroconf._services import ServiceStateChange
from zeroconf._services.browser import ServiceBrowser, _ScheduledPTRQuery
from zeroconf._services.info import ServiceInfo
from zeroconf.asyncio import AsyncServiceBrowser, AsyncZeroconf

from .. import (
    QuestionHistoryWithoutSuppression,
    _inject_response,
    _wait_for_start,
    has_working_ipv6,
    time_changed_millis,
)

log = logging.getLogger("zeroconf")
original_logging_level = logging.NOTSET


def setup_module():
    global original_logging_level
    original_logging_level = log.level
    log.setLevel(logging.DEBUG)


def teardown_module():
    if original_logging_level != logging.NOTSET:
        log.setLevel(original_logging_level)


def mock_incoming_msg(records: Iterable[r.DNSRecord]) -> r.DNSIncoming:
    generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    for record in records:
        generated.add_answer_at_time(record, 0)
    return r.DNSIncoming(generated.packets()[0])


def test_service_browser_cancel_multiple_times():
    """Test we can cancel a ServiceBrowser multiple times before close."""

    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=["127.0.0.1"])
    # start a browser
    type_ = "_hap._tcp.local."

    class MyServiceListener(r.ServiceListener):
        pass

    listener = MyServiceListener()

    browser = r.ServiceBrowser(zc, type_, None, listener)

    browser.cancel()
    browser.cancel()
    browser.cancel()

    zc.close()


def test_service_browser_cancel_context_manager():
    """Test we can cancel a ServiceBrowser with it being used as a context manager."""

    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=["127.0.0.1"])
    # start a browser
    type_ = "_hap._tcp.local."

    class MyServiceListener(r.ServiceListener):
        pass

    listener = MyServiceListener()

    browser = r.ServiceBrowser(zc, type_, None, listener)

    assert cast(bool, browser.done) is False

    with browser:
        pass

    # ensure call_soon_threadsafe in ServiceBrowser.cancel is run
    assert zc.loop is not None
    asyncio.run_coroutine_threadsafe(asyncio.sleep(0), zc.loop).result()

    assert cast(bool, browser.done) is True

    zc.close()


def test_service_browser_cancel_multiple_times_after_close():
    """Test we can cancel a ServiceBrowser multiple times after close."""

    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=["127.0.0.1"])
    # start a browser
    type_ = "_hap._tcp.local."

    class MyServiceListener(r.ServiceListener):
        pass

    listener = MyServiceListener()

    browser = r.ServiceBrowser(zc, type_, None, listener)

    zc.close()

    browser.cancel()
    browser.cancel()
    browser.cancel()


def test_service_browser_started_after_zeroconf_closed():
    """Test starting a ServiceBrowser after close raises RuntimeError."""
    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=["127.0.0.1"])
    # start a browser
    type_ = "_hap._tcp.local."

    class MyServiceListener(r.ServiceListener):
        pass

    listener = MyServiceListener()
    zc.close()

    with pytest.raises(RuntimeError):
        r.ServiceBrowser(zc, type_, None, listener)


def test_multiple_instances_running_close():
    """Test we can shutdown multiple instances."""

    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=["127.0.0.1"])
    zc2 = Zeroconf(interfaces=["127.0.0.1"])
    zc3 = Zeroconf(interfaces=["127.0.0.1"])

    assert zc.loop != zc2.loop
    assert zc.loop != zc3.loop

    class MyServiceListener(r.ServiceListener):
        pass

    listener = MyServiceListener()

    zc2.add_service_listener("zca._hap._tcp.local.", listener)

    zc.close()
    zc2.remove_service_listener(listener)
    zc2.close()
    zc3.close()


class TestServiceBrowser(unittest.TestCase):
    def test_update_record(self):
        enable_ipv6 = has_working_ipv6() and not os.environ.get("SKIP_IPV6")

        service_name = "name._type._tcp.local."
        service_type = "_type._tcp.local."
        service_server = "ash-1.local."
        service_text = b"path=/~matt1/"
        service_address = "10.0.1.2"
        service_v6_address = "2001:db8::1"
        service_v6_second_address = "6001:db8::1"

        service_added_count = 0
        service_removed_count = 0
        service_updated_count = 0
        service_add_event = Event()
        service_removed_event = Event()
        service_updated_event = Event()

        class MyServiceListener(r.ServiceListener):
            def add_service(self, zc, type_, name) -> None:  # type: ignore[no-untyped-def]
                nonlocal service_added_count
                service_added_count += 1
                service_add_event.set()

            def remove_service(self, zc, type_, name) -> None:  # type: ignore[no-untyped-def]
                nonlocal service_removed_count
                service_removed_count += 1
                service_removed_event.set()

            def update_service(self, zc, type_, name) -> None:  # type: ignore[no-untyped-def]
                nonlocal service_updated_count
                service_updated_count += 1
                service_info = zc.get_service_info(type_, name)
                assert socket.inet_aton(service_address) in service_info.addresses
                if enable_ipv6:
                    assert socket.inet_pton(
                        socket.AF_INET6, service_v6_address
                    ) in service_info.addresses_by_version(r.IPVersion.V6Only)
                    assert socket.inet_pton(
                        socket.AF_INET6, service_v6_second_address
                    ) in service_info.addresses_by_version(r.IPVersion.V6Only)
                assert service_info.text == service_text
                assert service_info.server.lower() == service_server.lower()
                service_updated_event.set()

        def mock_record_update_incoming_msg(
            service_state_change: r.ServiceStateChange,
        ) -> r.DNSIncoming:
            generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
            assert generated.is_response() is True

            if service_state_change == r.ServiceStateChange.Removed:
                ttl = 0
            else:
                ttl = 120

            generated.add_answer_at_time(
                r.DNSText(
                    service_name,
                    const._TYPE_TXT,
                    const._CLASS_IN | const._CLASS_UNIQUE,
                    ttl,
                    service_text,
                ),
                0,
            )

            generated.add_answer_at_time(
                r.DNSService(
                    service_name,
                    const._TYPE_SRV,
                    const._CLASS_IN | const._CLASS_UNIQUE,
                    ttl,
                    0,
                    0,
                    80,
                    service_server,
                ),
                0,
            )

            # Send the IPv6 address first since we previously
            # had a bug where the IPv4 would be missing if the
            # IPv6 was seen first
            if enable_ipv6:
                generated.add_answer_at_time(
                    r.DNSAddress(
                        service_server,
                        const._TYPE_AAAA,
                        const._CLASS_IN | const._CLASS_UNIQUE,
                        ttl,
                        socket.inet_pton(socket.AF_INET6, service_v6_address),
                    ),
                    0,
                )
                generated.add_answer_at_time(
                    r.DNSAddress(
                        service_server,
                        const._TYPE_AAAA,
                        const._CLASS_IN | const._CLASS_UNIQUE,
                        ttl,
                        socket.inet_pton(socket.AF_INET6, service_v6_second_address),
                    ),
                    0,
                )
            generated.add_answer_at_time(
                r.DNSAddress(
                    service_server,
                    const._TYPE_A,
                    const._CLASS_IN | const._CLASS_UNIQUE,
                    ttl,
                    socket.inet_aton(service_address),
                ),
                0,
            )

            generated.add_answer_at_time(
                r.DNSPointer(service_type, const._TYPE_PTR, const._CLASS_IN, ttl, service_name),
                0,
            )

            return r.DNSIncoming(generated.packets()[0])

        zeroconf = r.Zeroconf(interfaces=["127.0.0.1"])
        service_browser = r.ServiceBrowser(zeroconf, service_type, listener=MyServiceListener())

        try:
            wait_time = 3

            # service added
            _inject_response(zeroconf, mock_record_update_incoming_msg(r.ServiceStateChange.Added))
            service_add_event.wait(wait_time)
            assert service_added_count == 1
            assert service_updated_count == 0
            assert service_removed_count == 0

            # service SRV updated
            service_updated_event.clear()
            service_server = "ash-2.local."
            _inject_response(zeroconf, mock_record_update_incoming_msg(r.ServiceStateChange.Updated))
            service_updated_event.wait(wait_time)
            assert service_added_count == 1
            assert service_updated_count == 1
            assert service_removed_count == 0

            # service TXT updated
            service_updated_event.clear()
            service_text = b"path=/~matt2/"
            _inject_response(zeroconf, mock_record_update_incoming_msg(r.ServiceStateChange.Updated))
            service_updated_event.wait(wait_time)
            assert service_added_count == 1
            assert service_updated_count == 2
            assert service_removed_count == 0

            # service TXT updated - duplicate update should not trigger another service_updated
            service_updated_event.clear()
            service_text = b"path=/~matt2/"
            _inject_response(zeroconf, mock_record_update_incoming_msg(r.ServiceStateChange.Updated))
            service_updated_event.wait(wait_time)
            assert service_added_count == 1
            assert service_updated_count == 2
            assert service_removed_count == 0

            # service A updated
            service_updated_event.clear()
            service_address = "10.0.1.3"
            # Verify we match on uppercase
            service_server = service_server.upper()
            _inject_response(zeroconf, mock_record_update_incoming_msg(r.ServiceStateChange.Updated))
            service_updated_event.wait(wait_time)
            assert service_added_count == 1
            assert service_updated_count == 3
            assert service_removed_count == 0

            # service all updated
            service_updated_event.clear()
            service_server = "ash-3.local."
            service_text = b"path=/~matt3/"
            service_address = "10.0.1.3"
            _inject_response(zeroconf, mock_record_update_incoming_msg(r.ServiceStateChange.Updated))
            service_updated_event.wait(wait_time)
            assert service_added_count == 1
            assert service_updated_count == 4
            assert service_removed_count == 0

            # service removed
            _inject_response(zeroconf, mock_record_update_incoming_msg(r.ServiceStateChange.Removed))
            service_removed_event.wait(wait_time)
            assert service_added_count == 1
            assert service_updated_count == 4
            assert service_removed_count == 1

        finally:
            assert len(zeroconf.listeners) == 1
            service_browser.cancel()
            time.sleep(0.2)
            assert len(zeroconf.listeners) == 0
            zeroconf.remove_all_service_listeners()
            zeroconf.close()


class TestServiceBrowserMultipleTypes(unittest.TestCase):
    def test_update_record(self):
        service_names = [
            "name2._type2._tcp.local.",
            "name._type._tcp.local.",
            "name._type._udp.local",
        ]
        service_types = ["_type2._tcp.local.", "_type._tcp.local.", "_type._udp.local."]

        service_added_count = 0
        service_removed_count = 0
        service_add_event = Event()
        service_removed_event = Event()

        class MyServiceListener(r.ServiceListener):
            def add_service(self, zc, type_, name) -> None:  # type: ignore[no-untyped-def]
                nonlocal service_added_count
                service_added_count += 1
                if service_added_count == 3:
                    service_add_event.set()

            def remove_service(self, zc, type_, name) -> None:  # type: ignore[no-untyped-def]
                nonlocal service_removed_count
                service_removed_count += 1
                if service_removed_count == 3:
                    service_removed_event.set()

        def mock_record_update_incoming_msg(
            service_state_change: r.ServiceStateChange,
            service_type: str,
            service_name: str,
            ttl: int,
        ) -> r.DNSIncoming:
            generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
            generated.add_answer_at_time(
                r.DNSPointer(service_type, const._TYPE_PTR, const._CLASS_IN, ttl, service_name),
                0,
            )
            return r.DNSIncoming(generated.packets()[0])

        zeroconf = r.Zeroconf(interfaces=["127.0.0.1"])
        service_browser = r.ServiceBrowser(zeroconf, service_types, listener=MyServiceListener())

        try:
            wait_time = 3

            # all three services added
            _inject_response(
                zeroconf,
                mock_record_update_incoming_msg(
                    r.ServiceStateChange.Added, service_types[0], service_names[0], 120
                ),
            )
            _inject_response(
                zeroconf,
                mock_record_update_incoming_msg(
                    r.ServiceStateChange.Added, service_types[1], service_names[1], 120
                ),
            )
            time.sleep(0.1)

            called_with_refresh_time_check = False

            def _mock_get_expiration_time(self, percent):
                nonlocal called_with_refresh_time_check
                if percent == const._EXPIRE_REFRESH_TIME_PERCENT:
                    called_with_refresh_time_check = True
                    return 0
                return self.created + (percent * self.ttl * 10)

            # Set an expire time that will force a refresh
            with patch("zeroconf.DNSRecord.get_expiration_time", new=_mock_get_expiration_time):
                _inject_response(
                    zeroconf,
                    mock_record_update_incoming_msg(
                        r.ServiceStateChange.Added,
                        service_types[0],
                        service_names[0],
                        120,
                    ),
                )
                # Add the last record after updating the first one
                # to ensure the service_add_event only gets set
                # after the update
                _inject_response(
                    zeroconf,
                    mock_record_update_incoming_msg(
                        r.ServiceStateChange.Added,
                        service_types[2],
                        service_names[2],
                        120,
                    ),
                )
                service_add_event.wait(wait_time)
            assert called_with_refresh_time_check is True
            assert service_added_count == 3
            assert service_removed_count == 0

            _inject_response(
                zeroconf,
                mock_record_update_incoming_msg(
                    r.ServiceStateChange.Updated, service_types[0], service_names[0], 0
                ),
            )

            # all three services removed
            _inject_response(
                zeroconf,
                mock_record_update_incoming_msg(
                    r.ServiceStateChange.Removed, service_types[0], service_names[0], 0
                ),
            )
            _inject_response(
                zeroconf,
                mock_record_update_incoming_msg(
                    r.ServiceStateChange.Removed, service_types[1], service_names[1], 0
                ),
            )
            _inject_response(
                zeroconf,
                mock_record_update_incoming_msg(
                    r.ServiceStateChange.Removed, service_types[2], service_names[2], 0
                ),
            )
            service_removed_event.wait(wait_time)
            assert service_added_count == 3
            assert service_removed_count == 3
        except TypeError:
            # Cannot be patched with cython as get_expiration_time is immutable
            pass

        finally:
            assert len(zeroconf.listeners) == 1
            service_browser.cancel()
            time.sleep(0.2)
            assert len(zeroconf.listeners) == 0
            zeroconf.remove_all_service_listeners()
            zeroconf.close()


def test_first_query_delay():
    """Verify the first query is delayed.

    https://datatracker.ietf.org/doc/html/rfc6762#section-5.2
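
    RFC 6762 requires the first query of a series to be delayed by a random
    amount in the 20-120 ms range.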
    """
    type_ = "_http._tcp.local."
    zeroconf_browser = Zeroconf(interfaces=["127.0.0.1"])
    _wait_for_start(zeroconf_browser)

    # we are going to patch the zeroconf send to check query transmission
    old_send = zeroconf_browser.async_send

    first_query_time = None

    def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT):
        """Sends an outgoing packet."""
        nonlocal first_query_time
        if first_query_time is None:
            first_query_time = current_time_millis()
        old_send(out, addr=addr, port=port)

    # patch the zeroconf send
    with patch.object(zeroconf_browser, "async_send", send):
        # dummy service callback
        def on_service_state_change(zeroconf, service_type, state_change, name):
            pass

        start_time = current_time_millis()
        browser = ServiceBrowser(zeroconf_browser, type_, [on_service_state_change])
        time.sleep(millis_to_seconds(_services_browser._FIRST_QUERY_DELAY_RANDOM_INTERVAL[1] + 5))
        try:
            assert (
                current_time_millis() - start_time > _services_browser._FIRST_QUERY_DELAY_RANDOM_INTERVAL[0]
            )
        finally:
            browser.cancel()
            zeroconf_browser.close()


@pytest.mark.asyncio
async def test_asking_default_is_asking_qm_questions_after_the_first_qu():
    """Verify the service browser's first questions are QU and refresh queries are QM."""
    service_added = asyncio.Event()
    service_removed = asyncio.Event()
    unexpected_ttl = asyncio.Event()
    got_query = asyncio.Event()

    type_ = "_http._tcp.local."
    registration_name = f"xxxyyy.{type_}"

    def on_service_state_change(zeroconf, service_type, state_change, name):
        if name == registration_name:
            if state_change is ServiceStateChange.Added:
                service_added.set()
            elif state_change is ServiceStateChange.Removed:
                service_removed.set()

    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    zeroconf_browser = aiozc.zeroconf
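    # Replace the question history so duplicate-question suppression does not
    # hide the queries this test needs to observe.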
    zeroconf_browser.question_history = QuestionHistoryWithoutSuppression()
    await zeroconf_browser.async_wait_for_start()

    # we are going to patch the zeroconf send so we can capture the outgoing questions
    old_send = zeroconf_browser.async_send

    expected_ttl = const._DNS_OTHER_TTL
    questions: list[list[DNSQuestion]] = []

    def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()):
        """Sends an outgoing packet."""
        pout = r.DNSIncoming(out.packets()[0])
        questions.append(pout.questions)
        got_query.set()
        old_send(out, addr=addr, port=port, v6_flow_scope=v6_flow_scope)

    assert len(zeroconf_browser.engine.protocols) == 2

    aio_zeroconf_registrar = AsyncZeroconf(interfaces=["127.0.0.1"])
    zeroconf_registrar = aio_zeroconf_registrar.zeroconf
    await aio_zeroconf_registrar.zeroconf.async_wait_for_start()

    assert len(zeroconf_registrar.engine.protocols) == 2
    # patch the zeroconf send so we can capture what is being sent
    with patch.object(zeroconf_browser, "async_send", send):
        service_added = asyncio.Event()
        service_removed = asyncio.Event()

        browser = AsyncServiceBrowser(zeroconf_browser, type_, [on_service_state_change])
        info = ServiceInfo(
            type_,
            registration_name,
            80,
            0,
            0,
            {"path": "/~paulsm/"},
            "ash-2.local.",
            addresses=[socket.inet_aton("10.0.1.2")],
        )
        task = await aio_zeroconf_registrar.async_register_service(info)
        await task
        loop = asyncio.get_running_loop()
        try:
            await asyncio.wait_for(service_added.wait(), 1)
            assert service_added.is_set()
            # Make sure the startup queries are sent
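            # (the mocked clock advances 2**n seconds per query to match the
            # exponential backoff between startup queries)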
            original_now = loop.time()
            now_millis = original_now * 1000
            for query_count in range(_services_browser.STARTUP_QUERIES):
                now_millis += (2**query_count) * 1000
                time_changed_millis(now_millis)

            got_query.clear()
            now_millis = original_now * 1000
            assert not unexpected_ttl.is_set()
            # Move time forward past when the TTL is no longer
            # fresh (AKA 75% of the TTL)
            now_millis += (expected_ttl * 1000) * 0.80
            time_changed_millis(now_millis)

            await asyncio.wait_for(got_query.wait(), 1)
            assert not unexpected_ttl.is_set()

            assert len(questions) == _services_browser.STARTUP_QUERIES + 1
            # The first question should be QU to try to
            # populate the known answers and limit the impact
            # of the QM questions that follow. We still
            # have to ask QM questions for the startup queries
            # because some devices will not respond to QU
            assert questions[0][0].unicast is True
            # The remaining questions should be QM questions
            for question in questions[1:]:
                assert question[0].unicast is False
            # Don't remove service, allow close() to cleanup
        finally:
            await aio_zeroconf_registrar.async_close()
            await asyncio.wait_for(service_removed.wait(), 1)
            assert service_removed.is_set()
            await browser.async_cancel()
            await aiozc.async_close()


@pytest.mark.asyncio
async def test_ttl_refresh_cancelled_rescue_query():
    """Verify seeing a name again cancels the rescue query."""
    service_added = asyncio.Event()
    service_removed = asyncio.Event()
    unexpected_ttl = asyncio.Event()
    got_query = asyncio.Event()

    type_ = "_http._tcp.local."
    registration_name = f"xxxyyy.{type_}"

    def on_service_state_change(zeroconf, service_type, state_change, name):
        if name == registration_name:
            if state_change is ServiceStateChange.Added:
                service_added.set()
            elif state_change is ServiceStateChange.Removed:
                service_removed.set()

    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    zeroconf_browser = aiozc.zeroconf
    zeroconf_browser.question_history = QuestionHistoryWithoutSuppression()
    await zeroconf_browser.async_wait_for_start()

    # we are going to patch the zeroconf send so we can capture the outgoing packets
    old_send = zeroconf_browser.async_send

    expected_ttl = const._DNS_OTHER_TTL
    packets = []

    def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()):
        """Sends an outgoing packet."""
        pout = r.DNSIncoming(out.packets()[0])
        packets.append(pout)
        got_query.set()
        old_send(out, addr=addr, port=port, v6_flow_scope=v6_flow_scope)

    assert len(zeroconf_browser.engine.protocols) == 2

    aio_zeroconf_registrar = AsyncZeroconf(interfaces=["127.0.0.1"])
    zeroconf_registrar = aio_zeroconf_registrar.zeroconf
    await aio_zeroconf_registrar.zeroconf.async_wait_for_start()

    assert len(zeroconf_registrar.engine.protocols) == 2
    # patch the zeroconf send so we can capture what is being sent
    with patch.object(zeroconf_browser, "async_send", send):
        service_added = asyncio.Event()
        service_removed = asyncio.Event()

        browser = AsyncServiceBrowser(zeroconf_browser, type_, [on_service_state_change])
        info = ServiceInfo(
            type_,
            registration_name,
            80,
            0,
            0,
            {"path": "/~paulsm/"},
            "ash-2.local.",
            addresses=[socket.inet_aton("10.0.1.2")],
        )
        task = await aio_zeroconf_registrar.async_register_service(info)
        await task
        loop = asyncio.get_running_loop()
        try:
            await asyncio.wait_for(service_added.wait(), 1)
            assert service_added.is_set()
            # Make sure the startup queries are sent
            original_now = loop.time()
            now_millis = original_now * 1000
            for query_count in range(_services_browser.STARTUP_QUERIES):
                now_millis += (2**query_count) * 1000
                time_changed_millis(now_millis)

            now_millis = original_now * 1000
            assert not unexpected_ttl.is_set()
            await asyncio.wait_for(got_query.wait(), 1)
            got_query.clear()
            assert len(packets) == _services_browser.STARTUP_QUERIES
            packets.clear()

            # Move time forward past when the TTL is no longer
            # fresh (AKA 75% of the TTL)
            now_millis += (expected_ttl * 1000) * 0.80
            # Inject a response that will reschedule
            # the rescue query so it does not happen
            with patch("time.monotonic", return_value=now_millis / 1000):
                zeroconf_browser.record_manager.async_updates_from_response(
                    mock_incoming_msg([info.dns_pointer()]),
                )

            time_changed_millis(now_millis)
            await asyncio.sleep(0)

            # Verify we did not send a rescue query
            assert not packets

            # We should still get a rescue query once the rescheduled
            # query time is reached
            now_millis += (expected_ttl * 1000) * 0.76
            time_changed_millis(now_millis)
            await asyncio.wait_for(got_query.wait(), 1)
            assert len(packets) == 1
            # Don't remove service, allow close() to cleanup
        finally:
            await aio_zeroconf_registrar.async_close()
            await asyncio.wait_for(service_removed.wait(), 1)
            assert service_removed.is_set()
            await browser.async_cancel()
            await aiozc.async_close()


@pytest.mark.asyncio
async def test_asking_qm_questions():
    """Verify explicitly asking QM questions."""
    type_ = "_quservice._tcp.local."
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    zeroconf_browser = aiozc.zeroconf
    await zeroconf_browser.async_wait_for_start()
    # we are going to patch the zeroconf send to check query transmission
    old_send = zeroconf_browser.async_send

    first_outgoing = None

    def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT):
        """Sends an outgoing packet."""
        nonlocal first_outgoing
        if first_outgoing is None:
            first_outgoing = out
        old_send(out, addr=addr, port=port)

    # patch the zeroconf send
    with patch.object(zeroconf_browser, "async_send", send):
        # dummy service callback
        def on_service_state_change(zeroconf, service_type, state_change, name):
            pass

        browser = AsyncServiceBrowser(
            zeroconf_browser,
            type_,
            [on_service_state_change],
            question_type=r.DNSQuestionType.QM,
        )
        await asyncio.sleep(millis_to_seconds(_services_browser._FIRST_QUERY_DELAY_RANDOM_INTERVAL[1] + 5))
        try:
            assert first_outgoing.questions[0].unicast is False  # type: ignore[union-attr]
        finally:
            await browser.async_cancel()
            await aiozc.async_close()


@pytest.mark.asyncio
async def test_asking_qu_questions():
    """Verify the service browser can ask QU questions."""
    type_ = "_quservice._tcp.local."
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    zeroconf_browser = aiozc.zeroconf
    await zeroconf_browser.async_wait_for_start()

    # we are going to patch the zeroconf send to check query transmission
    old_send = zeroconf_browser.async_send

    first_outgoing = None

    def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT):
        """Sends an outgoing packet."""
        nonlocal first_outgoing
        if first_outgoing is None:
            first_outgoing = out
        old_send(out, addr=addr, port=port)

    # patch the zeroconf send
    with patch.object(zeroconf_browser, "async_send", send):
        # dummy service callback
        def on_service_state_change(zeroconf, service_type, state_change, name):
            pass

        browser = AsyncServiceBrowser(
            zeroconf_browser,
            type_,
            [on_service_state_change],
            question_type=r.DNSQuestionType.QU,
        )
        await asyncio.sleep(millis_to_seconds(_services_browser._FIRST_QUERY_DELAY_RANDOM_INTERVAL[1] + 5))
        try:
            assert first_outgoing.questions[0].unicast is True  # type: ignore[union-attr]
        finally:
            await browser.async_cancel()
            await aiozc.async_close()


def test_legacy_record_update_listener():
    """Test a RecordUpdateListener that does not implement update_records."""

    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=["127.0.0.1"])

    with pytest.raises(RuntimeError):
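        # The base class raises when update_record is not overridden.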
        r.RecordUpdateListener().update_record(
            zc,
            0,
            r.DNSRecord("irrelevant", const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL),
        )

    updates = []

    class LegacyRecordUpdateListener(r.RecordUpdateListener):
        """A RecordUpdateListener that does not implement update_records."""

        def update_record(self, zc: Zeroconf, now: float, record: r.DNSRecord) -> None:
            nonlocal updates
            updates.append(record)

    listener = LegacyRecordUpdateListener()

    zc.add_listener(listener, None)

    # dummy service callback
    def on_service_state_change(zeroconf, service_type, state_change, name):
        pass

    # start a browser
    type_ = "_homeassistant._tcp.local."
    name = "MyTestHome"
    browser = ServiceBrowser(zc, type_, [on_service_state_change])

    info_service = ServiceInfo(
        type_,
        f"{name}.{type_}",
        80,
        0,
        0,
        {"path": "/~paulsm/"},
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )

    zc.register_service(info_service)

    time.sleep(0.001)

    browser.cancel()

    assert len(updates)
    assert any(isinstance(update, r.DNSPointer) and update.name == type_ for update in updates)

    zc.remove_listener(listener)
    # Removing a second time should not throw
    zc.remove_listener(listener)

    zc.close()


def test_service_browser_is_aware_of_port_changes():
    """Test that the ServiceBrowser is aware of port changes."""

    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=["127.0.0.1"])
    # start a browser
    type_ = "_hap._tcp.local."
    registration_name = f"xxxyyy.{type_}"

    callbacks = []

    # dummy service callback
    def on_service_state_change(zeroconf, service_type, state_change, name):
        """Dummy callback."""
        nonlocal callbacks
        if name == registration_name:
            callbacks.append((service_type, state_change, name))

    browser = ServiceBrowser(zc, type_, [on_service_state_change])

    desc = {"path": "/~paulsm/"}
    address_parsed = "10.0.1.2"
    address = socket.inet_aton(address_parsed)
    info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[address])

    _inject_response(
        zc,
        mock_incoming_msg(
            [
                info.dns_pointer(),
                info.dns_service(),
                info.dns_text(),
                *info.dns_addresses(),
            ]
        ),
    )
    time.sleep(0.1)

    assert callbacks == [("_hap._tcp.local.", ServiceStateChange.Added, "xxxyyy._hap._tcp.local.")]
    service_info = zc.get_service_info(type_, registration_name)
    assert service_info is not None
    assert service_info.port == 80

    info.port = 400
    info._dns_service_cache = None  # we are mutating the record so clear the cache

    _inject_response(
        zc,
        mock_incoming_msg([info.dns_service()]),
    )
    time.sleep(0.1)

    assert callbacks == [
        ("_hap._tcp.local.", ServiceStateChange.Added, "xxxyyy._hap._tcp.local."),
        ("_hap._tcp.local.", ServiceStateChange.Updated, "xxxyyy._hap._tcp.local."),
    ]
    service_info = zc.get_service_info(type_, registration_name)
    assert service_info is not None
    assert service_info.port == 400
    browser.cancel()

    zc.close()


def test_service_browser_listeners_update_service():
    """Test that the ServiceBrowser ServiceListener that implements update_service."""

    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=["127.0.0.1"])
    # start a browser
    type_ = "_hap._tcp.local."
    registration_name = f"xxxyyy.{type_}"
    callbacks = []

    class MyServiceListener(r.ServiceListener):
        def add_service(self, zc, type_, name) -> None:  # type: ignore[no-untyped-def]
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("add", type_, name))

        def remove_service(self, zc, type_, name) -> None:  # type: ignore[no-untyped-def]
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("remove", type_, name))

        def update_service(self, zc, type_, name) -> None:  # type: ignore[no-untyped-def]
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("update", type_, name))

    listener = MyServiceListener()

    browser = r.ServiceBrowser(zc, type_, None, listener)

    desc = {"path": "/~paulsm/"}
    address_parsed = "10.0.1.2"
    address = socket.inet_aton(address_parsed)
    info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[address])

    _inject_response(
        zc,
        mock_incoming_msg(
            [
                info.dns_pointer(),
                info.dns_service(),
                info.dns_text(),
                *info.dns_addresses(),
            ]
        ),
    )
    time.sleep(0.2)
    info._dns_service_cache = None  # we are mutating the record so clear the cache

    info.port = 400
    _inject_response(
        zc,
        mock_incoming_msg([info.dns_service()]),
    )
    time.sleep(0.2)

    assert callbacks == [
        ("add", type_, registration_name),
        ("update", type_, registration_name),
    ]
    browser.cancel()

    zc.close()


def test_service_browser_listeners_no_update_service():
    """Test that the ServiceBrowser ServiceListener that does not implement update_service."""

    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=["127.0.0.1"])
    # start a browser
    type_ = "_hap._tcp.local."
    registration_name = f"xxxyyy.{type_}"
    callbacks = []

    class MyServiceListener(r.ServiceListener):
        def add_service(self, zc, type_, name) -> None:  # type: ignore[no-untyped-def]
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("add", type_, name))

        def remove_service(self, zc, type_, name) -> None:  # type: ignore[no-untyped-def]
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("remove", type_, name))

    listener = MyServiceListener()

    browser = r.ServiceBrowser(zc, type_, None, listener)

    desc = {"path": "/~paulsm/"}
    address_parsed = "10.0.1.2"
    address = socket.inet_aton(address_parsed)
    info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[address])

    _inject_response(
        zc,
        mock_incoming_msg(
            [
                info.dns_pointer(),
                info.dns_service(),
                info.dns_text(),
                *info.dns_addresses(),
            ]
        ),
    )
    time.sleep(0.2)
    info.port = 400
    info._dns_service_cache = None  # we are mutating the record so clear the cache

    _inject_response(
        zc,
        mock_incoming_msg([info.dns_service()]),
    )
    time.sleep(0.2)

    assert callbacks == [
        ("add", type_, registration_name),
    ]
    browser.cancel()

    zc.close()


def test_service_browser_uses_non_strict_names():
    """Verify we can look for technically invalid names as we cannot change what others do."""

    # dummy service callback
    def on_service_state_change(zeroconf, service_type, state_change, name):
        pass

    zc = r.Zeroconf(interfaces=["127.0.0.1"])
    browser = ServiceBrowser(zc, ["_tivo-videostream._tcp.local."], [on_service_state_change])
    browser.cancel()

    # Still fail on completely invalid
    with pytest.raises(r.BadTypeInNameException):
        browser = ServiceBrowser(zc, ["tivo-videostream._tcp.local."], [on_service_state_change])
    zc.close()


def test_group_ptr_queries_with_known_answers():
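    """Verify PTR query grouping keeps one question per packet once known answers overflow."""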
    questions_with_known_answers: _services_browser._QuestionWithKnownAnswers = {}
    now = current_time_millis()
    for i in range(120):
        name = f"_hap{i}._tcp._local."
        questions_with_known_answers[DNSQuestion(name, const._TYPE_PTR, const._CLASS_IN)] = {
            DNSPointer(
                name,
                const._TYPE_PTR,
                const._CLASS_IN,
                4500,
                f"zoo{counter}.{name}",
            )
            for counter in range(i)
        }
    outs = _services_browser.group_ptr_queries_with_known_answers(now, True, questions_with_known_answers)
    for out in outs:
        packets = out.packets()
        # If we generate multiple packets there must
        # only be one question
        assert len(packets) == 1 or len(out.questions) == 1


# This test uses asyncio because it needs to access the cache directly
# which is not threadsafe
@pytest.mark.asyncio
async def test_generate_service_query_suppress_duplicate_questions():
    """Generate a service query for sending with zeroconf.send."""
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    zc = aiozc.zeroconf
    now = current_time_millis()
    name = "_suppresstest._tcp.local."
    question = r.DNSQuestion(name, const._TYPE_PTR, const._CLASS_IN)
    answer = r.DNSPointer(
        name,
        const._TYPE_PTR,
        const._CLASS_IN,
        10000,
        f"known-to-other.{name}",
    )
    other_known_answers: set[r.DNSRecord] = {answer}
    zc.question_history.add_question_at_time(question, now, other_known_answers)
    assert zc.question_history.suppresses(question, now, other_known_answers)

    # The known answer list is different, do not suppress
    outs = _services_browser.generate_service_query(zc, now, {name}, multicast=True, question_type=None)
    assert outs

    zc.cache.async_add_records([answer])
    # The known answer list contains all the asked questions in the history
    # we should suppress

    outs = _services_browser.generate_service_query(zc, now, {name}, multicast=True, question_type=None)
    assert not outs

    # We do not suppress once the question history expires
    outs = _services_browser.generate_service_query(
        zc, now + 1000, {name}, multicast=True, question_type=None
    )
    assert outs

    # We do not suppress QU queries ever
    outs = _services_browser.generate_service_query(zc, now, {name}, multicast=False, question_type=None)
    assert outs

    zc.question_history.async_expire(now + 2000)
    # No suppression after clearing the history
    outs = _services_browser.generate_service_query(zc, now, {name}, multicast=True, question_type=None)
    assert outs

    # The previous query we just sent is still remembered and
    # the next one is suppressed
    outs = _services_browser.generate_service_query(zc, now, {name}, multicast=True, question_type=None)
    assert not outs

    await aiozc.async_close()


@pytest.mark.asyncio
async def test_query_scheduler():
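    """Verify the query scheduler schedules, cancels, and expires PTR refresh queries."""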
    delay = const._BROWSER_TIME
    types_ = {"_hap._tcp.local.", "_http._tcp.local."}
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    await aiozc.zeroconf.async_wait_for_start()
    zc = aiozc.zeroconf
    sends: list[r.DNSIncoming] = []

    def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()):
        """Sends an outgoing packet."""
        pout = r.DNSIncoming(out.packets()[0])
        sends.append(pout)

    query_scheduler = _services_browser.QueryScheduler(zc, types_, None, 0, True, delay, (0, 0), None)
    loop = asyncio.get_running_loop()

    # patch the zeroconf send so we can capture what is being sent
    with patch.object(zc, "async_send", send):
        query_scheduler.start(loop)

        original_now = loop.time()
        now_millis = original_now * 1000
        for query_count in range(_services_browser.STARTUP_QUERIES):
            now_millis += (2**query_count) * 1000
            time_changed_millis(now_millis)

        ptr_record = r.DNSPointer(
            "_hap._tcp.local.",
            const._TYPE_PTR,
            const._CLASS_IN,
            const._DNS_OTHER_TTL,
            "zoomer._hap._tcp.local.",
        )
        ptr2_record = r.DNSPointer(
            "_hap._tcp.local.",
            const._TYPE_PTR,
            const._CLASS_IN,
            const._DNS_OTHER_TTL,
            "disappear._hap._tcp.local.",
        )

        query_scheduler.reschedule_ptr_first_refresh(ptr_record)
        expected_when_time = ptr_record.get_expiration_time(const._EXPIRE_REFRESH_TIME_PERCENT)
        expected_expire_time = ptr_record.get_expiration_time(100)
        ptr_query = _ScheduledPTRQuery(
            ptr_record.alias,
            ptr_record.name,
            int(ptr_record.ttl),
            expected_expire_time,
            expected_when_time,
        )
        assert query_scheduler._query_heap == [ptr_query]

        query_scheduler.reschedule_ptr_first_refresh(ptr2_record)
        expected_when_time = ptr2_record.get_expiration_time(const._EXPIRE_REFRESH_TIME_PERCENT)
        expected_expire_time = ptr2_record.get_expiration_time(100)
        ptr2_query = _ScheduledPTRQuery(
            ptr2_record.alias,
            ptr2_record.name,
            int(ptr2_record.ttl),
            expected_expire_time,
            expected_when_time,
        )

        assert query_scheduler._query_heap == [ptr_query, ptr2_query]

        # Simulate a goodbye for the first PTR record; cancelled queries
        # remain in the heap until they are popped

        query_scheduler.cancel_ptr_refresh(ptr_record)
        ptr_query.cancelled = True

        assert query_scheduler._query_heap == [ptr_query, ptr2_query]
        assert query_scheduler._query_heap[0].cancelled is True
        assert query_scheduler._query_heap[1].cancelled is False

        # Move time forward past when the TTL is no longer
        # fresh (AKA 75% of the TTL)
        now_millis += (ptr2_record.ttl * 1000) * 0.80
        time_changed_millis(now_millis)
        assert len(query_scheduler._query_heap) == 1
        first_heap = query_scheduler._query_heap[0]
        assert first_heap.cancelled is False
        assert first_heap.alias == ptr2_record.alias

        # Move time forward past when the record expires
        now_millis += (ptr2_record.ttl * 1000) * 0.20
        time_changed_millis(now_millis)
        assert len(query_scheduler._query_heap) == 0

    await aiozc.async_close()


@pytest.mark.asyncio
async def test_query_scheduler_rescue_records():
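    """Verify the query scheduler reschedules rescue queries until the record expires."""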
    delay = const._BROWSER_TIME
    types_ = {"_hap._tcp.local.", "_http._tcp.local."}
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    await aiozc.zeroconf.async_wait_for_start()
    zc = aiozc.zeroconf
    sends: list[r.DNSIncoming] = []

    def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()):
        """Sends an outgoing packet."""
        pout = r.DNSIncoming(out.packets()[0])
        sends.append(pout)

    query_scheduler = _services_browser.QueryScheduler(zc, types_, None, 0, True, delay, (0, 0), None)
    loop = asyncio.get_running_loop()

    # patch the zeroconf send so we can capture what is being sent
    with patch.object(zc, "async_send", send):
        query_scheduler.start(loop)

        original_now = loop.time()
        now_millis = original_now * 1000
        for query_count in range(_services_browser.STARTUP_QUERIES):
            now_millis += (2**query_count) * 1000
            time_changed_millis(now_millis)

        ptr_record = r.DNSPointer(
            "_hap._tcp.local.",
            const._TYPE_PTR,
            const._CLASS_IN,
            const._DNS_OTHER_TTL,
            "zoomer._hap._tcp.local.",
        )

        query_scheduler.reschedule_ptr_first_refresh(ptr_record)
        expected_when_time = ptr_record.get_expiration_time(const._EXPIRE_REFRESH_TIME_PERCENT)
        expected_expire_time = ptr_record.get_expiration_time(100)
        ptr_query = _ScheduledPTRQuery(
            ptr_record.alias,
            ptr_record.name,
            int(ptr_record.ttl),
            expected_expire_time,
            expected_when_time,
        )
        assert query_scheduler._query_heap == [ptr_query]
        assert query_scheduler._query_heap[0].cancelled is False

        # Move time forward past when the TTL is no longer
        # fresh (AKA 75% of the TTL)
        now_millis += (ptr_record.ttl * 1000) * 0.76
        time_changed_millis(now_millis)
        assert len(query_scheduler._query_heap) == 1
        new_when = query_scheduler._query_heap[0].when_millis
        assert query_scheduler._query_heap[0].cancelled is False
        assert new_when >= expected_when_time

        # Move time forward again, but not enough to expire the
        # record to make sure we try to rescue it
        now_millis += (ptr_record.ttl * 1000) * 0.11
        time_changed_millis(now_millis)
        assert len(query_scheduler._query_heap) == 1
        second_new_when = query_scheduler._query_heap[0].when_millis
        assert query_scheduler._query_heap[0].cancelled is False
        assert second_new_when >= new_when

        # Move time forward again, enough that we will no longer
        # try to rescue the record
        now_millis += (ptr_record.ttl * 1000) * 0.11
        time_changed_millis(now_millis)
        assert len(query_scheduler._query_heap) == 0

    await aiozc.async_close()


def test_service_browser_matching():
    """Test that the ServiceBrowser matching does not match partial names."""

    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=["127.0.0.1"])
    # start a browser
    type_ = "_http._tcp.local."
    registration_name = f"xxxyyy.{type_}"
    not_match_type_ = "_asustor-looksgood_http._tcp.local."
    not_match_registration_name = f"xxxyyy.{not_match_type_}"
    callbacks = []

    class MyServiceListener(r.ServiceListener):
        def add_service(self, zc, type_, name) -> None:  # type: ignore[no-untyped-def]
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("add", type_, name))

        def remove_service(self, zc, type_, name) -> None:  # type: ignore[no-untyped-def]
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("remove", type_, name))

        def update_service(self, zc, type_, name) -> None:  # type: ignore[no-untyped-def]
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("update", type_, name))

    listener = MyServiceListener()

    browser = r.ServiceBrowser(zc, type_, None, listener)

    desc = {"path": "/~paulsm/"}
    address_parsed = "10.0.1.2"
    address = socket.inet_aton(address_parsed)
    info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[address])
    should_not_match = ServiceInfo(
        not_match_type_,
        not_match_registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[address],
    )

    _inject_response(
        zc,
        mock_incoming_msg(
            [
                info.dns_pointer(),
                info.dns_service(),
                info.dns_text(),
                *info.dns_addresses(),
            ]
        ),
    )
    _inject_response(
        zc,
        mock_incoming_msg(
            [
                should_not_match.dns_pointer(),
                should_not_match.dns_service(),
                should_not_match.dns_text(),
                *should_not_match.dns_addresses(),
            ]
        ),
    )
    time.sleep(0.2)
    info.port = 400
    info._dns_service_cache = None  # we are mutating the record so clear the cache

    _inject_response(
        zc,
        mock_incoming_msg([info.dns_service()]),
    )
    should_not_match.port = 400
    _inject_response(
        zc,
        mock_incoming_msg([should_not_match.dns_service()]),
    )
    time.sleep(0.2)

    assert callbacks == [
        ("add", type_, registration_name),
        ("update", type_, registration_name),
    ]
    browser.cancel()

    zc.close()


@patch.object(_engine, "_CACHE_CLEANUP_INTERVAL", 0.01)
def test_service_browser_expire_callbacks():
    """Test that the ServiceBrowser matching does not match partial names."""
    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=["127.0.0.1"])
    # start a browser
    type_ = "_old._tcp.local."
    registration_name = f"uniquezip323.{type_}"
    callbacks = []

    class MyServiceListener(r.ServiceListener):
        def add_service(self, zc, type_, name) -> None:  # type: ignore[no-untyped-def]
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("add", type_, name))

        def remove_service(self, zc, type_, name) -> None:  # type: ignore[no-untyped-def]
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("remove", type_, name))

        def update_service(self, zc, type_, name) -> None:  # type: ignore[no-untyped-def]
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("update", type_, name))

    listener = MyServiceListener()

    browser = r.ServiceBrowser(zc, type_, None, listener)

    desc = {"path": "/~paul2/"}
    address_parsed = "10.0.1.3"
    address = socket.inet_aton(address_parsed)
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "newname-2.local.",
        host_ttl=1,
        other_ttl=1,
        addresses=[address],
    )

    _inject_response(
        zc,
        mock_incoming_msg(
            [
                info.dns_pointer(),
                info.dns_service(),
                info.dns_text(),
                *info.dns_addresses(),
            ]
        ),
    )
    # Force the ttl to be 1 second
    now = current_time_millis()
    for cache_record in list(zc.cache.cache.values()):
        for record in cache_record:
            zc.cache._async_set_created_ttl(record, now, 1)

    time.sleep(0.3)
    info.port = 400
    info._dns_service_cache = None  # we are mutating the record so clear the cache

    _inject_response(
        zc,
        mock_incoming_msg([info.dns_service()]),
    )

    for _ in range(10):
        time.sleep(0.05)
        if len(callbacks) == 2:
            break

    assert callbacks == [
        ("add", type_, registration_name),
        ("update", type_, registration_name),
    ]

    for _ in range(25):
        time.sleep(0.05)
        if len(callbacks) == 3:
            break

    assert callbacks == [
        ("add", type_, registration_name),
        ("update", type_, registration_name),
        ("remove", type_, registration_name),
    ]
    browser.cancel()

    zc.close()


def test_scheduled_ptr_query_dunder_methods():
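    """Verify _ScheduledPTRQuery equality, ordering, and string representation."""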
    query75 = _ScheduledPTRQuery("zoomy._hap._tcp.local.", "_hap._tcp.local.", 120, 120, 75)
    query80 = _ScheduledPTRQuery("zoomy._hap._tcp.local.", "_hap._tcp.local.", 120, 120, 80)
    query75_2 = _ScheduledPTRQuery("zoomy._hap._tcp.local.", "_hap._tcp.local.", 120, 140, 75)
    other = object()
    stringified = str(query75)
    assert "zoomy._hap._tcp.local." in stringified
    assert "120" in stringified
    assert "75" in stringified
    assert "ScheduledPTRQuery" in stringified

    assert query75 == query75
    assert query75 != query80
    assert query75 == query75_2
    assert query75 < query80
    assert query75 <= query80
    assert query80 > query75
    assert query80 >= query75

    assert query75 != other
    with pytest.raises(TypeError):
        assert query75 < other  # type: ignore[operator]
    with pytest.raises(TypeError):
        assert query75 <= other  # type: ignore[operator]
    with pytest.raises(TypeError):
        assert query75 > other  # type: ignore[operator]
    with pytest.raises(TypeError):
        assert query75 >= other  # type: ignore[operator]


@pytest.mark.asyncio
async def test_close_zeroconf_without_browser_before_start_up_queries():
    """Test that we stop sending startup queries if zeroconf is closed out from under the browser."""
    service_added = asyncio.Event()
    type_ = "_http._tcp.local."
    registration_name = f"xxxyyy.{type_}"

    def on_service_state_change(zeroconf, service_type, state_change, name):
        if name == registration_name:
            if state_change is ServiceStateChange.Added:
                service_added.set()

    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    zeroconf_browser = aiozc.zeroconf
    zeroconf_browser.question_history = QuestionHistoryWithoutSuppression()
    await zeroconf_browser.async_wait_for_start()

    sends: list[r.DNSIncoming] = []

    def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()):
        """Sends an outgoing packet."""
        pout = r.DNSIncoming(out.packets()[0])
        sends.append(pout)

    assert len(zeroconf_browser.engine.protocols) == 2

    aio_zeroconf_registrar = AsyncZeroconf(interfaces=["127.0.0.1"])
    zeroconf_registrar = aio_zeroconf_registrar.zeroconf
    await aio_zeroconf_registrar.zeroconf.async_wait_for_start()

    assert len(zeroconf_registrar.engine.protocols) == 2
    # patch the zeroconf send so we can capture what is being sent
    with patch.object(zeroconf_browser, "async_send", send):
        service_added = asyncio.Event()

        browser = AsyncServiceBrowser(zeroconf_browser, type_, [on_service_state_change])
        info = ServiceInfo(
            type_,
            registration_name,
            80,
            0,
            0,
            {"path": "/~paulsm/"},
            "ash-2.local.",
            addresses=[socket.inet_aton("10.0.1.2")],
        )
        task = await aio_zeroconf_registrar.async_register_service(info)
        await task
        loop = asyncio.get_running_loop()
        try:
            await asyncio.wait_for(service_added.wait(), 1)
            assert service_added.is_set()
            await aiozc.async_close()
            sends.clear()
            # Make sure the startup queries are sent
            original_now = loop.time()
            now_millis = original_now * 1000
            for query_count in range(_services_browser.STARTUP_QUERIES):
                now_millis += (2**query_count) * 1000
                time_changed_millis(now_millis)

            # We should not send any queries after close
            assert not sends
        finally:
            await aio_zeroconf_registrar.async_close()
            await browser.async_cancel()


@pytest.mark.asyncio
async def test_close_zeroconf_without_browser_after_start_up_queries():
    """Test that we stop sending rescue queries if zeroconf is closed out from under the browser."""
    service_added = asyncio.Event()

    type_ = "_http._tcp.local."
    registration_name = f"xxxyyy.{type_}"

    def on_service_state_change(zeroconf, service_type, state_change, name):
        if name == registration_name:
            if state_change is ServiceStateChange.Added:
                service_added.set()

    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    zeroconf_browser = aiozc.zeroconf
    zeroconf_browser.question_history = QuestionHistoryWithoutSuppression()
    await zeroconf_browser.async_wait_for_start()

    sends: list[r.DNSIncoming] = []

    def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()):
        """Sends an outgoing packet."""
        pout = r.DNSIncoming(out.packets()[0])
        sends.append(pout)

    assert len(zeroconf_browser.engine.protocols) == 2

    aio_zeroconf_registrar = AsyncZeroconf(interfaces=["127.0.0.1"])
    zeroconf_registrar = aio_zeroconf_registrar.zeroconf
    await aio_zeroconf_registrar.zeroconf.async_wait_for_start()

    assert len(zeroconf_registrar.engine.protocols) == 2
    # patch the zeroconf send so we can capture what is being sent
    with patch.object(zeroconf_browser, "async_send", send):
        service_added = asyncio.Event()
        browser = AsyncServiceBrowser(zeroconf_browser, type_, [on_service_state_change])
        expected_ttl = const._DNS_OTHER_TTL
        info = ServiceInfo(
            type_,
            registration_name,
            80,
            0,
            0,
            {"path": "/~paulsm/"},
            "ash-2.local.",
            addresses=[socket.inet_aton("10.0.1.2")],
        )
        task = await aio_zeroconf_registrar.async_register_service(info)
        await task
        loop = asyncio.get_running_loop()
        try:
            await asyncio.wait_for(service_added.wait(), 1)
            assert service_added.is_set()
            sends.clear()
            # Make sure the startup queries are sent
            original_now = loop.time()
            now_millis = original_now * 1000
            for query_count in range(_services_browser.STARTUP_QUERIES):
                now_millis += (2**query_count) * 1000
                time_changed_millis(now_millis)

            # The startup queries should have been sent before close
            assert sends

            await aiozc.async_close()
            sends.clear()

            now_millis = original_now * 1000
            # Move time forward past when the TTL is no longer
            # fresh (AKA 75% of the TTL)
            now_millis += (expected_ttl * 1000) * 0.80
            time_changed_millis(now_millis)

            # We should not send the query after close
            assert not sends
        finally:
            await aio_zeroconf_registrar.async_close()
            await browser.async_cancel()
07070100000070000081A400000000000000000000000167C7AD160000F4AB000000000000000000000000000000000000003400000000python-zeroconf-0.146.0/tests/services/test_info.py"""Unit tests for zeroconf._services.info."""

from __future__ import annotations

import asyncio
import logging
import os
import socket
import threading
import unittest
from collections.abc import Iterable
from ipaddress import ip_address
from threading import Event
from unittest.mock import patch

import pytest

import zeroconf as r
from zeroconf import DNSAddress, RecordUpdate, const
from zeroconf._services import info
from zeroconf._services.info import ServiceInfo
from zeroconf._utils.net import IPVersion
from zeroconf.asyncio import AsyncZeroconf

from .. import _inject_response, has_working_ipv6

log = logging.getLogger("zeroconf")
original_logging_level = logging.NOTSET


def setup_module():
    global original_logging_level
    original_logging_level = log.level
    log.setLevel(logging.DEBUG)


def teardown_module():
    if original_logging_level != logging.NOTSET:
        log.setLevel(original_logging_level)


class TestServiceInfo(unittest.TestCase):
    def test_get_name(self):
        """Verify the name accessor can strip the type."""
        desc = {"path": "/~paulsm/"}
        service_name = "name._type._tcp.local."
        service_type = "_type._tcp.local."
        service_server = "ash-1.local."
        service_address = socket.inet_aton("10.0.1.2")
        info = ServiceInfo(
            service_type,
            service_name,
            22,
            0,
            0,
            desc,
            service_server,
            addresses=[service_address],
        )
        assert info.get_name() == "name"

    def test_service_info_rejects_non_matching_updates(self):
        """Verify records with the wrong name are rejected."""

        zc = r.Zeroconf(interfaces=["127.0.0.1"])
        desc = {"path": "/~paulsm/"}
        service_name = "name._type._tcp.local."
        service_type = "_type._tcp.local."
        service_server = "ash-1.local."
        service_address = socket.inet_aton("10.0.1.2")
        ttl = 120
        now = r.current_time_millis()
        info = ServiceInfo(
            service_type,
            service_name,
            22,
            0,
            0,
            desc,
            service_server,
            addresses=[service_address],
        )
        # Verify updating with an empty list of records is a no-op
        info.async_update_records(zc, now, [])
        # Matching updates
        info.async_update_records(
            zc,
            now,
            [
                RecordUpdate(
                    r.DNSText(
                        service_name,
                        const._TYPE_TXT,
                        const._CLASS_IN | const._CLASS_UNIQUE,
                        ttl,
                        b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==",
                    ),
                    None,
                )
            ],
        )
        assert info.properties[b"ci"] == b"2"
        info.async_update_records(
            zc,
            now,
            [
                RecordUpdate(
                    r.DNSService(
                        service_name,
                        const._TYPE_SRV,
                        const._CLASS_IN | const._CLASS_UNIQUE,
                        ttl,
                        0,
                        0,
                        80,
                        "ASH-2.local.",
                    ),
                    None,
                )
            ],
        )
        assert info.server_key == "ash-2.local."
        assert info.server == "ASH-2.local."
        new_address = socket.inet_aton("10.0.1.3")
        info.async_update_records(
            zc,
            now,
            [
                RecordUpdate(
                    r.DNSAddress(
                        "ASH-2.local.",
                        const._TYPE_A,
                        const._CLASS_IN | const._CLASS_UNIQUE,
                        ttl,
                        new_address,
                    ),
                    None,
                )
            ],
        )
        assert new_address in info.addresses
        # Non-matching updates
        info.async_update_records(
            zc,
            now,
            [
                RecordUpdate(
                    r.DNSText(
                        "incorrect.name.",
                        const._TYPE_TXT,
                        const._CLASS_IN | const._CLASS_UNIQUE,
                        ttl,
                        b"\x04ff=0\x04ci=3\x04sf=0\x0bsh=6fLM5A==",
                    ),
                    None,
                )
            ],
        )
        assert info.properties[b"ci"] == b"2"
        info.async_update_records(
            zc,
            now,
            [
                RecordUpdate(
                    r.DNSService(
                        "incorrect.name.",
                        const._TYPE_SRV,
                        const._CLASS_IN | const._CLASS_UNIQUE,
                        ttl,
                        0,
                        0,
                        80,
                        "ASH-2.local.",
                    ),
                    None,
                )
            ],
        )
        assert info.server_key == "ash-2.local."
        assert info.server == "ASH-2.local."
        new_address = socket.inet_aton("10.0.1.4")
        info.async_update_records(
            zc,
            now,
            [
                RecordUpdate(
                    r.DNSAddress(
                        "incorrect.name.",
                        const._TYPE_A,
                        const._CLASS_IN | const._CLASS_UNIQUE,
                        ttl,
                        new_address,
                    ),
                    None,
                )
            ],
        )
        assert new_address not in info.addresses
        zc.close()

    def test_service_info_rejects_expired_records(self):
        """Verify records that are expired are rejected."""
        zc = r.Zeroconf(interfaces=["127.0.0.1"])
        desc = {"path": "/~paulsm/"}
        service_name = "name._type._tcp.local."
        service_type = "_type._tcp.local."
        service_server = "ash-1.local."
        service_address = socket.inet_aton("10.0.1.2")
        ttl = 120
        now = r.current_time_millis()
        info = ServiceInfo(
            service_type,
            service_name,
            22,
            0,
            0,
            desc,
            service_server,
            addresses=[service_address],
        )
        # Matching updates
        info.async_update_records(
            zc,
            now,
            [
                RecordUpdate(
                    r.DNSText(
                        service_name,
                        const._TYPE_TXT,
                        const._CLASS_IN | const._CLASS_UNIQUE,
                        ttl,
                        b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==",
                    ),
                    None,
                )
            ],
        )
        assert info.properties[b"ci"] == b"2"
        # Expired record
        expired_record = r.DNSText(
            service_name,
            const._TYPE_TXT,
            const._CLASS_IN | const._CLASS_UNIQUE,
            ttl,
            b"\x04ff=0\x04ci=3\x04sf=0\x0bsh=6fLM5A==",
        )
        zc.cache._async_set_created_ttl(expired_record, 1000, 1)
        info.async_update_records(zc, now, [RecordUpdate(expired_record, None)])
        assert info.properties[b"ci"] == b"2"
        zc.close()

    @unittest.skipIf(not has_working_ipv6(), "Requires IPv6")
    @unittest.skipIf(os.environ.get("SKIP_IPV6"), "IPv6 tests disabled")
    def test_get_info_partial(self):
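        """Verify outstanding questions narrow as partial responses arrive."""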
        zc = r.Zeroconf(interfaces=["127.0.0.1"])

        service_name = "name._type._tcp.local."
        service_type = "_type._tcp.local."
        service_server = "ash-1.local."
        service_text = b"path=/~matt1/"
        service_address = "10.0.1.2"
        service_address_v6_ll = "fe80::52e:c2f2:bc5f:e9c6"
        service_scope_id = 12

        service_info = None
        send_event = Event()
        service_info_event = Event()

        last_sent: r.DNSOutgoing | None = None

        def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()):
            """Sends an outgoing packet."""
            nonlocal last_sent

            last_sent = out
            send_event.set()

        # patch the zeroconf send
        with patch.object(zc, "async_send", send):

            def mock_incoming_msg(records: Iterable[r.DNSRecord]) -> r.DNSIncoming:
                generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)

                for record in records:
                    generated.add_answer_at_time(record, 0)

                return r.DNSIncoming(generated.packets()[0])

            def get_service_info_helper(zc, type, name):
                nonlocal service_info
                service_info = zc.get_service_info(type, name)
                service_info_event.set()

            try:
                ttl = 120
                helper_thread = threading.Thread(
                    target=get_service_info_helper,
                    args=(zc, service_type, service_name),
                )
                helper_thread.start()
                wait_time = 1

                # Expect query for SRV, TXT, A, AAAA
                send_event.wait(wait_time)
                assert last_sent is not None
                assert len(last_sent.questions) == 4
                assert r.DNSQuestion(service_name, const._TYPE_SRV, const._CLASS_IN) in last_sent.questions
                assert r.DNSQuestion(service_name, const._TYPE_TXT, const._CLASS_IN) in last_sent.questions
                assert r.DNSQuestion(service_name, const._TYPE_A, const._CLASS_IN) in last_sent.questions
                assert r.DNSQuestion(service_name, const._TYPE_AAAA, const._CLASS_IN) in last_sent.questions
                assert service_info is None

                # Expect query for SRV, A, AAAA
                last_sent = None
                send_event.clear()
                _inject_response(
                    zc,
                    mock_incoming_msg(
                        [
                            r.DNSText(
                                service_name,
                                const._TYPE_TXT,
                                const._CLASS_IN | const._CLASS_UNIQUE,
                                ttl,
                                service_text,
                            )
                        ]
                    ),
                )
                send_event.wait(wait_time)
                assert last_sent is not None
                assert len(last_sent.questions) == 3  # type: ignore[unreachable]
                assert r.DNSQuestion(service_name, const._TYPE_SRV, const._CLASS_IN) in last_sent.questions
                assert r.DNSQuestion(service_name, const._TYPE_A, const._CLASS_IN) in last_sent.questions
                assert r.DNSQuestion(service_name, const._TYPE_AAAA, const._CLASS_IN) in last_sent.questions
                assert service_info is None

                # Expect query for A, AAAA
                last_sent = None
                send_event.clear()
                _inject_response(
                    zc,
                    mock_incoming_msg(
                        [
                            r.DNSService(
                                service_name,
                                const._TYPE_SRV,
                                const._CLASS_IN | const._CLASS_UNIQUE,
                                ttl,
                                0,
                                0,
                                80,
                                service_server,
                            )
                        ]
                    ),
                )
                send_event.wait(wait_time)
                assert last_sent is not None
                assert len(last_sent.questions) == 2
                assert r.DNSQuestion(service_server, const._TYPE_A, const._CLASS_IN) in last_sent.questions
                assert r.DNSQuestion(service_server, const._TYPE_AAAA, const._CLASS_IN) in last_sent.questions
                last_sent = None
                assert service_info is None

                # Expect no further queries
                last_sent = None
                send_event.clear()
                _inject_response(
                    zc,
                    mock_incoming_msg(
                        [
                            r.DNSAddress(
                                service_server,
                                const._TYPE_A,
                                const._CLASS_IN | const._CLASS_UNIQUE,
                                ttl,
                                socket.inet_pton(socket.AF_INET, service_address),
                            ),
                            r.DNSAddress(
                                service_server,
                                const._TYPE_AAAA,
                                const._CLASS_IN | const._CLASS_UNIQUE,
                                ttl,
                                socket.inet_pton(socket.AF_INET6, service_address_v6_ll),
                                scope_id=service_scope_id,
                            ),
                        ]
                    ),
                )
                send_event.wait(wait_time)
                assert last_sent is None
                assert service_info is not None

            finally:
                helper_thread.join()
                zc.remove_all_service_listeners()
                zc.close()

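    # Background for the next test: mDNS duplicate question suppression
    # (RFC 6762 §7.3) says a host should not re-ask a question it has
    # recently seen another host ask, so questions planted in
    # zc.question_history below are expected to be skipped by the resolver.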
    @unittest.skipIf(not has_working_ipv6(), "Requires IPv6")
    @unittest.skipIf(os.environ.get("SKIP_IPV6"), "IPv6 tests disabled")
    def test_get_info_suppressed_by_question_history(self):
        zc = r.Zeroconf(interfaces=["127.0.0.1"])

        service_name = "name._type._tcp.local."
        service_type = "_type._tcp.local."

        service_info = None
        send_event = Event()
        service_info_event = Event()

        last_sent: r.DNSOutgoing | None = None

        def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()):
            """Sends an outgoing packet."""
            nonlocal last_sent

            last_sent = out
            send_event.set()

        # patch the zeroconf send
        with patch.object(zc, "async_send", send):

            def mock_incoming_msg(records: Iterable[r.DNSRecord]) -> r.DNSIncoming:
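                # Serialize a response with DNSOutgoing and parse it back
                # with DNSIncoming, emulating a packet received off the wire.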
                generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)

                for record in records:
                    generated.add_answer_at_time(record, 0)

                return r.DNSIncoming(generated.packets()[0])

            def get_service_info_helper(zc, type, name):
                nonlocal service_info
                service_info = zc.get_service_info(type, name)
                service_info_event.set()

            try:
                helper_thread = threading.Thread(
                    target=get_service_info_helper,
                    args=(zc, service_type, service_name),
                )
                helper_thread.start()
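                # _LISTENER_TIME is the interval between successive info
                # queries and _AVOID_SYNC_DELAY_RANDOM_INTERVAL[1] the
                # worst-case random jitter (both in ms); 5 ms covers
                # scheduling overhead.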
                wait_time = (const._LISTENER_TIME + info._AVOID_SYNC_DELAY_RANDOM_INTERVAL[1] + 5) / 1000

                # Expect query for SRV, TXT, A, AAAA
                send_event.wait(wait_time)
                assert last_sent is not None
                assert len(last_sent.questions) == 4
                assert r.DNSQuestion(service_name, const._TYPE_SRV, const._CLASS_IN) in last_sent.questions
                assert r.DNSQuestion(service_name, const._TYPE_TXT, const._CLASS_IN) in last_sent.questions
                assert r.DNSQuestion(service_name, const._TYPE_A, const._CLASS_IN) in last_sent.questions
                assert r.DNSQuestion(service_name, const._TYPE_AAAA, const._CLASS_IN) in last_sent.questions
                assert service_info is None

                # Expect query for SRV only as A, AAAA, and TXT are suppressed
                # by the question history
                last_sent = None
                send_event.clear()
                for _ in range(3):
                    send_event.wait(
                        wait_time * 0.25
                    )  # Wait long enough to be inside the question history window
                    now = r.current_time_millis()
                    zc.question_history.add_question_at_time(
                        r.DNSQuestion(service_name, const._TYPE_A, const._CLASS_IN),
                        now,
                        set(),
                    )
                    zc.question_history.add_question_at_time(
                        r.DNSQuestion(service_name, const._TYPE_AAAA, const._CLASS_IN),
                        now,
                        set(),
                    )
                    zc.question_history.add_question_at_time(
                        r.DNSQuestion(service_name, const._TYPE_TXT, const._CLASS_IN),
                        now,
                        set(),
                    )
                send_event.wait(wait_time * 0.25)
                assert last_sent is not None
                assert len(last_sent.questions) == 1  # type: ignore[unreachable]
                assert r.DNSQuestion(service_name, const._TYPE_SRV, const._CLASS_IN) in last_sent.questions
                assert service_info is None

                wait_time = (
                    const._DUPLICATE_QUESTION_INTERVAL + info._AVOID_SYNC_DELAY_RANDOM_INTERVAL[1] + 5
                ) / 1000
                # Expect no queries as all are suppressed by the question history
                last_sent = None
                send_event.clear()
                for _ in range(3):
                    send_event.wait(
                        wait_time * 0.25
                    )  # Wait long enough to be inside the question history window
                    now = r.current_time_millis()
                    zc.question_history.add_question_at_time(
                        r.DNSQuestion(service_name, const._TYPE_A, const._CLASS_IN),
                        now,
                        set(),
                    )
                    zc.question_history.add_question_at_time(
                        r.DNSQuestion(service_name, const._TYPE_AAAA, const._CLASS_IN),
                        now,
                        set(),
                    )
                    zc.question_history.add_question_at_time(
                        r.DNSQuestion(service_name, const._TYPE_TXT, const._CLASS_IN),
                        now,
                        set(),
                    )
                    zc.question_history.add_question_at_time(
                        r.DNSQuestion(service_name, const._TYPE_SRV, const._CLASS_IN),
                        now,
                        set(),
                    )
                send_event.wait(wait_time * 0.25)
                # All questions are suppressed so no query should be sent
                assert last_sent is None
                assert service_info is None

            finally:
                helper_thread.join()
                zc.remove_all_service_listeners()
                zc.close()

    def test_get_info_single(self):
        zc = r.Zeroconf(interfaces=["127.0.0.1"])

        service_name = "name._type._tcp.local."
        service_type = "_type._tcp.local."
        service_server = "ash-1.local."
        service_text = b"path=/~matt1/"
        service_address = "10.0.1.2"

        service_info = None
        send_event = Event()
        service_info_event = Event()

        last_sent: r.DNSOutgoing | None = None

        def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()):
            """Sends an outgoing packet."""
            nonlocal last_sent

            last_sent = out
            send_event.set()

        # patch the zeroconf send
        with patch.object(zc, "async_send", send):

            def mock_incoming_msg(records: Iterable[r.DNSRecord]) -> r.DNSIncoming:
                generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)

                for record in records:
                    generated.add_answer_at_time(record, 0)

                return r.DNSIncoming(generated.packets()[0])

            def get_service_info_helper(zc, type, name):
                nonlocal service_info
                service_info = zc.get_service_info(type, name)
                service_info_event.set()

            try:
                ttl = 120
                helper_thread = threading.Thread(
                    target=get_service_info_helper,
                    args=(zc, service_type, service_name),
                )
                helper_thread.start()
                wait_time = 1

                # Expect query for SRV, TXT, A, AAAA
                send_event.wait(wait_time)
                assert last_sent is not None
                assert len(last_sent.questions) == 4
                assert r.DNSQuestion(service_name, const._TYPE_SRV, const._CLASS_IN) in last_sent.questions
                assert r.DNSQuestion(service_name, const._TYPE_TXT, const._CLASS_IN) in last_sent.questions
                assert r.DNSQuestion(service_name, const._TYPE_A, const._CLASS_IN) in last_sent.questions
                assert r.DNSQuestion(service_name, const._TYPE_AAAA, const._CLASS_IN) in last_sent.questions
                assert service_info is None

                # Expect no further queries
                last_sent = None
                send_event.clear()
                _inject_response(
                    zc,
                    mock_incoming_msg(
                        [
                            r.DNSText(
                                service_name,
                                const._TYPE_TXT,
                                const._CLASS_IN | const._CLASS_UNIQUE,
                                ttl,
                                service_text,
                            ),
                            r.DNSService(
                                service_name,
                                const._TYPE_SRV,
                                const._CLASS_IN | const._CLASS_UNIQUE,
                                ttl,
                                0,
                                0,
                                80,
                                service_server,
                            ),
                            r.DNSAddress(
                                service_server,
                                const._TYPE_A,
                                const._CLASS_IN | const._CLASS_UNIQUE,
                                ttl,
                                socket.inet_pton(socket.AF_INET, service_address),
                            ),
                        ]
                    ),
                )
                send_event.wait(wait_time)
                assert last_sent is None
                assert service_info is not None

            finally:
                helper_thread.join()
                zc.remove_all_service_listeners()
                zc.close()

    def test_service_info_duplicate_properties_txt_records(self):
        """Verify the first property is always used when there are duplicates in a txt record."""

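        # RFC 6763 §6.4: if a key appears more than once in a TXT record,
        # all but the first occurrence must be silently ignored, so the
        # second "ci" value below is expected to be discarded.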
        zc = r.Zeroconf(interfaces=["127.0.0.1"])
        desc = {"path": "/~paulsm/"}
        service_name = "name._type._tcp.local."
        service_type = "_type._tcp.local."
        service_server = "ash-1.local."
        service_address = socket.inet_aton("10.0.1.2")
        ttl = 120
        now = r.current_time_millis()
        info = ServiceInfo(
            service_type,
            service_name,
            22,
            0,
            0,
            desc,
            service_server,
            addresses=[service_address],
        )
        info.async_update_records(
            zc,
            now,
            [
                r.RecordUpdate(
                    r.DNSText(
                        service_name,
                        const._TYPE_TXT,
                        const._CLASS_IN | const._CLASS_UNIQUE,
                        ttl,
                        b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==\x04dd=0\x04jl=2\x04qq=0\x0brr=6fLM5A==\x04ci=3",
                    ),
                    None,
                )
            ],
        )
        assert info.properties[b"dd"] == b"0"
        assert info.properties[b"jl"] == b"2"
        assert info.properties[b"ci"] == b"2"
        zc.close()


def test_multiple_addresses():
    type_ = "_http._tcp.local."
    registration_name = f"xxxyyy.{type_}"
    desc = {"path": "/~paulsm/"}
    address_parsed = "10.0.1.2"
    address = socket.inet_aton(address_parsed)

    # New kwarg way
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[address, address],
    )

    assert info.addresses == [address, address]
    assert info.parsed_addresses() == [address_parsed, address_parsed]
    assert info.parsed_scoped_addresses() == [address_parsed, address_parsed]

    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        parsed_addresses=[address_parsed, address_parsed],
    )
    assert info.addresses == [address, address]
    assert info.parsed_addresses() == [address_parsed, address_parsed]
    assert info.parsed_scoped_addresses() == [address_parsed, address_parsed]

    if has_working_ipv6() and not os.environ.get("SKIP_IPV6"):
        address_v6_parsed = "2001:db8::1"
        address_v6 = socket.inet_pton(socket.AF_INET6, address_v6_parsed)
        address_v6_ll_parsed = "fe80::52e:c2f2:bc5f:e9c6"
        address_v6_ll_scoped_parsed = "fe80::52e:c2f2:bc5f:e9c6%12"
        address_v6_ll = socket.inet_pton(socket.AF_INET6, address_v6_ll_parsed)
        interface_index = 12
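        # The interface index becomes the "%12" zone suffix that
        # parsed_scoped_addresses() attaches to the link-local (fe80::/10)
        # address; the globally routable address gets no suffix.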
        infos = [
            ServiceInfo(
                type_,
                registration_name,
                80,
                0,
                0,
                desc,
                "ash-2.local.",
                addresses=[address, address_v6, address_v6_ll],
                interface_index=interface_index,
            ),
            ServiceInfo(
                type_,
                registration_name,
                80,
                0,
                0,
                desc,
                "ash-2.local.",
                parsed_addresses=[
                    address_parsed,
                    address_v6_parsed,
                    address_v6_ll_parsed,
                ],
                interface_index=interface_index,
            ),
        ]
        for info in infos:
            assert info.addresses == [address]
            assert info.addresses_by_version(r.IPVersion.All) == [
                address,
                address_v6,
                address_v6_ll,
            ]
            assert info.ip_addresses_by_version(r.IPVersion.All) == [
                ip_address(address),
                ip_address(address_v6),
                ip_address(address_v6_ll_scoped_parsed),
            ]
            assert info.addresses_by_version(r.IPVersion.V4Only) == [address]
            assert info.ip_addresses_by_version(r.IPVersion.V4Only) == [ip_address(address)]
            assert info.addresses_by_version(r.IPVersion.V6Only) == [
                address_v6,
                address_v6_ll,
            ]
            assert info.ip_addresses_by_version(r.IPVersion.V6Only) == [
                ip_address(address_v6),
                ip_address(address_v6_ll_scoped_parsed),
            ]
            assert info.parsed_addresses() == [
                address_parsed,
                address_v6_parsed,
                address_v6_ll_parsed,
            ]
            assert info.parsed_addresses(r.IPVersion.V4Only) == [address_parsed]
            assert info.parsed_addresses(r.IPVersion.V6Only) == [
                address_v6_parsed,
                address_v6_ll_parsed,
            ]
            assert info.parsed_scoped_addresses() == [
                address_parsed,
                address_v6_parsed,
                address_v6_ll_scoped_parsed,
            ]
            assert info.parsed_scoped_addresses(r.IPVersion.V4Only) == [address_parsed]
            assert info.parsed_scoped_addresses(r.IPVersion.V6Only) == [
                address_v6_parsed,
                address_v6_ll_scoped_parsed,
            ]


def test_scoped_addresses_from_cache():
    type_ = "_http._tcp.local."
    registration_name = f"scoped.{type_}"
    zeroconf = r.Zeroconf(interfaces=["127.0.0.1"])
    host = "scoped.local."

    zeroconf.cache.async_add_records(
        [
            r.DNSPointer(
                type_,
                const._TYPE_PTR,
                const._CLASS_IN | const._CLASS_UNIQUE,
                120,
                registration_name,
            ),
            r.DNSService(
                registration_name,
                const._TYPE_SRV,
                const._CLASS_IN | const._CLASS_UNIQUE,
                120,
                0,
                0,
                80,
                host,
            ),
            r.DNSAddress(
                host,
                const._TYPE_AAAA,
                const._CLASS_IN | const._CLASS_UNIQUE,
                120,
                socket.inet_pton(socket.AF_INET6, "fe80::52e:c2f2:bc5f:e9c6"),
                scope_id=12,
            ),
        ]
    )

    # New kwarg way
    info = ServiceInfo(type_, registration_name)
    info.load_from_cache(zeroconf)
    assert info.parsed_scoped_addresses() == ["fe80::52e:c2f2:bc5f:e9c6%12"]
    assert info.ip_addresses_by_version(r.IPVersion.V6Only) == [ip_address("fe80::52e:c2f2:bc5f:e9c6%12")]
    zeroconf.close()


# This test uses asyncio because it needs to access the cache directly
# which is not threadsafe
@pytest.mark.asyncio
async def test_multiple_a_addresses_newest_address_first():
    """Test that info.addresses returns the newest seen address first."""
    type_ = "_http._tcp.local."
    registration_name = f"multiarec.{type_}"
    desc = {"path": "/~paulsm/"}
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    cache = aiozc.zeroconf.cache
    host = "multahost.local."
    record1 = r.DNSAddress(host, const._TYPE_A, const._CLASS_IN, 1000, b"\x7f\x00\x00\x01")
    record2 = r.DNSAddress(host, const._TYPE_A, const._CLASS_IN, 1000, b"\x7f\x00\x00\x02")
    cache.async_add_records([record1, record2])
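    # record2 is added after record1, so it is the newest address and is
    # expected to sort first.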

    # New kwarg way
    info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, host)
    info.load_from_cache(aiozc.zeroconf)
    assert info.addresses == [b"\x7f\x00\x00\x02", b"\x7f\x00\x00\x01"]
    await aiozc.async_close()


@pytest.mark.asyncio
async def test_invalid_a_addresses(caplog):
    type_ = "_http._tcp.local."
    registration_name = f"multiarec.{type_}"
    desc = {"path": "/~paulsm/"}
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    cache = aiozc.zeroconf.cache
    host = "multahost.local."
    record1 = r.DNSAddress(host, const._TYPE_A, const._CLASS_IN, 1000, b"a")
    record2 = r.DNSAddress(host, const._TYPE_A, const._CLASS_IN, 1000, b"b")
    cache.async_add_records([record1, record2])

    # New kwarg way
    info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, host)
    info.load_from_cache(aiozc.zeroconf)
    assert not info.addresses
    assert "Encountered invalid address while processing record" in caplog.text

    await aiozc.async_close()


@unittest.skipIf(not has_working_ipv6(), "Requires IPv6")
@unittest.skipIf(os.environ.get("SKIP_IPV6"), "IPv6 tests disabled")
def test_filter_address_by_type_from_service_info():
    """Verify dns_addresses can filter by ipversion."""
    desc = {"path": "/~paulsm/"}
    type_ = "_homeassistant._tcp.local."
    name = "MyTestHome"
    registration_name = f"{name}.{type_}"
    ipv4 = socket.inet_aton("10.0.1.2")
    ipv6 = socket.inet_pton(socket.AF_INET6, "2001:db8::1")
    info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[ipv4, ipv6])

    def dns_addresses_to_addresses(dns_address: list[DNSAddress]) -> list[bytes]:
        return [address.address for address in dns_address]

    assert dns_addresses_to_addresses(info.dns_addresses()) == [ipv4, ipv6]
    assert dns_addresses_to_addresses(info.dns_addresses(version=r.IPVersion.All)) == [
        ipv4,
        ipv6,
    ]
    assert dns_addresses_to_addresses(info.dns_addresses(version=r.IPVersion.V4Only)) == [ipv4]
    assert dns_addresses_to_addresses(info.dns_addresses(version=r.IPVersion.V6Only)) == [ipv6]


def test_changing_name_updates_serviceinfo_key():
    """Verify a name change will adjust the underlying key value."""
    type_ = "_homeassistant._tcp.local."
    name = "MyTestHome"
    info_service = ServiceInfo(
        type_,
        f"{name}.{type_}",
        80,
        0,
        0,
        {"path": "/~paulsm/"},
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    assert info_service.key == "mytesthome._homeassistant._tcp.local."
    info_service.name = "YourTestHome._homeassistant._tcp.local."
    assert info_service.key == "yourtesthome._homeassistant._tcp.local."


def test_serviceinfo_address_updates():
    """Verify adding/removing/setting addresses on ServiceInfo."""
    type_ = "_homeassistant._tcp.local."
    name = "MyTestHome"

    # Verify addresses and parsed_addresses are mutually exclusive
    with pytest.raises(TypeError):
        info_service = ServiceInfo(
            type_,
            f"{name}.{type_}",
            80,
            0,
            0,
            {"path": "/~paulsm/"},
            "ash-2.local.",
            addresses=[socket.inet_aton("10.0.1.2")],
            parsed_addresses=["10.0.1.2"],
        )

    info_service = ServiceInfo(
        type_,
        f"{name}.{type_}",
        80,
        0,
        0,
        {"path": "/~paulsm/"},
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    info_service.addresses = [socket.inet_aton("10.0.1.3")]
    assert info_service.addresses == [socket.inet_aton("10.0.1.3")]


def test_serviceinfo_accepts_bytes_or_string_dict():
    """Verify a bytes or string dict can be passed to ServiceInfo."""
    type_ = "_homeassistant._tcp.local."
    name = "MyTestHome"
    addresses = [socket.inet_aton("10.0.1.2")]
    server_name = "ash-2.local."
    info_service = ServiceInfo(
        type_,
        f"{name}.{type_}",
        80,
        0,
        0,
        {b"path": b"/~paulsm/"},
        server_name,
        addresses=addresses,
    )
    assert info_service.dns_text().text == b"\x0epath=/~paulsm/"
    info_service = ServiceInfo(
        type_,
        f"{name}.{type_}",
        80,
        0,
        0,
        {"path": "/~paulsm/"},
        server_name,
        addresses=addresses,
    )
    assert info_service.dns_text().text == b"\x0epath=/~paulsm/"
    info_service = ServiceInfo(
        type_,
        f"{name}.{type_}",
        80,
        0,
        0,
        {b"path": "/~paulsm/"},
        server_name,
        addresses=addresses,
    )
    assert info_service.dns_text().text == b"\x0epath=/~paulsm/"
    info_service = ServiceInfo(
        type_,
        f"{name}.{type_}",
        80,
        0,
        0,
        {"path": b"/~paulsm/"},
        server_name,
        addresses=addresses,
    )
    assert info_service.dns_text().text == b"\x0epath=/~paulsm/"


def test_asking_qu_questions():
    """Verify explicitly asking QU questions."""
    type_ = "_quservice._tcp.local."
    zeroconf = r.Zeroconf(interfaces=["127.0.0.1"])

    # we are going to patch the zeroconf send to check query transmission
    old_send = zeroconf.async_send

    first_outgoing = None

    def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT):
        """Sends an outgoing packet."""
        nonlocal first_outgoing
        if first_outgoing is None:
            first_outgoing = out
        old_send(out, addr=addr, port=port)

    # patch the zeroconf send
    with patch.object(zeroconf, "async_send", send):
        zeroconf.get_service_info(f"name.{type_}", type_, 500, question_type=r.DNSQuestionType.QU)
        assert first_outgoing.questions[0].unicast is True  # type: ignore[union-attr]
        zeroconf.close()


def test_asking_qm_questions():
    """Verify explicitly asking QM questions."""
    type_ = "_quservice._tcp.local."
    zeroconf = r.Zeroconf(interfaces=["127.0.0.1"])

    # we are going to patch the zeroconf send to check query transmission
    old_send = zeroconf.async_send

    first_outgoing = None

    def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT):
        """Sends an outgoing packet."""
        nonlocal first_outgoing
        if first_outgoing is None:
            first_outgoing = out
        old_send(out, addr=addr, port=port)

    # patch the zeroconf send
    with patch.object(zeroconf, "async_send", send):
        zeroconf.get_service_info(f"name.{type_}", type_, 500, question_type=r.DNSQuestionType.QM)
        assert first_outgoing.questions[0].unicast is False  # type: ignore[union-attr]
        zeroconf.close()


def test_request_timeout():
    """Test that the timeout does not throw an exception and finishes close to the actual timeout."""
    zeroconf = r.Zeroconf(interfaces=["127.0.0.1"])
    start_time = r.current_time_millis()
    assert zeroconf.get_service_info("_notfound.local.", "notthere._notfound.local.") is None
    end_time = r.current_time_millis()
    zeroconf.close()
    # 3000ms for the default timeout
    # 1000ms for loaded systems + schedule overhead
    assert (end_time - start_time) < 3000 + 1000


@pytest.mark.asyncio
async def test_we_try_four_times_with_random_delay():
    """Verify we try four times even with the random delay."""
    type_ = "_typethatisnothere._tcp.local."
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])

    # we are going to patch the zeroconf send to check query transmission
    request_count = 0

    def async_send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT):
        """Sends an outgoing packet."""
        nonlocal request_count
        request_count += 1

    # patch the zeroconf send
    with patch.object(aiozc.zeroconf, "async_send", async_send):
        await aiozc.async_get_service_info(f"willnotbefound.{type_}", type_)

    await aiozc.async_close()

    assert request_count == 4


@pytest.mark.asyncio
async def test_release_wait_when_new_recorded_added():
    """Test that async_request returns as soon as new matching records are added to the cache."""
    type_ = "_http._tcp.local."
    registration_name = f"multiarec.{type_}"
    desc = {"path": "/~paulsm/"}
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    host = "multahost.local."

    # New kwarg way
    info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, host)
    task = asyncio.create_task(info.async_request(aiozc.zeroconf, timeout=200))
    generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    generated.add_answer_at_time(
        r.DNSNsec(
            registration_name,
            const._TYPE_NSEC,
            const._CLASS_IN | const._CLASS_UNIQUE,
            const._DNS_OTHER_TTL,
            registration_name,
            [const._TYPE_AAAA],
        ),
        0,
    )
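    # The NSEC record is a negative response (RFC 6762 §6.1); in this code
    # base its rdtypes list the types that do not exist, so the requester
    # will not keep waiting for an AAAA answer.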
    generated.add_answer_at_time(
        r.DNSService(
            registration_name,
            const._TYPE_SRV,
            const._CLASS_IN | const._CLASS_UNIQUE,
            10000,
            0,
            0,
            80,
            host,
        ),
        0,
    )
    generated.add_answer_at_time(
        r.DNSAddress(
            host,
            const._TYPE_A,
            const._CLASS_IN,
            10000,
            b"\x7f\x00\x00\x01",
        ),
        0,
    )
    generated.add_answer_at_time(
        r.DNSText(
            registration_name,
            const._TYPE_TXT,
            const._CLASS_IN | const._CLASS_UNIQUE,
            10000,
            b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==",
        ),
        0,
    )
    await aiozc.zeroconf.async_wait_for_start()
    await asyncio.sleep(0)
    aiozc.zeroconf.record_manager.async_updates_from_response(r.DNSIncoming(generated.packets()[0]))
    assert await asyncio.wait_for(task, timeout=2)
    assert info.addresses == [b"\x7f\x00\x00\x01"]
    await aiozc.async_close()


@pytest.mark.asyncio
async def test_port_changes_are_seen():
    """Test that port changes are seen by async_request."""
    type_ = "_http._tcp.local."
    registration_name = f"multiarec.{type_}"
    desc = {"path": "/~paulsm/"}
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    host = "multahost.local."

    # New kwarg way
    generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    generated.add_answer_at_time(
        r.DNSNsec(
            registration_name,
            const._TYPE_NSEC,
            const._CLASS_IN | const._CLASS_UNIQUE,
            const._DNS_OTHER_TTL,
            registration_name,
            [const._TYPE_AAAA],
        ),
        0,
    )
    generated.add_answer_at_time(
        r.DNSService(
            registration_name,
            const._TYPE_SRV,
            const._CLASS_IN | const._CLASS_UNIQUE,
            10000,
            0,
            0,
            80,
            host,
        ),
        0,
    )
    generated.add_answer_at_time(
        r.DNSAddress(
            host,
            const._TYPE_A,
            const._CLASS_IN,
            10000,
            b"\x7f\x00\x00\x01",
        ),
        0,
    )
    generated.add_answer_at_time(
        r.DNSText(
            registration_name,
            const._TYPE_TXT,
            const._CLASS_IN | const._CLASS_UNIQUE,
            10000,
            b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==",
        ),
        0,
    )
    await aiozc.zeroconf.async_wait_for_start()
    await asyncio.sleep(0)
    aiozc.zeroconf.record_manager.async_updates_from_response(r.DNSIncoming(generated.packets()[0]))

    generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    generated.add_answer_at_time(
        r.DNSService(
            registration_name,
            const._TYPE_SRV,
            const._CLASS_IN | const._CLASS_UNIQUE,
            10000,
            90,
            90,
            81,
            host,
        ),
        0,
    )
    aiozc.zeroconf.record_manager.async_updates_from_response(r.DNSIncoming(generated.packets()[0]))

    info = ServiceInfo(type_, registration_name, 80, 10, 10, desc, host)
    await info.async_request(aiozc.zeroconf, timeout=200)
    assert info.port == 81
    assert info.priority == 90
    assert info.weight == 90
    await aiozc.async_close()


@pytest.mark.asyncio
async def test_port_changes_are_seen_with_directed_request():
    """Test that port changes are seen by async_request with a directed request."""
    type_ = "_http._tcp.local."
    registration_name = f"multiarec.{type_}"
    desc = {"path": "/~paulsm/"}
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    host = "multahost.local."

    # New kwarg way
    generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    generated.add_answer_at_time(
        r.DNSNsec(
            registration_name,
            const._TYPE_NSEC,
            const._CLASS_IN | const._CLASS_UNIQUE,
            const._DNS_OTHER_TTL,
            registration_name,
            [const._TYPE_AAAA],
        ),
        0,
    )
    generated.add_answer_at_time(
        r.DNSService(
            registration_name,
            const._TYPE_SRV,
            const._CLASS_IN | const._CLASS_UNIQUE,
            10000,
            0,
            0,
            80,
            host,
        ),
        0,
    )
    generated.add_answer_at_time(
        r.DNSAddress(
            host,
            const._TYPE_A,
            const._CLASS_IN,
            10000,
            b"\x7f\x00\x00\x01",
        ),
        0,
    )
    generated.add_answer_at_time(
        r.DNSText(
            registration_name,
            const._TYPE_TXT,
            const._CLASS_IN | const._CLASS_UNIQUE,
            10000,
            b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==",
        ),
        0,
    )
    await aiozc.zeroconf.async_wait_for_start()
    await asyncio.sleep(0)
    aiozc.zeroconf.record_manager.async_updates_from_response(r.DNSIncoming(generated.packets()[0]))

    generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    generated.add_answer_at_time(
        r.DNSService(
            registration_name,
            const._TYPE_SRV,
            const._CLASS_IN | const._CLASS_UNIQUE,
            10000,
            90,
            90,
            81,
            host,
        ),
        0,
    )
    aiozc.zeroconf.record_manager.async_updates_from_response(r.DNSIncoming(generated.packets()[0]))

    info = ServiceInfo(type_, registration_name, 80, 10, 10, desc, host)
    await info.async_request(aiozc.zeroconf, timeout=200, addr="127.0.0.1", port=5353)
    assert info.port == 81
    assert info.priority == 90
    assert info.weight == 90
    await aiozc.async_close()


@pytest.mark.asyncio
async def test_ipv4_changes_are_seen():
    """Test that ipv4 changes are seen by async_request."""
    type_ = "_http._tcp.local."
    registration_name = f"multiaipv4rec.{type_}"
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    host = "multahost.local."

    # New kwarg way
    generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    generated.add_answer_at_time(
        r.DNSNsec(
            registration_name,
            const._TYPE_NSEC,
            const._CLASS_IN | const._CLASS_UNIQUE,
            const._DNS_OTHER_TTL,
            registration_name,
            [const._TYPE_AAAA],
        ),
        0,
    )
    generated.add_answer_at_time(
        r.DNSService(
            registration_name,
            const._TYPE_SRV,
            const._CLASS_IN | const._CLASS_UNIQUE,
            10000,
            0,
            0,
            80,
            host,
        ),
        0,
    )
    generated.add_answer_at_time(
        r.DNSAddress(
            host,
            const._TYPE_A,
            const._CLASS_IN,
            10000,
            b"\x7f\x00\x00\x01",
        ),
        0,
    )
    generated.add_answer_at_time(
        r.DNSText(
            registration_name,
            const._TYPE_TXT,
            const._CLASS_IN | const._CLASS_UNIQUE,
            10000,
            b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==",
        ),
        0,
    )
    await aiozc.zeroconf.async_wait_for_start()
    await asyncio.sleep(0)
    aiozc.zeroconf.record_manager.async_updates_from_response(r.DNSIncoming(generated.packets()[0]))
    info = ServiceInfo(type_, registration_name)
    info.load_from_cache(aiozc.zeroconf)
    assert info.addresses_by_version(IPVersion.V4Only) == [b"\x7f\x00\x00\x01"]
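    # A later A record for the same host is additive: the cache keeps both
    # addresses, newest first.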

    generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    generated.add_answer_at_time(
        r.DNSAddress(
            host,
            const._TYPE_A,
            const._CLASS_IN,
            10000,
            b"\x7f\x00\x00\x02",
        ),
        0,
    )
    aiozc.zeroconf.record_manager.async_updates_from_response(r.DNSIncoming(generated.packets()[0]))

    info = ServiceInfo(type_, registration_name)
    info.load_from_cache(aiozc.zeroconf)
    assert info.addresses_by_version(IPVersion.V4Only) == [
        b"\x7f\x00\x00\x02",
        b"\x7f\x00\x00\x01",
    ]
    await info.async_request(aiozc.zeroconf, timeout=200)
    assert info.addresses_by_version(IPVersion.V4Only) == [
        b"\x7f\x00\x00\x02",
        b"\x7f\x00\x00\x01",
    ]
    await aiozc.async_close()


@pytest.mark.asyncio
async def test_ipv6_changes_are_seen():
    """Test that ipv6 changes are seen by async_request."""
    type_ = "_http._tcp.local."
    registration_name = f"multiaipv6rec.{type_}"
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    host = "multahost.local."

    # New kwarg way
    generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    generated.add_answer_at_time(
        r.DNSNsec(
            registration_name,
            const._TYPE_NSEC,
            const._CLASS_IN | const._CLASS_UNIQUE,
            const._DNS_OTHER_TTL,
            registration_name,
            [const._TYPE_A],
        ),
        0,
    )
    generated.add_answer_at_time(
        r.DNSService(
            registration_name,
            const._TYPE_SRV,
            const._CLASS_IN | const._CLASS_UNIQUE,
            10000,
            0,
            0,
            80,
            host,
        ),
        0,
    )
    generated.add_answer_at_time(
        r.DNSAddress(
            host,
            const._TYPE_AAAA,
            const._CLASS_IN,
            10000,
            b"\xde\xad\xbe\xef\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
        ),
        0,
    )
    generated.add_answer_at_time(
        r.DNSText(
            registration_name,
            const._TYPE_TXT,
            const._CLASS_IN | const._CLASS_UNIQUE,
            10000,
            b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==",
        ),
        0,
    )
    await aiozc.zeroconf.async_wait_for_start()
    await asyncio.sleep(0)
    aiozc.zeroconf.record_manager.async_updates_from_response(r.DNSIncoming(generated.packets()[0]))
    info = ServiceInfo(type_, registration_name)
    info.load_from_cache(aiozc.zeroconf)
    assert info.addresses_by_version(IPVersion.V6Only) == [
        b"\xde\xad\xbe\xef\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
    ]
    info.load_from_cache(aiozc.zeroconf)
    assert info.addresses_by_version(IPVersion.V6Only) == [
        b"\xde\xad\xbe\xef\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
    ]

    generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    generated.add_answer_at_time(
        r.DNSAddress(
            host,
            const._TYPE_AAAA,
            const._CLASS_IN,
            10000,
            b"\x00\xad\xbe\xef\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
        ),
        0,
    )
    aiozc.zeroconf.record_manager.async_updates_from_response(r.DNSIncoming(generated.packets()[0]))

    info = ServiceInfo(type_, registration_name)
    info.load_from_cache(aiozc.zeroconf)
    assert info.addresses_by_version(IPVersion.V6Only) == [
        b"\x00\xad\xbe\xef\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
        b"\xde\xad\xbe\xef\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
    ]
    await info.async_request(aiozc.zeroconf, timeout=200)
    assert info.addresses_by_version(IPVersion.V6Only) == [
        b"\x00\xad\xbe\xef\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
        b"\xde\xad\xbe\xef\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
    ]

    await aiozc.async_close()


@pytest.mark.asyncio
async def test_bad_ip_addresses_ignored_in_cache():
    """Test that bad ip address in the cache are ignored async_request."""
    type_ = "_http._tcp.local."
    registration_name = f"multiarec.{type_}"
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    host = "multahost.local."

    # New kwarg way
    generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    generated.add_answer_at_time(
        r.DNSService(
            registration_name,
            const._TYPE_SRV,
            const._CLASS_IN | const._CLASS_UNIQUE,
            10000,
            0,
            0,
            80,
            host,
        ),
        0,
    )
    generated.add_answer_at_time(
        r.DNSAddress(
            host,
            const._TYPE_A,
            const._CLASS_IN,
            10000,
            b"\x7f\x00\x00\x01",
        ),
        0,
    )
    generated.add_answer_at_time(
        r.DNSText(
            registration_name,
            const._TYPE_TXT,
            const._CLASS_IN | const._CLASS_UNIQUE,
            10000,
            b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==",
        ),
        0,
    )
    # Manually add a bad record to the cache
    aiozc.zeroconf.cache.async_add_records([DNSAddress(host, const._TYPE_A, const._CLASS_IN, 10000, b"\x00")])
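    # b"\x00" is not a valid 4-byte IPv4 payload, so it must be skipped.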

    await aiozc.zeroconf.async_wait_for_start()
    await asyncio.sleep(0)
    aiozc.zeroconf.record_manager.async_updates_from_response(r.DNSIncoming(generated.packets()[0]))
    info = ServiceInfo(type_, registration_name)
    info.load_from_cache(aiozc.zeroconf)
    assert info.addresses_by_version(IPVersion.V4Only) == [b"\x7f\x00\x00\x01"]
    await aiozc.async_close()


@pytest.mark.asyncio
async def test_service_name_change_as_seen_has_ip_in_cache():
    """Test that service name changes are seen by async_request when the ip is in the cache."""
    type_ = "_http._tcp.local."
    registration_name = f"multiarec.{type_}"
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    host = "multahost.local."

    # New kwarg way
    generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    generated.add_answer_at_time(
        r.DNSNsec(
            registration_name,
            const._TYPE_NSEC,
            const._CLASS_IN | const._CLASS_UNIQUE,
            const._DNS_OTHER_TTL,
            registration_name,
            [const._TYPE_AAAA],
        ),
        0,
    )
    generated.add_answer_at_time(
        r.DNSAddress(
            registration_name,
            const._TYPE_A,
            const._CLASS_IN,
            10000,
            b"\x7f\x00\x00\x01",
        ),
        0,
    )
    generated.add_answer_at_time(
        r.DNSAddress(
            host,
            const._TYPE_A,
            const._CLASS_IN,
            10000,
            b"\x7f\x00\x00\x02",
        ),
        0,
    )
    generated.add_answer_at_time(
        r.DNSText(
            registration_name,
            const._TYPE_TXT,
            const._CLASS_IN | const._CLASS_UNIQUE,
            10000,
            b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==",
        ),
        0,
    )
    await aiozc.zeroconf.async_wait_for_start()
    await asyncio.sleep(0)
    aiozc.zeroconf.record_manager.async_updates_from_response(r.DNSIncoming(generated.packets()[0]))

    info = ServiceInfo(type_, registration_name)
    await info.async_request(aiozc.zeroconf, timeout=200)
    assert info.addresses_by_version(IPVersion.V4Only) == []

    generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    generated.add_answer_at_time(
        r.DNSService(
            registration_name,
            const._TYPE_SRV,
            const._CLASS_IN | const._CLASS_UNIQUE,
            10000,
            0,
            0,
            80,
            host,
        ),
        0,
    )
    aiozc.zeroconf.record_manager.async_updates_from_response(r.DNSIncoming(generated.packets()[0]))

    info = ServiceInfo(type_, registration_name)
    await info.async_request(aiozc.zeroconf, timeout=200)
    assert info.addresses_by_version(IPVersion.V4Only) == [b"\x7f\x00\x00\x02"]

    await aiozc.async_close()


@pytest.mark.asyncio
async def test_service_name_change_as_seen_ip_not_in_cache():
    """Test that service name changes are seen by async_request when the ip is not in the cache."""
    type_ = "_http._tcp.local."
    registration_name = f"multiarec.{type_}"
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    host = "multahost.local."

    # New kwarg way
    generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    generated.add_answer_at_time(
        r.DNSNsec(
            registration_name,
            const._TYPE_NSEC,
            const._CLASS_IN | const._CLASS_UNIQUE,
            const._DNS_OTHER_TTL,
            registration_name,
            [const._TYPE_AAAA],
        ),
        0,
    )
    generated.add_answer_at_time(
        r.DNSAddress(
            registration_name,
            const._TYPE_A,
            const._CLASS_IN,
            10000,
            b"\x7f\x00\x00\x01",
        ),
        0,
    )
    generated.add_answer_at_time(
        r.DNSText(
            registration_name,
            const._TYPE_TXT,
            const._CLASS_IN | const._CLASS_UNIQUE,
            10000,
            b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==",
        ),
        0,
    )
    await aiozc.zeroconf.async_wait_for_start()
    await asyncio.sleep(0)
    aiozc.zeroconf.record_manager.async_updates_from_response(r.DNSIncoming(generated.packets()[0]))

    info = ServiceInfo(type_, registration_name)
    await info.async_request(aiozc.zeroconf, timeout=200)
    assert info.addresses_by_version(IPVersion.V4Only) == []

    generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    generated.add_answer_at_time(
        r.DNSService(
            registration_name,
            const._TYPE_SRV,
            const._CLASS_IN | const._CLASS_UNIQUE,
            10000,
            0,
            0,
            80,
            host,
        ),
        0,
    )
    generated.add_answer_at_time(
        r.DNSAddress(
            host,
            const._TYPE_A,
            const._CLASS_IN,
            10000,
            b"\x7f\x00\x00\x02",
        ),
        0,
    )
    aiozc.zeroconf.record_manager.async_updates_from_response(r.DNSIncoming(generated.packets()[0]))

    info = ServiceInfo(type_, registration_name)
    await info.async_request(aiozc.zeroconf, timeout=200)
    assert info.addresses_by_version(IPVersion.V4Only) == [b"\x7f\x00\x00\x02"]

    await aiozc.async_close()


@pytest.mark.asyncio
@patch.object(info, "_LISTENER_TIME", 10000000)
async def test_release_wait_when_new_recorded_added_concurrency():
    """Test that concurrent async_request returns as soon as new matching records are added to the cache."""
    type_ = "_http._tcp.local."
    registration_name = f"multiareccon.{type_}"
    desc = {"path": "/~paulsm/"}
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    host = "multahostcon.local."
    await aiozc.zeroconf.async_wait_for_start()

    # New kwarg way
    info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, host)
    tasks = [asyncio.create_task(info.async_request(aiozc.zeroconf, timeout=200000)) for _ in range(10)]
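    # All ten requests resolve the same name; a single injected response
    # should release every waiter at once.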
    await asyncio.sleep(0.1)
    for task in tasks:
        assert not task.done()
    generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    generated.add_answer_at_time(
        r.DNSNsec(
            registration_name,
            const._TYPE_NSEC,
            const._CLASS_IN | const._CLASS_UNIQUE,
            const._DNS_OTHER_TTL,
            registration_name,
            [const._TYPE_AAAA],
        ),
        0,
    )
    generated.add_answer_at_time(
        r.DNSService(
            registration_name,
            const._TYPE_SRV,
            const._CLASS_IN | const._CLASS_UNIQUE,
            10000,
            0,
            0,
            80,
            host,
        ),
        0,
    )
    generated.add_answer_at_time(
        r.DNSAddress(
            host,
            const._TYPE_A,
            const._CLASS_IN,
            10000,
            b"\x7f\x00\x00\x01",
        ),
        0,
    )
    generated.add_answer_at_time(
        r.DNSText(
            registration_name,
            const._TYPE_TXT,
            const._CLASS_IN | const._CLASS_UNIQUE,
            10000,
            b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==",
        ),
        0,
    )
    await asyncio.sleep(0)
    for task in tasks:
        assert not task.done()
    aiozc.zeroconf.record_manager.async_updates_from_response(r.DNSIncoming(generated.packets()[0]))
    _, pending = await asyncio.wait(tasks, timeout=2)
    assert not pending
    assert info.addresses == [b"\x7f\x00\x00\x01"]
    await aiozc.async_close()


@pytest.mark.asyncio
async def test_service_info_nsec_records():
    """Test we can generate nsec records from ServiceInfo."""
    type_ = "_http._tcp.local."
    registration_name = f"multiareccon.{type_}"
    desc = {"path": "/~paulsm/"}
    host = "multahostcon.local."
    info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, host)
    nsec_record = info.dns_nsec([const._TYPE_A, const._TYPE_AAAA], 50)
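    # dns_nsec builds a negative-response record; rdtypes names the record
    # types that are absent for this name.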
    assert nsec_record.name == registration_name
    assert nsec_record.type == const._TYPE_NSEC
    assert nsec_record.ttl == 50
    assert nsec_record.rdtypes == [const._TYPE_A, const._TYPE_AAAA]


@pytest.mark.asyncio
async def test_address_resolver():
    """Test that the address resolver works."""
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    await aiozc.zeroconf.async_wait_for_start()
    resolver = r.AddressResolver("address_resolver_test.local.")
    resolve_task = asyncio.create_task(resolver.async_request(aiozc.zeroconf, 3000))
    outgoing = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    outgoing.add_answer_at_time(
        r.DNSAddress(
            "address_resolver_test.local.",
            const._TYPE_A,
            const._CLASS_IN,
            10000,
            b"\x7f\x00\x00\x01",
        ),
        0,
    )

    aiozc.zeroconf.async_send(outgoing)
    assert await resolve_task
    assert resolver.addresses == [b"\x7f\x00\x00\x01"]
    await aiozc.async_close()


@pytest.mark.asyncio
async def test_address_resolver_ipv4():
    """Test that the IPv4 address resolver works."""
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    await aiozc.zeroconf.async_wait_for_start()
    resolver = r.AddressResolverIPv4("address_resolver_test_ipv4.local.")
    resolve_task = asyncio.create_task(resolver.async_request(aiozc.zeroconf, 3000))
    outgoing = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    outgoing.add_answer_at_time(
        r.DNSAddress(
            "address_resolver_test_ipv4.local.",
            const._TYPE_A,
            const._CLASS_IN,
            10000,
            b"\x7f\x00\x00\x01",
        ),
        0,
    )

    aiozc.zeroconf.async_send(outgoing)
    assert await resolve_task
    assert resolver.addresses == [b"\x7f\x00\x00\x01"]
    await aiozc.async_close()


@pytest.mark.asyncio
@unittest.skipIf(not has_working_ipv6(), "Requires IPv6")
@unittest.skipIf(os.environ.get("SKIP_IPV6"), "IPv6 tests disabled")
async def test_address_resolver_ipv6():
    """Test that the IPv6 address resolver works."""
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    await aiozc.zeroconf.async_wait_for_start()
    resolver = r.AddressResolverIPv6("address_resolver_test_ipv6.local.")
    resolve_task = asyncio.create_task(resolver.async_request(aiozc.zeroconf, 3000))
    outgoing = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    outgoing.add_answer_at_time(
        r.DNSAddress(
            "address_resolver_test_ipv6.local.",
            const._TYPE_AAAA,
            const._CLASS_IN,
            10000,
            socket.inet_pton(socket.AF_INET6, "fe80::52e:c2f2:bc5f:e9c6"),
        ),
        0,
    )

    aiozc.zeroconf.async_send(outgoing)
    assert await resolve_task
    assert resolver.ip_addresses_by_version(IPVersion.All) == [ip_address("fe80::52e:c2f2:bc5f:e9c6")]
    await aiozc.async_close()
07070100000071000081A400000000000000000000000167C7AD1600001247000000000000000000000000000000000000003800000000python-zeroconf-0.146.0/tests/services/test_registry.py"""Unit tests for zeroconf._services.registry."""

from __future__ import annotations

import socket
import unittest

import zeroconf as r
from zeroconf import ServiceInfo


class TestServiceRegistry(unittest.TestCase):
    def test_only_register_once(self):
        type_ = "_test-srvc-type._tcp.local."
        name = "xxxyyy"
        registration_name = f"{name}.{type_}"

        desc = {"path": "/~paulsm/"}
        info = ServiceInfo(
            type_,
            registration_name,
            80,
            0,
            0,
            desc,
            "ash-2.local.",
            addresses=[socket.inet_aton("10.0.1.2")],
        )

        registry = r.ServiceRegistry()
        registry.async_add(info)
        self.assertRaises(r.ServiceNameAlreadyRegistered, registry.async_add, info)
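        # Re-adding the same name only succeeds after the original
        # registration has been removed.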
        registry.async_remove(info)
        registry.async_add(info)

    def test_register_same_server(self):
        type_ = "_test-srvc-type._tcp.local."
        name = "xxxyyy"
        name2 = "xxxyyy2"
        registration_name = f"{name}.{type_}"
        registration_name2 = f"{name2}.{type_}"

        desc = {"path": "/~paulsm/"}
        info = ServiceInfo(
            type_,
            registration_name,
            80,
            0,
            0,
            desc,
            "same.local.",
            addresses=[socket.inet_aton("10.0.1.2")],
        )
        info2 = ServiceInfo(
            type_,
            registration_name2,
            80,
            0,
            0,
            desc,
            "same.local.",
            addresses=[socket.inet_aton("10.0.1.2")],
        )
        registry = r.ServiceRegistry()
        registry.async_add(info)
        registry.async_add(info2)
        assert registry.async_get_infos_server("same.local.") == [info, info2]
        registry.async_remove(info)
        assert registry.async_get_infos_server("same.local.") == [info2]
        registry.async_remove(info2)
        assert registry.async_get_infos_server("same.local.") == []

    def test_unregister_multiple_times(self):
        """Verify we can unregister a service multiple times.

        In production unregister_service and unregister_all_services
        may happen at the same time during shutdown. We want to treat
        this as non-fatal since it's expected to happen and it is unlikely
        that the callers know about each other.
        """
        type_ = "_test-srvc-type._tcp.local."
        name = "xxxyyy"
        registration_name = f"{name}.{type_}"

        desc = {"path": "/~paulsm/"}
        info = ServiceInfo(
            type_,
            registration_name,
            80,
            0,
            0,
            desc,
            "ash-2.local.",
            addresses=[socket.inet_aton("10.0.1.2")],
        )

        registry = r.ServiceRegistry()
        registry.async_add(info)
        self.assertRaises(r.ServiceNameAlreadyRegistered, registry.async_add, info)
        registry.async_remove(info)
        registry.async_remove(info)

    def test_lookups(self):
        type_ = "_test-srvc-type._tcp.local."
        name = "xxxyyy"
        registration_name = f"{name}.{type_}"

        desc = {"path": "/~paulsm/"}
        info = ServiceInfo(
            type_,
            registration_name,
            80,
            0,
            0,
            desc,
            "ash-2.local.",
            addresses=[socket.inet_aton("10.0.1.2")],
        )

        registry = r.ServiceRegistry()
        registry.async_add(info)

        assert registry.async_get_service_infos() == [info]
        assert registry.async_get_info_name(registration_name) == info
        assert registry.async_get_infos_type(type_) == [info]
        assert registry.async_get_infos_server("ash-2.local.") == [info]
        assert registry.async_get_types() == [type_]

    def test_lookups_upper_case_by_lower_case(self):
        type_ = "_test-SRVC-type._tcp.local."
        name = "Xxxyyy"
        registration_name = f"{name}.{type_}"

        desc = {"path": "/~paulsm/"}
        info = ServiceInfo(
            type_,
            registration_name,
            80,
            0,
            0,
            desc,
            "ASH-2.local.",
            addresses=[socket.inet_aton("10.0.1.2")],
        )

        registry = r.ServiceRegistry()
        registry.async_add(info)

        assert registry.async_get_service_infos() == [info]
        assert registry.async_get_info_name(registration_name.lower()) == info
        assert registry.async_get_infos_type(type_.lower()) == [info]
        assert registry.async_get_infos_server("ash-2.local.") == [info]
        assert registry.async_get_types() == [type_.lower()]
07070100000072000081A400000000000000000000000167C7AD1600001251000000000000000000000000000000000000003500000000python-zeroconf-0.146.0/tests/services/test_types.py"""Unit tests for zeroconf._services.types."""

from __future__ import annotations

import logging
import os
import socket
import sys
import unittest

import zeroconf as r
from zeroconf import ServiceInfo, Zeroconf, ZeroconfServiceTypes

from .. import _clear_cache, has_working_ipv6

log = logging.getLogger("zeroconf")
original_logging_level = logging.NOTSET


def setup_module():
    global original_logging_level
    original_logging_level = log.level
    log.setLevel(logging.DEBUG)


def teardown_module():
    if original_logging_level != logging.NOTSET:
        log.setLevel(original_logging_level)


def test_integration_with_listener(disable_duplicate_packet_suppression):
    type_ = "_test-listen-type._tcp.local."
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"

    zeroconf_registrar = Zeroconf(interfaces=["127.0.0.1"])
    desc = {"path": "/~paulsm/"}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    zeroconf_registrar.registry.async_add(info)
    try:
        service_types = ZeroconfServiceTypes.find(interfaces=["127.0.0.1"], timeout=2)
        assert type_ in service_types
        _clear_cache(zeroconf_registrar)
        service_types = ZeroconfServiceTypes.find(zc=zeroconf_registrar, timeout=2)
        assert type_ in service_types

    finally:
        zeroconf_registrar.close()


@unittest.skipIf(not has_working_ipv6(), "Requires IPv6")
@unittest.skipIf(os.environ.get("SKIP_IPV6"), "IPv6 tests disabled")
def test_integration_with_listener_v6_records(disable_duplicate_packet_suppression):
    type_ = "_test-listenv6rec-type._tcp.local."
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"
    addr = "2606:2800:220:1:248:1893:25c8:1946"  # example.com

    zeroconf_registrar = Zeroconf(interfaces=["127.0.0.1"])
    desc = {"path": "/~paulsm/"}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_pton(socket.AF_INET6, addr)],
    )
    zeroconf_registrar.registry.async_add(info)
    try:
        service_types = ZeroconfServiceTypes.find(interfaces=["127.0.0.1"], timeout=2)
        assert type_ in service_types
        _clear_cache(zeroconf_registrar)
        service_types = ZeroconfServiceTypes.find(zc=zeroconf_registrar, timeout=2)
        assert type_ in service_types

    finally:
        zeroconf_registrar.close()


@unittest.skipIf(not has_working_ipv6() or sys.platform == "win32", "Requires IPv6")
@unittest.skipIf(os.environ.get("SKIP_IPV6"), "IPv6 tests disabled")
def test_integration_with_listener_ipv6(disable_duplicate_packet_suppression):
    type_ = "_test-listenv6ip-type._tcp.local."
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"
    addr = "2606:2800:220:1:248:1893:25c8:1946"  # example.com

    zeroconf_registrar = Zeroconf(ip_version=r.IPVersion.V6Only)
    desc = {"path": "/~paulsm/"}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_pton(socket.AF_INET6, addr)],
    )
    zeroconf_registrar.registry.async_add(info)
    try:
        service_types = ZeroconfServiceTypes.find(ip_version=r.IPVersion.V6Only, timeout=2)
        assert type_ in service_types
        _clear_cache(zeroconf_registrar)
        service_types = ZeroconfServiceTypes.find(zc=zeroconf_registrar, timeout=2)
        assert type_ in service_types

    finally:
        zeroconf_registrar.close()


def test_integration_with_subtype_and_listener(disable_duplicate_packet_suppression):
    subtype_ = "_subtype._sub"
    type_ = "_listen._tcp.local."
    name = "xxxyyy"
    # Note: discovery returns only the DNS-SD type, not the subtype
    discovery_type = f"{subtype_}.{type_}"
    registration_name = f"{name}.{type_}"

    zeroconf_registrar = Zeroconf(interfaces=["127.0.0.1"])
    desc = {"path": "/~paulsm/"}
    info = ServiceInfo(
        discovery_type,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    zeroconf_registrar.registry.async_add(info)
    try:
        service_types = ZeroconfServiceTypes.find(interfaces=["127.0.0.1"], timeout=2)
        assert discovery_type in service_types
        _clear_cache(zeroconf_registrar)
        service_types = ZeroconfServiceTypes.find(zc=zeroconf_registrar, timeout=2)
        assert discovery_type in service_types

    finally:
        zeroconf_registrar.close()
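
# Illustrative sketch of the subtype flow above: a service registered with a
# subtype-qualified discovery type ("<subtype>._sub.<type>") is reported by
# ZeroconfServiceTypes.find() under that subtype-qualified name. All names
# below are placeholders.
def _subtype_discovery_sketch() -> None:
    zc = Zeroconf(interfaces=["127.0.0.1"])
    info = ServiceInfo(
        "_example._sub._demo._tcp.local.",  # discovery type including subtype
        "instance._demo._tcp.local.",  # registration name uses the base type
        80,
        0,
        0,
        {"path": "/~paulsm/"},
        "host.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    zc.registry.async_add(info)
    try:
        assert "_example._sub._demo._tcp.local." in ZeroconfServiceTypes.find(zc=zc, timeout=2)
    finally:
        zc.close()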
python-zeroconf-0.146.0/tests/test_asyncio.py
"""Unit tests for aio.py."""

from __future__ import annotations

import asyncio
import logging
import os
import socket
import threading
from typing import cast
from unittest.mock import ANY, call, patch

import pytest

import zeroconf._services.browser as _services_browser
from zeroconf import (
    DNSAddress,
    DNSIncoming,
    DNSOutgoing,
    DNSPointer,
    DNSQuestion,
    DNSService,
    DNSText,
    NotRunningException,
    ServiceStateChange,
    Zeroconf,
    const,
)
from zeroconf._exceptions import (
    BadTypeInNameException,
    NonUniqueNameException,
    ServiceNameAlreadyRegistered,
)
from zeroconf._services import ServiceListener
from zeroconf._services.info import ServiceInfo
from zeroconf._utils.time import current_time_millis
from zeroconf.asyncio import (
    AsyncServiceBrowser,
    AsyncServiceInfo,
    AsyncZeroconf,
    AsyncZeroconfServiceTypes,
)
from zeroconf.const import _LISTENER_TIME

from . import (
    QuestionHistoryWithoutSuppression,
    _clear_cache,
    has_working_ipv6,
    time_changed_millis,
)

log = logging.getLogger("zeroconf")
original_logging_level = logging.NOTSET


def setup_module():
    global original_logging_level
    original_logging_level = log.level
    log.setLevel(logging.DEBUG)


def teardown_module():
    if original_logging_level != logging.NOTSET:
        log.setLevel(original_logging_level)


@pytest.fixture(autouse=True)
def verify_threads_ended():
    """Verify that the threads are not running after the test."""
    threads_before = frozenset(threading.enumerate())
    yield
    threads_after = frozenset(threading.enumerate())
    non_executor_threads = frozenset(
        thread
        for thread in threads_after
        if "asyncio" not in thread.name and "ThreadPoolExecutor" not in thread.name
    )
    threads = non_executor_threads - threads_before
    assert not threads


@pytest.mark.asyncio
async def test_async_basic_usage() -> None:
    """Test we can create and close the instance."""
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    await aiozc.async_close()


@pytest.mark.asyncio
async def test_async_close_twice() -> None:
    """Test we can close twice."""
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    await aiozc.async_close()
    await aiozc.async_close()


@pytest.mark.asyncio
async def test_async_with_sync_passed_in() -> None:
    """Test we can create and close the instance when passing in a sync Zeroconf."""
    zc = Zeroconf(interfaces=["127.0.0.1"])
    aiozc = AsyncZeroconf(zc=zc)
    assert aiozc.zeroconf is zc
    await aiozc.async_close()


@pytest.mark.asyncio
async def test_async_with_sync_passed_in_closed_in_async() -> None:
    """Test caller closes the sync version in async."""
    zc = Zeroconf(interfaces=["127.0.0.1"])
    aiozc = AsyncZeroconf(zc=zc)
    assert aiozc.zeroconf is zc
    zc.close()
    await aiozc.async_close()


@pytest.mark.asyncio
async def test_sync_within_event_loop_executor() -> None:
    """Test sync version still works from an executor within an event loop."""

    def sync_code():
        zc = Zeroconf(interfaces=["127.0.0.1"])
        assert zc.get_service_info("_neverused._tcp.local.", "xneverused._neverused._tcp.local.", 10) is None
        zc.close()

    await asyncio.get_event_loop().run_in_executor(None, sync_code)


@pytest.mark.asyncio
async def test_async_service_registration() -> None:
    """Test registering services broadcasts the registration by default."""
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    type_ = "_test1-srvc-type._tcp.local."
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"

    calls = []

    class MyListener(ServiceListener):
        def add_service(self, zeroconf: Zeroconf, type: str, name: str) -> None:
            calls.append(("add", type, name))

        def remove_service(self, zeroconf: Zeroconf, type: str, name: str) -> None:
            calls.append(("remove", type, name))

        def update_service(self, zeroconf: Zeroconf, type: str, name: str) -> None:
            calls.append(("update", type, name))

    listener = MyListener()

    aiozc.zeroconf.add_service_listener(type_, listener)

    desc = {"path": "/~paulsm/"}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    task = await aiozc.async_register_service(info)
    await task
    new_info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.3")],
    )
    task = await aiozc.async_update_service(new_info)
    await task
    assert new_info.dns_service().server_key == "ash-2.local."
    new_info.server = "ash-3.local."
    task = await aiozc.async_update_service(new_info)
    await task
    assert new_info.dns_service().server_key == "ash-3.local."

    task = await aiozc.async_unregister_service(new_info)
    await task
    await aiozc.async_close()

    assert calls == [
        ("add", type_, registration_name),
        ("update", type_, registration_name),
        ("update", type_, registration_name),
        ("remove", type_, registration_name),
    ]
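
# The register/update/unregister calls above share a two-step pattern:
# awaiting the coroutine schedules the broadcast and returns an asyncio.Task,
# and awaiting that task waits for the announcements to go out. A minimal
# sketch of the pattern (the caller supplies aiozc and info):
async def _registration_pattern_sketch(aiozc: AsyncZeroconf, info: ServiceInfo) -> None:
    task = await aiozc.async_register_service(info)  # schedules the broadcast
    await task  # waits for the registration announcements to be sent
    task = await aiozc.async_unregister_service(info)
    await task  # waits for the goodbye announcements to be sent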


@pytest.mark.asyncio
async def test_async_service_registration_with_server_missing() -> None:
    """Test registering a service with the server not specified.

    For backwards compatibility, the server should be set to the
    name that was passed in.
    """
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    type_ = "_test1-srvc-type._tcp.local."
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"

    calls = []

    class MyListener(ServiceListener):
        def add_service(self, zeroconf: Zeroconf, type: str, name: str) -> None:
            calls.append(("add", type, name))

        def remove_service(self, zeroconf: Zeroconf, type: str, name: str) -> None:
            calls.append(("remove", type, name))

        def update_service(self, zeroconf: Zeroconf, type: str, name: str) -> None:
            calls.append(("update", type, name))

    listener = MyListener()

    aiozc.zeroconf.add_service_listener(type_, listener)

    desc = {"path": "/~paulsm/"}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    task = await aiozc.async_register_service(info)
    await task

    assert info.server == registration_name
    assert info.server_key == registration_name
    new_info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.3")],
    )
    task = await aiozc.async_update_service(new_info)
    await task

    task = await aiozc.async_unregister_service(new_info)
    await task
    await aiozc.async_close()

    assert calls == [
        ("add", type_, registration_name),
        ("update", type_, registration_name),
        ("remove", type_, registration_name),
    ]


@pytest.mark.asyncio
async def test_async_service_registration_same_server_different_ports() -> None:
    """Test registering services with the same server with different srv records."""
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    type_ = "_test1-srvc-type._tcp.local."
    name = "xxxyyy"
    name2 = "xxxyyy2"

    registration_name = f"{name}.{type_}"
    registration_name2 = f"{name2}.{type_}"

    calls = []

    class MyListener(ServiceListener):
        def add_service(self, zeroconf: Zeroconf, type: str, name: str) -> None:
            calls.append(("add", type, name))

        def remove_service(self, zeroconf: Zeroconf, type: str, name: str) -> None:
            calls.append(("remove", type, name))

        def update_service(self, zeroconf: Zeroconf, type: str, name: str) -> None:
            calls.append(("update", type, name))

    listener = MyListener()

    aiozc.zeroconf.add_service_listener(type_, listener)

    desc = {"path": "/~paulsm/"}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    info2 = ServiceInfo(
        type_,
        registration_name2,
        81,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    tasks = []
    tasks.append(await aiozc.async_register_service(info))
    tasks.append(await aiozc.async_register_service(info2))
    await asyncio.gather(*tasks)

    task = await aiozc.async_unregister_service(info)
    await task
    entries = aiozc.zeroconf.cache.async_entries_with_server("ash-2.local.")
    assert len(entries) == 1
    assert info2.dns_service() in entries
    await aiozc.async_close()
    assert calls == [
        ("add", type_, registration_name),
        ("add", type_, registration_name2),
        ("remove", type_, registration_name),
        ("remove", type_, registration_name2),
    ]


@pytest.mark.asyncio
async def test_async_service_registration_same_server_same_ports() -> None:
    """Test registering services with the same server with the exact same srv record."""
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    type_ = "_test1-srvc-type._tcp.local."
    name = "xxxyyy"
    name2 = "xxxyyy2"

    registration_name = f"{name}.{type_}"
    registration_name2 = f"{name2}.{type_}"

    calls = []

    class MyListener(ServiceListener):
        def add_service(self, zeroconf: Zeroconf, type: str, name: str) -> None:
            calls.append(("add", type, name))

        def remove_service(self, zeroconf: Zeroconf, type: str, name: str) -> None:
            calls.append(("remove", type, name))

        def update_service(self, zeroconf: Zeroconf, type: str, name: str) -> None:
            calls.append(("update", type, name))

    listener = MyListener()

    aiozc.zeroconf.add_service_listener(type_, listener)

    desc = {"path": "/~paulsm/"}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    info2 = ServiceInfo(
        type_,
        registration_name2,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    tasks = []
    tasks.append(await aiozc.async_register_service(info))
    tasks.append(await aiozc.async_register_service(info2))
    await asyncio.gather(*tasks)

    task = await aiozc.async_unregister_service(info)
    await task
    entries = aiozc.zeroconf.cache.async_entries_with_server("ash-2.local.")
    assert len(entries) == 1
    assert info2.dns_service() in entries
    await aiozc.async_close()
    assert calls == [
        ("add", type_, registration_name),
        ("add", type_, registration_name2),
        ("remove", type_, registration_name),
        ("remove", type_, registration_name2),
    ]


@pytest.mark.asyncio
async def test_async_service_registration_name_conflict() -> None:
    """Test registering services throws on name conflict."""
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    type_ = "_test-srvc2-type._tcp.local."
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"

    desc = {"path": "/~paulsm/"}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    task = await aiozc.async_register_service(info)
    await task

    with pytest.raises(NonUniqueNameException):
        task = await aiozc.async_register_service(info)
        await task

    with pytest.raises(ServiceNameAlreadyRegistered):
        task = await aiozc.async_register_service(info, cooperating_responders=True)
        await task

    conflicting_info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-3.local.",
        addresses=[socket.inet_aton("10.0.1.3")],
    )

    with pytest.raises(NonUniqueNameException):
        task = await aiozc.async_register_service(conflicting_info)
        await task

    await aiozc.async_close()


@pytest.mark.asyncio
async def test_async_service_registration_name_does_not_match_type() -> None:
    """Test registering services throws when the name does not match the type."""
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    type_ = "_test-srvc3-type._tcp.local."
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"

    desc = {"path": "/~paulsm/"}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    info.type = "_wrong._tcp.local."
    with pytest.raises(BadTypeInNameException):
        task = await aiozc.async_register_service(info)
        await task
    await aiozc.async_close()


@pytest.mark.asyncio
async def test_async_service_registration_name_strict_check() -> None:
    """Test registering services throws when the name does not comply."""
    zc = Zeroconf(interfaces=["127.0.0.1"])
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    type_ = "_ibisip_http._tcp.local."
    name = "CustomerInformationService-F4D4895E9EEB"
    registration_name = f"{name}.{type_}"

    desc = {"path": "/~paulsm/"}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    with pytest.raises(BadTypeInNameException):
        await zc.async_check_service(info, allow_name_change=False)

    with pytest.raises(BadTypeInNameException):
        task = await aiozc.async_register_service(info)
        await task

    await zc.async_check_service(info, allow_name_change=False, strict=False)
    task = await aiozc.async_register_service(info, strict=False)
    await task

    await aiozc.async_unregister_service(info)
    await aiozc.async_close()
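
# Sketch of the strict-mode escape hatch exercised above: a name that fails
# the default strict service-name check can still be registered by passing
# strict=False (the caller supplies aiozc and info).
async def _lenient_registration_sketch(aiozc: AsyncZeroconf, info: ServiceInfo) -> None:
    # With the default strict=True this name raises BadTypeInNameException.
    task = await aiozc.async_register_service(info, strict=False)
    await task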


@pytest.mark.asyncio
async def test_async_tasks() -> None:
    """Test awaiting broadcast tasks"""

    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    type_ = "_test-srvc4-type._tcp.local."
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"

    calls = []

    class MyListener(ServiceListener):
        def add_service(self, zeroconf: Zeroconf, type: str, name: str) -> None:
            calls.append(("add", type, name))

        def remove_service(self, zeroconf: Zeroconf, type: str, name: str) -> None:
            calls.append(("remove", type, name))

        def update_service(self, zeroconf: Zeroconf, type: str, name: str) -> None:
            calls.append(("update", type, name))

    listener = MyListener()
    aiozc.zeroconf.add_service_listener(type_, listener)

    desc = {"path": "/~paulsm/"}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    task = await aiozc.async_register_service(info)
    assert isinstance(task, asyncio.Task)
    await task

    new_info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.3")],
    )
    task = await aiozc.async_update_service(new_info)
    assert isinstance(task, asyncio.Task)
    await task

    task = await aiozc.async_unregister_service(new_info)
    assert isinstance(task, asyncio.Task)
    await task

    await aiozc.async_close()

    assert calls == [
        ("add", type_, registration_name),
        ("update", type_, registration_name),
        ("remove", type_, registration_name),
    ]


@pytest.mark.asyncio
async def test_async_wait_unblocks_on_update() -> None:
    """Test async_wait will unblock on update."""

    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    type_ = "_test-srvc4-type._tcp.local."
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"

    desc = {"path": "/~paulsm/"}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    task = await aiozc.async_register_service(info)

    # Should unblock due to update from the
    # registration
    now = current_time_millis()
    await aiozc.zeroconf.async_wait(50000)
    assert current_time_millis() - now < 3000
    await task

    now = current_time_millis()
    await aiozc.zeroconf.async_wait(50)
    assert current_time_millis() - now < 1000

    await aiozc.async_close()


@pytest.mark.asyncio
async def test_service_info_async_request() -> None:
    """Test registering services broadcasts and query with AsyncServceInfo.async_request."""
    if not has_working_ipv6() or os.environ.get("SKIP_IPV6"):
        pytest.skip("Requires IPv6")

    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    type_ = "_test1-srvc-type._tcp.local."
    name = "xxxyyy"
    name2 = "abc"
    registration_name = f"{name}.{type_}"
    registration_name2 = f"{name2}.{type_}"

    # Start tasks BEFORE the registration; they will keep trying
    # and see the registration a bit later
    get_service_info_task1 = asyncio.ensure_future(aiozc.async_get_service_info(type_, registration_name))
    await asyncio.sleep(_LISTENER_TIME / 1000 / 2)
    get_service_info_task2 = asyncio.ensure_future(aiozc.async_get_service_info(type_, registration_name))

    desc = {"path": "/~paulsm/"}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-1.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    info2 = ServiceInfo(
        type_,
        registration_name2,
        80,
        0,
        0,
        desc,
        "ash-5.local.",
        addresses=[socket.inet_aton("10.0.1.5")],
    )
    tasks = []
    tasks.append(await aiozc.async_register_service(info))
    tasks.append(await aiozc.async_register_service(info2))
    await asyncio.gather(*tasks)

    aiosinfo = await get_service_info_task1
    assert aiosinfo is not None
    assert aiosinfo.addresses == [socket.inet_aton("10.0.1.2")]

    aiosinfo = await get_service_info_task2
    assert aiosinfo is not None
    assert aiosinfo.addresses == [socket.inet_aton("10.0.1.2")]

    aiosinfo = await aiozc.async_get_service_info(type_, registration_name)
    assert aiosinfo is not None
    assert aiosinfo.addresses == [socket.inet_aton("10.0.1.2")]

    new_info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[
            socket.inet_aton("10.0.1.3"),
            socket.inet_pton(socket.AF_INET6, "6001:db8::1"),
        ],
    )

    task = await aiozc.async_update_service(new_info)
    await task

    aiosinfo = await aiozc.async_get_service_info(type_, registration_name)
    assert aiosinfo is not None
    assert aiosinfo.addresses == [socket.inet_aton("10.0.1.3")]

    aiosinfo = await aiozc.zeroconf.async_get_service_info(type_, registration_name)
    assert aiosinfo is not None
    assert aiosinfo.addresses == [socket.inet_aton("10.0.1.3")]

    aiosinfos = await asyncio.gather(
        aiozc.async_get_service_info(type_, registration_name),
        aiozc.async_get_service_info(type_, registration_name2),
    )
    assert aiosinfos[0] is not None
    assert aiosinfos[0].addresses == [socket.inet_aton("10.0.1.3")]
    assert aiosinfos[1] is not None
    assert aiosinfos[1].addresses == [socket.inet_aton("10.0.1.5")]

    aiosinfo = AsyncServiceInfo(type_, registration_name)
    _clear_cache(aiozc.zeroconf)
    # Generating the race condition is almost impossible
    # without patching since it's a TOCTOU race
    with patch("zeroconf.asyncio.AsyncServiceInfo._is_complete", False):
        await aiosinfo.async_request(aiozc.zeroconf, 3000)
    assert aiosinfo is not None
    assert aiosinfo.addresses == [socket.inet_aton("10.0.1.3")]

    task = await aiozc.async_unregister_service(new_info)
    await task

    aiosinfo = await aiozc.async_get_service_info(type_, registration_name)
    assert aiosinfo is None

    await aiozc.async_close()
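
# AsyncServiceInfo.async_request(zc, timeout_ms) queries the network and
# returns once the info is complete or the timeout elapses. A minimal sketch,
# assuming placeholder type and instance names:
async def _async_request_sketch(aiozc: AsyncZeroconf) -> AsyncServiceInfo:
    aiosinfo = AsyncServiceInfo("_demo._tcp.local.", "instance._demo._tcp.local.")
    await aiosinfo.async_request(aiozc.zeroconf, 3000)  # timeout in milliseconds
    return aiosinfo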


@pytest.mark.asyncio
async def test_async_service_browser() -> None:
    """Test AsyncServiceBrowser."""
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    type_ = "_test9-srvc-type._tcp.local."
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"

    calls = []

    class MyListener(ServiceListener):
        def add_service(self, aiozc: Zeroconf, type: str, name: str) -> None:
            calls.append(("add", type, name))

        def remove_service(self, aiozc: Zeroconf, type: str, name: str) -> None:
            calls.append(("remove", type, name))

        def update_service(self, aiozc: Zeroconf, type: str, name: str) -> None:
            calls.append(("update", type, name))

    listener = MyListener()
    await aiozc.async_add_service_listener(type_, listener)

    desc = {"path": "/~paulsm/"}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    task = await aiozc.async_register_service(info)
    await task
    new_info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.3")],
    )
    task = await aiozc.async_update_service(new_info)
    await task
    task = await aiozc.async_unregister_service(new_info)
    await task
    await aiozc.zeroconf.async_wait(1)
    await aiozc.async_close()

    assert calls == [
        ("add", type_, registration_name),
        ("update", type_, registration_name),
        ("remove", type_, registration_name),
    ]
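
# The browser wiring used above, reduced to its core: attach a ServiceListener
# (or, as test_integration below shows, a list of handler callables) to a type
# and cancel the browser when done. The type name is a placeholder.
async def _browser_sketch(aiozc: AsyncZeroconf, listener: ServiceListener) -> None:
    browser = AsyncServiceBrowser(aiozc.zeroconf, "_demo._tcp.local.", None, listener)
    await browser.async_cancel()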


@pytest.mark.asyncio
async def test_async_context_manager() -> None:
    """Test using an async context manager."""
    type_ = "_test10-sr-type._tcp.local."
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"

    async with AsyncZeroconf(interfaces=["127.0.0.1"]) as aiozc:
        info = ServiceInfo(
            type_,
            registration_name,
            80,
            0,
            0,
            {"path": "/~paulsm/"},
            "ash-2.local.",
            addresses=[socket.inet_aton("10.0.1.2")],
        )
        task = await aiozc.async_register_service(info)
        await task
        aiosinfo = await aiozc.async_get_service_info(type_, registration_name)
        assert aiosinfo is not None


@pytest.mark.asyncio
async def test_service_browser_cancel_async_context_manager():
    """Test we can cancel an AsyncServiceBrowser with it being used as an async context manager."""

    # instantiate a zeroconf instance
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    zc = aiozc.zeroconf
    type_ = "_hap._tcp.local."

    class MyServiceListener(ServiceListener):
        pass

    listener = MyServiceListener()

    browser = AsyncServiceBrowser(zc, type_, None, listener)

    assert cast(bool, browser.done) is False

    async with browser:
        pass

    assert cast(bool, browser.done) is True

    await aiozc.async_close()


@pytest.mark.asyncio
async def test_async_unregister_all_services() -> None:
    """Test unregistering all services."""
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    type_ = "_test1-srvc-type._tcp.local."
    name = "xxxyyy"
    name2 = "abc"
    registration_name = f"{name}.{type_}"
    registration_name2 = f"{name2}.{type_}"

    desc = {"path": "/~paulsm/"}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-1.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    info2 = ServiceInfo(
        type_,
        registration_name2,
        80,
        0,
        0,
        desc,
        "ash-5.local.",
        addresses=[socket.inet_aton("10.0.1.5")],
    )
    tasks = []
    tasks.append(await aiozc.async_register_service(info))
    tasks.append(await aiozc.async_register_service(info2))
    await asyncio.gather(*tasks)

    tasks = []
    tasks.append(aiozc.async_get_service_info(type_, registration_name))
    tasks.append(aiozc.async_get_service_info(type_, registration_name2))
    results = await asyncio.gather(*tasks)
    assert results[0] is not None
    assert results[1] is not None

    await aiozc.async_unregister_all_services()
    _clear_cache(aiozc.zeroconf)

    tasks = []
    tasks.append(aiozc.async_get_service_info(type_, registration_name))
    tasks.append(aiozc.async_get_service_info(type_, registration_name2))
    results = await asyncio.gather(*tasks)
    assert results[0] is None
    assert results[1] is None

    # Verify we can call again
    await aiozc.async_unregister_all_services()

    await aiozc.async_close()


@pytest.mark.asyncio
async def test_async_zeroconf_service_types():
    type_ = "_test-srvc-type._tcp.local."
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"

    zeroconf_registrar = AsyncZeroconf(interfaces=["127.0.0.1"])
    desc = {"path": "/~paulsm/"}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    task = await zeroconf_registrar.async_register_service(info)
    await task
    # Ensure we do not clear the cache until after the last broadcast is processed
    await asyncio.sleep(0.2)
    _clear_cache(zeroconf_registrar.zeroconf)
    try:
        service_types = await AsyncZeroconfServiceTypes.async_find(interfaces=["127.0.0.1"], timeout=2)
        assert type_ in service_types
        _clear_cache(zeroconf_registrar.zeroconf)
        service_types = await AsyncZeroconfServiceTypes.async_find(aiozc=zeroconf_registrar, timeout=2)
        assert type_ in service_types

    finally:
        await zeroconf_registrar.async_close()


@pytest.mark.asyncio
async def test_guard_against_running_serviceinfo_request_event_loop() -> None:
    """Test that running ServiceInfo.request from the event loop throws."""
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])

    service_info = AsyncServiceInfo("_hap._tcp.local.", "doesnotmatter._hap._tcp.local.")
    with pytest.raises(RuntimeError):
        service_info.request(aiozc.zeroconf, 3000)
    await aiozc.async_close()


@pytest.mark.asyncio
async def test_service_browser_instantiation_generates_add_events_from_cache():
    """Test that the ServiceBrowser will generate Add events with the existing cache when starting."""

    # instantiate a zeroconf instance
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    zc = aiozc.zeroconf
    type_ = "_hap._tcp.local."
    registration_name = f"xxxyyy.{type_}"
    callbacks = []

    class MyServiceListener(ServiceListener):
        def add_service(self, zc, type_, name) -> None:  # type: ignore[no-untyped-def]
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("add", type_, name))

        def remove_service(self, zc, type_, name) -> None:  # type: ignore[no-untyped-def]
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("remove", type_, name))

        def update_service(self, zc, type_, name) -> None:  # type: ignore[no-untyped-def]
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("update", type_, name))

    listener = MyServiceListener()

    desc = {"path": "/~paulsm/"}
    address_parsed = "10.0.1.2"
    address = socket.inet_aton(address_parsed)
    info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[address])
    zc.cache.async_add_records(
        [info.dns_pointer(), info.dns_service(), *info.dns_addresses(), info.dns_text()]
    )

    browser = AsyncServiceBrowser(zc, type_, None, listener)

    await asyncio.sleep(0)

    assert callbacks == [
        ("add", type_, registration_name),
    ]
    await browser.async_cancel()

    await aiozc.async_close()
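
# The cache-seeding trick used above, isolated: a ServiceInfo expands into its
# PTR/SRV/A/TXT records, which can be injected straight into the cache. That
# is what lets a freshly started browser fire "add" events without any
# network traffic.
def _seed_cache_sketch(zc: Zeroconf, info: ServiceInfo) -> None:
    zc.cache.async_add_records(
        [info.dns_pointer(), info.dns_service(), *info.dns_addresses(), info.dns_text()]
    )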


@pytest.mark.asyncio
async def test_integration():
    service_added = asyncio.Event()
    service_removed = asyncio.Event()
    unexpected_ttl = asyncio.Event()
    got_query = asyncio.Event()

    type_ = "_http._tcp.local."
    registration_name = f"xxxyyy.{type_}"

    def on_service_state_change(zeroconf, service_type, state_change, name):
        if name == registration_name:
            if state_change is ServiceStateChange.Added:
                service_added.set()
            elif state_change is ServiceStateChange.Removed:
                service_removed.set()

    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    zeroconf_browser = aiozc.zeroconf
    zeroconf_browser.question_history = QuestionHistoryWithoutSuppression()
    await zeroconf_browser.async_wait_for_start()

    # we are going to patch the zeroconf send to check packet sizes
    old_send = zeroconf_browser.async_send

    expected_ttl = const._DNS_OTHER_TTL
    nbr_answers = 0
    answers = []
    packets = []

    def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()):
        """Sends an outgoing packet."""
        pout = DNSIncoming(out.packets()[0])
        packets.append(pout)
        last_answers = pout.answers()
        answers.append(last_answers)

        nonlocal nbr_answers
        for answer in last_answers:
            nbr_answers += 1
            if answer.ttl <= expected_ttl / 2:
                unexpected_ttl.set()

        got_query.set()

        old_send(out, addr=addr, port=port, v6_flow_scope=v6_flow_scope)

    assert len(zeroconf_browser.engine.protocols) == 2

    aio_zeroconf_registrar = AsyncZeroconf(interfaces=["127.0.0.1"])
    zeroconf_registrar = aio_zeroconf_registrar.zeroconf
    await aio_zeroconf_registrar.zeroconf.async_wait_for_start()

    assert len(zeroconf_registrar.engine.protocols) == 2
    # patch the zeroconf send so we can capture what is being sent
    with patch.object(zeroconf_browser, "async_send", send):
        service_added = asyncio.Event()
        service_removed = asyncio.Event()

        browser = AsyncServiceBrowser(zeroconf_browser, type_, [on_service_state_change])
        info = ServiceInfo(
            type_,
            registration_name,
            80,
            0,
            0,
            {"path": "/~paulsm/"},
            "ash-2.local.",
            addresses=[socket.inet_aton("10.0.1.2")],
        )
        task = await aio_zeroconf_registrar.async_register_service(info)
        await task
        loop = asyncio.get_running_loop()
        try:
            await asyncio.wait_for(service_added.wait(), 1)
            assert service_added.is_set()
            # Make sure the startup queries are sent
            original_now = loop.time()
            start_millis = original_now * 1000

            now_millis = start_millis
            for query_count in range(_services_browser.STARTUP_QUERIES):
                now_millis += (2**query_count) * 1000
                time_changed_millis(now_millis)

            got_query.clear()
            assert not unexpected_ttl.is_set()

            assert len(packets) == _services_browser.STARTUP_QUERIES
            packets.clear()

            # Wait for the first refresh query
            # Move time forward past the point where the record is no
            # longer fresh (AKA ~75% of the TTL)
            now_millis = start_millis + ((expected_ttl * 1000) * 0.76)
            time_changed_millis(now_millis)

            await asyncio.wait_for(got_query.wait(), 1)
            assert not unexpected_ttl.is_set()
            assert len(packets) == 1
            packets.clear()

            assert len(answers) == _services_browser.STARTUP_QUERIES + 1
            # The first question should have no known answers
            assert len(answers[0]) == 0
            # The rest of the startup questions should have
            # known answers
            for answer_list in answers[1:-2]:
                assert len(answer_list) == 1
            # Once the TTL is reached, the last question should have no known answers
            assert len(answers[-1]) == 0

            got_query.clear()
            packets.clear()
            # Move time forward past the point where the record is no
            # longer fresh (AKA 85% of the TTL) to ensure we try
            # to rescue the record
            now_millis = start_millis + ((expected_ttl * 1000) * 0.87)
            time_changed_millis(now_millis)

            await asyncio.wait_for(got_query.wait(), 1)
            assert len(packets) == 1
            assert not unexpected_ttl.is_set()

            packets.clear()
            got_query.clear()
            # Move time forward past the point where the record is no
            # longer fresh (AKA 95% of the TTL). At this point
            # no rescue should be scheduled because it
            # would exceed the TTL
            now_millis = start_millis + ((expected_ttl * 1000) * 0.98)

            # Verify we don't send a query for a record that is
            # past the TTL, as we should not try to rescue it
            # once it's past the TTL
            time_changed_millis(now_millis)
            await asyncio.wait_for(got_query.wait(), 1)
            assert len(packets) == 1

            # Don't remove service, allow close() to cleanup
        finally:
            await aio_zeroconf_registrar.async_close()
            await asyncio.wait_for(service_removed.wait(), 1)
            assert service_removed.is_set()
            await browser.async_cancel()
            await aiozc.async_close()
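
# The timeline exercised above, sketched numerically: startup queries go out
# at exponentially growing delays (1s, 2s, 4s, ...), then the browser
# re-queries as the record ages past ~75% of its TTL and again near ~85%
# (the rescue attempt), but not once the TTL is effectively exhausted.
def _refresh_schedule_sketch(ttl: int = const._DNS_OTHER_TTL) -> list[float]:
    startup_delays = [2**n for n in range(_services_browser.STARTUP_QUERIES)]
    refresh_points = [ttl * 0.75, ttl * 0.85]  # seconds after registration
    return [*startup_delays, *refresh_points]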


@pytest.mark.asyncio
async def test_info_asking_default_is_asking_qm_questions_after_the_first_qu():
    """Verify the service info first question is QU and subsequent ones are QM questions."""
    type_ = "_quservice._tcp.local."
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    zeroconf_info = aiozc.zeroconf

    name = "xxxyyy"
    registration_name = f"{name}.{type_}"

    desc = {"path": "/~paulsm/"}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )

    zeroconf_info.registry.async_add(info)

    # we are going to patch the zeroconf send to check query transmission
    old_send = zeroconf_info.async_send

    first_outgoing = None
    second_outgoing = None

    def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT):
        """Sends an outgoing packet."""
        nonlocal first_outgoing
        nonlocal second_outgoing
        if out.questions:
            if first_outgoing is not None and second_outgoing is None:  # type: ignore[unreachable]
                second_outgoing = out  # type: ignore[unreachable]
            if first_outgoing is None:
                first_outgoing = out
        old_send(out, addr=addr, port=port)

    # patch the zeroconf send
    with patch.object(zeroconf_info, "async_send", send):
        aiosinfo = AsyncServiceInfo(type_, registration_name)
        # Patch _is_complete so we send multiple times
        with patch("zeroconf.asyncio.AsyncServiceInfo._is_complete", False):
            await aiosinfo.async_request(aiozc.zeroconf, 1200)
        try:
            assert first_outgoing.questions[0].unicast is True  # type: ignore[union-attr]
            assert second_outgoing.questions[0].unicast is False  # type: ignore[attr-defined]
        finally:
            await aiozc.async_close()
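
# The QU/QM behavior verified above, in one assertion each: the first
# outgoing question sets the unicast-response (QU) bit and follow-ups are
# ordinary multicast (QM) questions (the caller supplies the two captured
# outgoing queries).
def _qu_then_qm_sketch(first: DNSOutgoing, second: DNSOutgoing) -> None:
    assert first.questions[0].unicast is True  # QU: ask for a unicast reply
    assert second.questions[0].unicast is False  # QM: standard multicast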


@pytest.mark.asyncio
async def test_service_browser_ignores_unrelated_updates():
    """Test that the ServiceBrowser ignores unrelated updates."""

    # instantiate a zeroconf instance
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    zc = aiozc.zeroconf
    type_ = "_veryuniqueone._tcp.local."
    registration_name = f"xxxyyy.{type_}"
    callbacks = []

    class MyServiceListener(ServiceListener):
        def add_service(self, zc, type_, name) -> None:  # type: ignore[no-untyped-def]
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("add", type_, name))

        def remove_service(self, zc, type_, name) -> None:  # type: ignore[no-untyped-def]
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("remove", type_, name))

        def update_service(self, zc, type_, name) -> None:  # type: ignore[no-untyped-def]
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("update", type_, name))

    listener = MyServiceListener()

    desc = {"path": "/~paulsm/"}
    address_parsed = "10.0.1.2"
    address = socket.inet_aton(address_parsed)
    info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[address])
    zc.cache.async_add_records(
        [
            info.dns_pointer(),
            info.dns_service(),
            *info.dns_addresses(),
            info.dns_text(),
            DNSService(
                "zoom._unrelated._tcp.local.",
                const._TYPE_SRV,
                const._CLASS_IN,
                const._DNS_HOST_TTL,
                0,
                0,
                81,
                "unrelated.local.",
            ),
        ]
    )

    browser = AsyncServiceBrowser(zc, type_, None, listener)

    generated = DNSOutgoing(const._FLAGS_QR_RESPONSE)
    generated.add_answer_at_time(
        DNSPointer(
            "_unrelated._tcp.local.",
            const._TYPE_PTR,
            const._CLASS_IN,
            const._DNS_OTHER_TTL,
            "zoom._unrelated._tcp.local.",
        ),
        0,
    )
    generated.add_answer_at_time(
        DNSAddress(
            "unrelated.local.",
            const._TYPE_A,
            const._CLASS_IN,
            const._DNS_HOST_TTL,
            b"1234",
        ),
        0,
    )
    generated.add_answer_at_time(
        DNSText(
            "zoom._unrelated._tcp.local.",
            const._TYPE_TXT,
            const._CLASS_IN | const._CLASS_UNIQUE,
            const._DNS_OTHER_TTL,
            b"zoom",
        ),
        0,
    )

    zc.record_manager.async_updates_from_response(DNSIncoming(generated.packets()[0]))

    await browser.async_cancel()
    await asyncio.sleep(0)

    assert callbacks == [
        ("add", type_, registration_name),
    ]
    await aiozc.async_close()


@pytest.mark.asyncio
async def test_async_request_timeout():
    """Test that the timeout does not throw an exception and finishes close to the actual timeout."""
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    await aiozc.zeroconf.async_wait_for_start()
    start_time = current_time_millis()
    assert await aiozc.async_get_service_info("_notfound.local.", "notthere._notfound.local.") is None
    end_time = current_time_millis()
    await aiozc.async_close()
    # 3000ms for the default timeout
    # 1000ms for loaded systems + schedule overhead
    assert (end_time - start_time) < 3000 + 1000


@pytest.mark.asyncio
async def test_async_request_non_running_instance():
    """Test that the async_request throws when zeroconf is not running."""
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    await aiozc.async_close()
    with pytest.raises(NotRunningException):
        await aiozc.async_get_service_info("_notfound.local.", "notthere._notfound.local.")


@pytest.mark.asyncio
async def test_legacy_unicast_response(run_isolated):
    """Verify legacy unicast responses include questions and correct id."""
    type_ = "_mservice._tcp.local."
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    await aiozc.zeroconf.async_wait_for_start()

    name = "xxxyyy"
    registration_name = f"{name}.{type_}"

    desc = {"path": "/~paulsm/"}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )

    aiozc.zeroconf.registry.async_add(info)
    query = DNSOutgoing(const._FLAGS_QR_QUERY, multicast=False, id_=888)
    question = DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN)
    query.add_question(question)
    protocol = aiozc.zeroconf.engine.protocols[0]

    with patch.object(aiozc.zeroconf, "async_send") as send_mock:
        protocol.datagram_received(query.packets()[0], ("127.0.0.1", 6503))

    calls = send_mock.mock_calls
    # Verify the response is sent back on the socket it was received from
    assert calls == [call(ANY, "127.0.0.1", 6503, (), protocol.transport)]
    outgoing = send_mock.call_args[0][0]
    assert isinstance(outgoing, DNSOutgoing)
    assert outgoing.questions == [question]
    assert outgoing.id == query.id
    await aiozc.async_close()
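
# Legacy unicast handling in brief: a query arriving from a source port other
# than 5353 with multicast=False is answered directly to that address and
# port, echoing the query id and questions, as asserted above. A sketch of
# building such a query (the id value is arbitrary):
def _legacy_unicast_query_sketch(service_type: str) -> DNSOutgoing:
    query = DNSOutgoing(const._FLAGS_QR_QUERY, multicast=False, id_=888)
    query.add_question(DNSQuestion(service_type, const._TYPE_PTR, const._CLASS_IN))
    return query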


@pytest.mark.asyncio
async def test_update_with_uppercase_names(run_isolated):
    """Test an ip update from a shelly which uses uppercase names."""
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    await aiozc.zeroconf.async_wait_for_start()

    callbacks = []

    class MyServiceListener(ServiceListener):
        def add_service(self, zc, type_, name) -> None:  # type: ignore[no-untyped-def]
            nonlocal callbacks
            callbacks.append(("add", type_, name))

        def remove_service(self, zc, type_, name) -> None:  # type: ignore[no-untyped-def]
            nonlocal callbacks
            callbacks.append(("remove", type_, name))

        def update_service(self, zc, type_, name) -> None:  # type: ignore[no-untyped-def]
            nonlocal callbacks
            callbacks.append(("update", type_, name))

    listener = MyServiceListener()
    browser = AsyncServiceBrowser(aiozc.zeroconf, "_http._tcp.local.", None, listener)
    protocol = aiozc.zeroconf.engine.protocols[0]

    packet = b"\x00\x00\x84\x80\x00\x00\x00\n\x00\x00\x00\x00\t_services\x07_dns-sd\x04_udp\x05local\x00\x00\x0c\x00\x01\x00\x00\x11\x94\x00\x14\x07_shelly\x04_tcp\x05local\x00\t_services\x07_dns-sd\x04_udp\x05local\x00\x00\x0c\x00\x01\x00\x00\x11\x94\x00\x12\x05_http\x04_tcp\x05local\x00\x07_shelly\x04_tcp\x05local\x00\x00\x0c\x00\x01\x00\x00\x11\x94\x00.\x19shellypro4pm-94b97ec07650\x07_shelly\x04_tcp\x05local\x00\x19shellypro4pm-94b97ec07650\x07_shelly\x04_tcp\x05local\x00\x00!\x80\x01\x00\x00\x00x\x00'\x00\x00\x00\x00\x00P\x19ShellyPro4PM-94B97EC07650\x05local\x00\x19shellypro4pm-94b97ec07650\x07_shelly\x04_tcp\x05local\x00\x00\x10\x80\x01\x00\x00\x00x\x00\"\napp=Pro4PM\x10ver=0.10.0-beta5\x05gen=2\x05_http\x04_tcp\x05local\x00\x00\x0c\x00\x01\x00\x00\x11\x94\x00,\x19ShellyPro4PM-94B97EC07650\x05_http\x04_tcp\x05local\x00\x19ShellyPro4PM-94B97EC07650\x05_http\x04_tcp\x05local\x00\x00!\x80\x01\x00\x00\x00x\x00'\x00\x00\x00\x00\x00P\x19ShellyPro4PM-94B97EC07650\x05local\x00\x19ShellyPro4PM-94B97EC07650\x05_http\x04_tcp\x05local\x00\x00\x10\x80\x01\x00\x00\x00x\x00\x06\x05gen=2\x19ShellyPro4PM-94B97EC07650\x05local\x00\x00\x01\x80\x01\x00\x00\x00x\x00\x04\xc0\xa8\xbc=\x19ShellyPro4PM-94B97EC07650\x05local\x00\x00/\x80\x01\x00\x00\x00x\x00$\x19ShellyPro4PM-94B97EC07650\x05local\x00\x00\x01@"  # noqa: E501
    protocol.datagram_received(packet, ("127.0.0.1", 6503))
    await asyncio.sleep(0)
    packet = b"\x00\x00\x84\x80\x00\x00\x00\n\x00\x00\x00\x00\t_services\x07_dns-sd\x04_udp\x05local\x00\x00\x0c\x00\x01\x00\x00\x11\x94\x00\x14\x07_shelly\x04_tcp\x05local\x00\t_services\x07_dns-sd\x04_udp\x05local\x00\x00\x0c\x00\x01\x00\x00\x11\x94\x00\x12\x05_http\x04_tcp\x05local\x00\x07_shelly\x04_tcp\x05local\x00\x00\x0c\x00\x01\x00\x00\x11\x94\x00.\x19shellypro4pm-94b97ec07650\x07_shelly\x04_tcp\x05local\x00\x19shellypro4pm-94b97ec07650\x07_shelly\x04_tcp\x05local\x00\x00!\x80\x01\x00\x00\x00x\x00'\x00\x00\x00\x00\x00P\x19ShellyPro4PM-94B97EC07650\x05local\x00\x19shellypro4pm-94b97ec07650\x07_shelly\x04_tcp\x05local\x00\x00\x10\x80\x01\x00\x00\x00x\x00\"\napp=Pro4PM\x10ver=0.10.0-beta5\x05gen=2\x05_http\x04_tcp\x05local\x00\x00\x0c\x00\x01\x00\x00\x11\x94\x00,\x19ShellyPro4PM-94B97EC07650\x05_http\x04_tcp\x05local\x00\x19ShellyPro4PM-94B97EC07650\x05_http\x04_tcp\x05local\x00\x00!\x80\x01\x00\x00\x00x\x00'\x00\x00\x00\x00\x00P\x19ShellyPro4PM-94B97EC07650\x05local\x00\x19ShellyPro4PM-94B97EC07650\x05_http\x04_tcp\x05local\x00\x00\x10\x80\x01\x00\x00\x00x\x00\x06\x05gen=2\x19ShellyPro4PM-94B97EC07650\x05local\x00\x00\x01\x80\x01\x00\x00\x00x\x00\x04\xc0\xa8\xbcA\x19ShellyPro4PM-94B97EC07650\x05local\x00\x00/\x80\x01\x00\x00\x00x\x00$\x19ShellyPro4PM-94B97EC07650\x05local\x00\x00\x01@"  # noqa: E501
    protocol.datagram_received(packet, ("127.0.0.1", 6503))
    await browser.async_cancel()
    await aiozc.async_close()

    assert callbacks == [
        ("add", "_http._tcp.local.", "ShellyPro4PM-94B97EC07650._http._tcp.local."),
        ("update", "_http._tcp.local.", "ShellyPro4PM-94B97EC07650._http._tcp.local."),
    ]
python-zeroconf-0.146.0/tests/test_cache.py
"""Unit tests for zeroconf._cache."""

from __future__ import annotations

import logging
import unittest.mock
from heapq import heapify, heappop

import pytest

import zeroconf as r
from zeroconf import const

log = logging.getLogger("zeroconf")
original_logging_level = logging.NOTSET


def setup_module():
    global original_logging_level
    original_logging_level = log.level
    log.setLevel(logging.DEBUG)


def teardown_module():
    if original_logging_level != logging.NOTSET:
        log.setLevel(original_logging_level)


class TestDNSCache(unittest.TestCase):
    def test_order(self):
        record1 = r.DNSAddress("a", const._TYPE_SOA, const._CLASS_IN, 1, b"a")
        record2 = r.DNSAddress("a", const._TYPE_SOA, const._CLASS_IN, 1, b"b")
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        entry = r.DNSEntry("a", const._TYPE_SOA, const._CLASS_IN)
        cached_record = cache.get(entry)
        assert cached_record == record2

    def test_adding_same_record_to_cache_different_ttls_with_get(self):
        """We should always get back the last entry we added if there are different TTLs.

        This ensures we only have one source of truth for TTLs as a record cannot
        be both expired and not expired.
        """
        record1 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a")
        record2 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 10, b"a")
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        entry = r.DNSEntry(record2.name, const._TYPE_A, const._CLASS_IN)
        cached_record = cache.get(entry)
        assert cached_record == record2

    def test_adding_same_record_to_cache_different_ttls_with_get_all(self):
        """Verify we only get one record back.

        The last record added should replace the previous since two
        records with different ttls are __eq__. This ensures we
        only have one source of truth for TTLs as a record cannot
        be both expired and not expired.
        """
        record1 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a")
        record2 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 10, b"a")
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        cached_records = cache.get_all_by_details("a", const._TYPE_A, const._CLASS_IN)
        assert cached_records == [record2]

    def test_cache_empty_does_not_leak_memory_by_leaving_empty_list(self):
        record1 = r.DNSAddress("a", const._TYPE_SOA, const._CLASS_IN, 1, b"a")
        record2 = r.DNSAddress("a", const._TYPE_SOA, const._CLASS_IN, 1, b"b")
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        assert "a" in cache.cache
        cache.async_remove_records([record1, record2])
        assert "a" not in cache.cache

    def test_cache_empty_multiple_calls(self):
        record1 = r.DNSAddress("a", const._TYPE_SOA, const._CLASS_IN, 1, b"a")
        record2 = r.DNSAddress("a", const._TYPE_SOA, const._CLASS_IN, 1, b"b")
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        assert "a" in cache.cache
        cache.async_remove_records([record1, record2])
        assert "a" not in cache.cache


class TestDNSAsyncCacheAPI(unittest.TestCase):
    def test_async_get_unique(self):
        record1 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a")
        record2 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"b")
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        assert cache.async_get_unique(record1) == record1
        assert cache.async_get_unique(record2) == record2

    def test_async_all_by_details(self):
        record1 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a")
        record2 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"b")
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        assert set(cache.async_all_by_details("a", const._TYPE_A, const._CLASS_IN)) == {
            record1,
            record2,
        }

    def test_async_entries_with_server(self):
        record1 = r.DNSService(
            "irrelevant",
            const._TYPE_SRV,
            const._CLASS_IN,
            const._DNS_HOST_TTL,
            0,
            0,
            85,
            "ab",
        )
        record2 = r.DNSService(
            "irrelevant",
            const._TYPE_SRV,
            const._CLASS_IN,
            const._DNS_HOST_TTL,
            0,
            0,
            80,
            "ab",
        )
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        assert set(cache.async_entries_with_server("ab")) == {record1, record2}
        assert set(cache.async_entries_with_server("AB")) == {record1, record2}

    def test_async_entries_with_name(self):
        record1 = r.DNSService(
            "irrelevant",
            const._TYPE_SRV,
            const._CLASS_IN,
            const._DNS_HOST_TTL,
            0,
            0,
            85,
            "ab",
        )
        record2 = r.DNSService(
            "irrelevant",
            const._TYPE_SRV,
            const._CLASS_IN,
            const._DNS_HOST_TTL,
            0,
            0,
            80,
            "ab",
        )
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        assert set(cache.async_entries_with_name("irrelevant")) == {record1, record2}
        assert set(cache.async_entries_with_name("Irrelevant")) == {record1, record2}


# These functions have been seen in other projects so
# we try to maintain a stable API for all the threadsafe getters
class TestDNSCacheAPI(unittest.TestCase):
    def test_get(self):
        record1 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a")
        record2 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"b")
        record3 = r.DNSAddress("a", const._TYPE_AAAA, const._CLASS_IN, 1, b"ipv6")
        cache = r.DNSCache()
        cache.async_add_records([record1, record2, record3])
        assert cache.get(record1) == record1
        assert cache.get(record2) == record2
        assert cache.get(r.DNSEntry("a", const._TYPE_A, const._CLASS_IN)) == record2
        assert cache.get(r.DNSEntry("a", const._TYPE_AAAA, const._CLASS_IN)) == record3
        assert cache.get(r.DNSEntry("notthere", const._TYPE_A, const._CLASS_IN)) is None

    def test_get_by_details(self):
        record1 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a")
        record2 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"b")
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        assert cache.get_by_details("a", const._TYPE_A, const._CLASS_IN) == record2

    def test_get_all_by_details(self):
        record1 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a")
        record2 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"b")
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        assert set(cache.get_all_by_details("a", const._TYPE_A, const._CLASS_IN)) == {
            record1,
            record2,
        }

    def test_entries_with_server(self):
        record1 = r.DNSService(
            "irrelevant",
            const._TYPE_SRV,
            const._CLASS_IN,
            const._DNS_HOST_TTL,
            0,
            0,
            85,
            "ab",
        )
        record2 = r.DNSService(
            "irrelevant",
            const._TYPE_SRV,
            const._CLASS_IN,
            const._DNS_HOST_TTL,
            0,
            0,
            80,
            "ab",
        )
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        assert set(cache.entries_with_server("ab")) == {record1, record2}
        assert set(cache.entries_with_server("AB")) == {record1, record2}

    def test_entries_with_name(self):
        record1 = r.DNSService(
            "irrelevant",
            const._TYPE_SRV,
            const._CLASS_IN,
            const._DNS_HOST_TTL,
            0,
            0,
            85,
            "ab",
        )
        record2 = r.DNSService(
            "irrelevant",
            const._TYPE_SRV,
            const._CLASS_IN,
            const._DNS_HOST_TTL,
            0,
            0,
            80,
            "ab",
        )
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        assert set(cache.entries_with_name("irrelevant")) == {record1, record2}
        assert set(cache.entries_with_name("Irrelevant")) == {record1, record2}

    def test_current_entry_with_name_and_alias(self):
        record1 = r.DNSPointer(
            "irrelevant",
            const._TYPE_PTR,
            const._CLASS_IN,
            const._DNS_OTHER_TTL,
            "x.irrelevant",
        )
        record2 = r.DNSPointer(
            "irrelevant",
            const._TYPE_PTR,
            const._CLASS_IN,
            const._DNS_OTHER_TTL,
            "y.irrelevant",
        )
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        assert cache.current_entry_with_name_and_alias("irrelevant", "x.irrelevant") == record1

    def test_name(self):
        record1 = r.DNSService(
            "irrelevant",
            const._TYPE_SRV,
            const._CLASS_IN,
            const._DNS_HOST_TTL,
            0,
            0,
            85,
            "ab",
        )
        record2 = r.DNSService(
            "irrelevant",
            const._TYPE_SRV,
            const._CLASS_IN,
            const._DNS_HOST_TTL,
            0,
            0,
            80,
            "ab",
        )
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        assert cache.names() == ["irrelevant"]
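

# A minimal usage sketch of the stable threadsafe getter surface exercised
# above; illustrative only, using just the calls demonstrated in this file.
def test_threadsafe_getter_api_sketch():
    record = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a")
    cache = r.DNSCache()
    cache.async_add_records([record])
    # Lookups by record, by (name, type, class) details, by name, and the
    # full list of cached names all go through the threadsafe getters.
    assert cache.get(record) is record
    assert cache.get_by_details("a", const._TYPE_A, const._CLASS_IN) is record
    assert set(cache.get_all_by_details("a", const._TYPE_A, const._CLASS_IN)) == {record}
    assert set(cache.entries_with_name("a")) == {record}
    assert cache.names() == ["a"]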


def test_async_entries_with_name_returns_newest_record():
    cache = r.DNSCache()
    record1 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a", created=1.0)
    record2 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a", created=2.0)
    cache.async_add_records([record1])
    cache.async_add_records([record2])
    assert next(iter(cache.async_entries_with_name("a"))) is record2


def test_async_entries_with_server_returns_newest_record():
    cache = r.DNSCache()
    record1 = r.DNSService("a", const._TYPE_SRV, const._CLASS_IN, 1, 1, 1, 1, "a", created=1.0)
    record2 = r.DNSService("a", const._TYPE_SRV, const._CLASS_IN, 1, 1, 1, 1, "a", created=2.0)
    cache.async_add_records([record1])
    cache.async_add_records([record2])
    assert next(iter(cache.async_entries_with_server("a"))) is record2


def test_async_get_returns_newest_record():
    cache = r.DNSCache()
    record1 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a", created=1.0)
    record2 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a", created=2.0)
    cache.async_add_records([record1])
    cache.async_add_records([record2])
    assert cache.get(record2) is record2


def test_async_get_returns_newest_nsec_record():
    cache = r.DNSCache()
    record1 = r.DNSNsec("a", const._TYPE_NSEC, const._CLASS_IN, 1, "a", [], created=1.0)
    record2 = r.DNSNsec("a", const._TYPE_NSEC, const._CLASS_IN, 1, "a", [], created=2.0)
    cache.async_add_records([record1])
    cache.async_add_records([record2])
    assert cache.get(record2) is record2


def test_get_by_details_returns_newest_record():
    cache = r.DNSCache()
    record1 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a", created=1.0)
    record2 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a", created=2.0)
    cache.async_add_records([record1])
    cache.async_add_records([record2])
    assert cache.get_by_details("a", const._TYPE_A, const._CLASS_IN) is record2


def test_get_all_by_details_returns_newest_record():
    cache = r.DNSCache()
    record1 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a", created=1.0)
    record2 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a", created=2.0)
    cache.async_add_records([record1])
    cache.async_add_records([record2])
    records = cache.get_all_by_details("a", const._TYPE_A, const._CLASS_IN)
    assert len(records) == 1
    assert records[0] is record2


def test_async_get_all_by_details_returns_newest_record():
    cache = r.DNSCache()
    record1 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a", created=1.0)
    record2 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a", created=2.0)
    cache.async_add_records([record1])
    cache.async_add_records([record2])
    records = cache.async_all_by_details("a", const._TYPE_A, const._CLASS_IN)
    assert len(records) == 1
    assert records[0] is record2


def test_async_get_unique_returns_newest_record():
    cache = r.DNSCache()
    record1 = r.DNSPointer("a", const._TYPE_PTR, const._CLASS_IN, 1, "a", created=1.0)
    record2 = r.DNSPointer("a", const._TYPE_PTR, const._CLASS_IN, 1, "a", created=2.0)
    cache.async_add_records([record1])
    cache.async_add_records([record2])
    record = cache.async_get_unique(record1)
    assert record is record2
    record = cache.async_get_unique(record2)
    assert record is record2


@pytest.mark.asyncio
async def test_cache_heap_cleanup() -> None:
    """Test that the heap gets cleaned up when there are many old expirations."""
    cache = r.DNSCache()
    # The heap should not be cleaned up when there are less than 100 expiration changes
    min_records_to_cleanup = 100
    now = r.current_time_millis()
    name = "heap.local."
    ttl_seconds = 100
    ttl_millis = ttl_seconds * 1000

    for i in range(min_records_to_cleanup):
        record = r.DNSAddress(name, const._TYPE_A, const._CLASS_IN, ttl_seconds, b"1", created=now + i)
        cache.async_add_records([record])

    assert len(cache._expire_heap) == min_records_to_cleanup
    assert len(cache.async_entries_with_name(name)) == 1

    # Now that we have reached the minimum number of records for cleanup,
    # add one more record to trigger the cleanup
    record = r.DNSAddress(
        name, const._TYPE_A, const._CLASS_IN, ttl_seconds, b"1", created=now + min_records_to_cleanup
    )
    expected_expire_time = record.created + ttl_millis
    cache.async_add_records([record])
    assert len(cache.async_entries_with_name(name)) == 1
    entry = next(iter(cache.async_entries_with_name(name)))
    assert (entry.created + ttl_millis) == expected_expire_time
    assert entry is record

    # Verify the entry is still present, then run an expire pass
    assert len(cache.async_entries_with_name(name)) == 1
    cache.async_expire(now)

    heap_copy = cache._expire_heap.copy()
    heapify(heap_copy)
    # Ensure heap order is maintained
    assert cache._expire_heap == heap_copy

    # The heap should have been cleaned up
    assert len(cache._expire_heap) == 1
    assert len(cache.async_entries_with_name(name)) == 1

    entry = next(iter(cache.async_entries_with_name(name)))
    assert entry is record

    assert (entry.created + ttl_millis) == expected_expire_time

    cache.async_expire(expected_expire_time)
    assert not cache.async_entries_with_name(name), cache._expire_heap
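

def test_lazy_heap_cleanup_sketch() -> None:
    """Standalone sketch of the lazy-deletion pattern exercised above.

    This is illustrative only and does not touch DNSCache internals:
    superseded heap entries are tolerated until a cleanup pass rebuilds
    the heap from the expirations that are still live.
    """
    live = {"rec": 300.0}  # name -> the currently valid expire time
    heap = [(100.0, "rec"), (200.0, "rec"), (300.0, "rec")]  # two stale, one live
    heapify(heap)
    # Cleanup pass: drop entries whose timestamp no longer matches the live
    # expire time, then restore the heap invariant.
    heap = [entry for entry in heap if live.get(entry[1]) == entry[0]]
    heapify(heap)
    assert heap == [(300.0, "rec")]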


@pytest.mark.asyncio
async def test_cache_heap_multi_name_cleanup() -> None:
    """Test cleanup with multiple names."""
    cache = r.DNSCache()
    # The heap should not be cleaned up when there are less than 100 expiration changes
    min_records_to_cleanup = 100
    now = r.current_time_millis()
    name = "heap.local."
    name2 = "heap2.local."
    ttl_seconds = 100
    ttl_millis = ttl_seconds * 1000

    for i in range(min_records_to_cleanup):
        record = r.DNSAddress(name, const._TYPE_A, const._CLASS_IN, ttl_seconds, b"1", created=now + i)
        cache.async_add_records([record])
    expected_expire_time = record.created + ttl_millis

    for i in range(5):
        record = r.DNSAddress(
            name2, const._TYPE_A, const._CLASS_IN, ttl_seconds, bytes((i,)), created=now + i
        )
        cache.async_add_records([record])

    assert len(cache._expire_heap) == min_records_to_cleanup + 5
    assert len(cache.async_entries_with_name(name)) == 1
    assert len(cache.async_entries_with_name(name2)) == 5

    cache.async_expire(now)
    # The heap and expirations should have been cleaned up
    assert len(cache._expire_heap) == 1 + 5
    assert len(cache._expirations) == 1 + 5

    cache.async_expire(expected_expire_time)
    assert not cache.async_entries_with_name(name), cache._expire_heap


@pytest.mark.asyncio
async def test_cache_heap_pops_order() -> None:
    """Test cache heap is popped in order."""
    cache = r.DNSCache()
    # The heap should not be cleaned up when there are less than 100 expiration changes
    min_records_to_cleanup = 100
    now = r.current_time_millis()
    name = "heap.local."
    name2 = "heap2.local."
    ttl_seconds = 100

    for i in range(min_records_to_cleanup):
        record = r.DNSAddress(name, const._TYPE_A, const._CLASS_IN, ttl_seconds, b"1", created=now + i)
        cache.async_add_records([record])

    for i in range(5):
        record = r.DNSAddress(
            name2, const._TYPE_A, const._CLASS_IN, ttl_seconds, bytes((i,)), created=now + i
        )
        cache.async_add_records([record])

    assert len(cache._expire_heap) == min_records_to_cleanup + 5
    assert len(cache.async_entries_with_name(name)) == 1
    assert len(cache.async_entries_with_name(name2)) == 5

    start_ts = 0.0
    while cache._expire_heap:
        ts, _ = heappop(cache._expire_heap)
        assert ts >= start_ts
        start_ts = ts
07070100000075000081A400000000000000000000000167C7AD1600000355000000000000000000000000000000000000003700000000python-zeroconf-0.146.0/tests/test_circular_imports.py"""Test to check for circular imports."""

from __future__ import annotations

import asyncio
import sys

import pytest


@pytest.mark.asyncio
@pytest.mark.timeout(30)  # cloud CI runners can take > 9s
@pytest.mark.parametrize(
    "module",
    [
        "zeroconf",
        "zeroconf.asyncio",
        "zeroconf._protocol.incoming",
        "zeroconf._protocol.outgoing",
        "zeroconf.const",
        "zeroconf._logger",
        "zeroconf._transport",
        "zeroconf._record_update",
        "zeroconf._services.browser",
        "zeroconf._services.info",
    ],
)
async def test_circular_imports(module: str) -> None:
    """Check that components can be imported without circular imports."""
    process = await asyncio.create_subprocess_exec(sys.executable, "-c", f"import {module}")
    await process.communicate()
    assert process.returncode == 0
07070100000076000081A400000000000000000000000167C7AD1600005F19000000000000000000000000000000000000002B00000000python-zeroconf-0.146.0/tests/test_core.py"""Unit tests for zeroconf._core"""

from __future__ import annotations

import asyncio
import logging
import os
import socket
import sys
import threading
import time
import unittest
import unittest.mock
from typing import cast
from unittest.mock import AsyncMock, Mock, patch

import pytest

import zeroconf as r
from zeroconf import NotRunningException, Zeroconf, const, current_time_millis
from zeroconf._listener import AsyncListener, _WrappedTransport
from zeroconf._protocol.incoming import DNSIncoming
from zeroconf.asyncio import AsyncZeroconf

from . import _clear_cache, _inject_response, _wait_for_start, has_working_ipv6

log = logging.getLogger("zeroconf")
original_logging_level = logging.NOTSET


def setup_module():
    global original_logging_level
    original_logging_level = log.level
    log.setLevel(logging.DEBUG)


def teardown_module():
    if original_logging_level != logging.NOTSET:
        log.setLevel(original_logging_level)


def threadsafe_query(
    zc: Zeroconf,
    protocol: AsyncListener,
    msg: DNSIncoming,
    addr: str,
    port: int,
    transport: _WrappedTransport,
    v6_flow_scope: tuple[()] | tuple[int, int],
) -> None:
    async def make_query():
        protocol.handle_query_or_defer(msg, addr, port, transport, v6_flow_scope)

    assert zc.loop is not None
    asyncio.run_coroutine_threadsafe(make_query(), zc.loop).result()


class Framework(unittest.TestCase):
    def test_launch_and_close(self):
        rv = r.Zeroconf(interfaces=r.InterfaceChoice.All)
        rv.close()
        rv = r.Zeroconf(interfaces=r.InterfaceChoice.Default)
        rv.close()

    def test_launch_and_close_context_manager(self):
        with r.Zeroconf(interfaces=r.InterfaceChoice.All) as rv:
            assert rv.done is False
        assert rv.done is True

        with r.Zeroconf(interfaces=r.InterfaceChoice.Default) as rv:  # type: ignore[unreachable]
            assert rv.done is False
        assert rv.done is True

    def test_launch_and_close_unicast(self):
        rv = r.Zeroconf(interfaces=r.InterfaceChoice.All, unicast=True)
        rv.close()
        rv = r.Zeroconf(interfaces=r.InterfaceChoice.Default, unicast=True)
        rv.close()

    def test_close_multiple_times(self):
        rv = r.Zeroconf(interfaces=r.InterfaceChoice.Default)
        rv.close()
        rv.close()

    @unittest.skipIf(not has_working_ipv6(), "Requires IPv6")
    @unittest.skipIf(os.environ.get("SKIP_IPV6"), "IPv6 tests disabled")
    def test_launch_and_close_v4_v6(self):
        rv = r.Zeroconf(interfaces=r.InterfaceChoice.All, ip_version=r.IPVersion.All)
        rv.close()
        rv = r.Zeroconf(interfaces=r.InterfaceChoice.Default, ip_version=r.IPVersion.All)
        rv.close()

    @unittest.skipIf(not has_working_ipv6(), "Requires IPv6")
    @unittest.skipIf(os.environ.get("SKIP_IPV6"), "IPv6 tests disabled")
    def test_launch_and_close_v6_only(self):
        rv = r.Zeroconf(interfaces=r.InterfaceChoice.All, ip_version=r.IPVersion.V6Only)
        rv.close()
        rv = r.Zeroconf(interfaces=r.InterfaceChoice.Default, ip_version=r.IPVersion.V6Only)
        rv.close()

    @unittest.skipIf(sys.platform == "darwin", reason="apple_p2p failure path not testable on mac")
    def test_launch_and_close_apple_p2p_not_mac(self):
        with pytest.raises(RuntimeError):
            r.Zeroconf(apple_p2p=True)

    @unittest.skipIf(sys.platform != "darwin", reason="apple_p2p happy path only testable on mac")
    def test_launch_and_close_apple_p2p_on_mac(self):
        rv = r.Zeroconf(apple_p2p=True)
        rv.close()

    def test_async_updates_from_response(self):
        def mock_incoming_msg(
            service_state_change: r.ServiceStateChange,
        ) -> r.DNSIncoming:
            ttl = 120
            generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)

            if service_state_change == r.ServiceStateChange.Updated:
                generated.add_answer_at_time(
                    r.DNSText(
                        service_name,
                        const._TYPE_TXT,
                        const._CLASS_IN | const._CLASS_UNIQUE,
                        ttl,
                        service_text,
                    ),
                    0,
                )
                return r.DNSIncoming(generated.packets()[0])

            if service_state_change == r.ServiceStateChange.Removed:
                ttl = 0

            generated.add_answer_at_time(
                r.DNSPointer(service_type, const._TYPE_PTR, const._CLASS_IN, ttl, service_name),
                0,
            )
            generated.add_answer_at_time(
                r.DNSService(
                    service_name,
                    const._TYPE_SRV,
                    const._CLASS_IN | const._CLASS_UNIQUE,
                    ttl,
                    0,
                    0,
                    80,
                    service_server,
                ),
                0,
            )
            generated.add_answer_at_time(
                r.DNSText(
                    service_name,
                    const._TYPE_TXT,
                    const._CLASS_IN | const._CLASS_UNIQUE,
                    ttl,
                    service_text,
                ),
                0,
            )
            generated.add_answer_at_time(
                r.DNSAddress(
                    service_server,
                    const._TYPE_A,
                    const._CLASS_IN | const._CLASS_UNIQUE,
                    ttl,
                    socket.inet_aton(service_address),
                ),
                0,
            )

            return r.DNSIncoming(generated.packets()[0])

        def mock_split_incoming_msg(
            service_state_change: r.ServiceStateChange,
        ) -> r.DNSIncoming:
            """Mock an incoming message for the case where the packet is split."""
            ttl = 120
            generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
            generated.add_answer_at_time(
                r.DNSAddress(
                    service_server,
                    const._TYPE_A,
                    const._CLASS_IN | const._CLASS_UNIQUE,
                    ttl,
                    socket.inet_aton(service_address),
                ),
                0,
            )
            generated.add_answer_at_time(
                r.DNSService(
                    service_name,
                    const._TYPE_SRV,
                    const._CLASS_IN | const._CLASS_UNIQUE,
                    ttl,
                    0,
                    0,
                    80,
                    service_server,
                ),
                0,
            )
            return r.DNSIncoming(generated.packets()[0])

        service_name = "name._type._tcp.local."
        service_type = "_type._tcp.local."
        service_server = "ash-2.local."
        service_text = b"path=/~paulsm/"
        service_address = "10.0.1.2"

        zeroconf = r.Zeroconf(interfaces=["127.0.0.1"])

        try:
            # service added
            _inject_response(zeroconf, mock_incoming_msg(r.ServiceStateChange.Added))
            dns_text = zeroconf.cache.get_by_details(service_name, const._TYPE_TXT, const._CLASS_IN)
            assert dns_text is not None
            assert cast(r.DNSText, dns_text).text == service_text  # service_text is b'path=/~paulsm/'
            all_dns_text = zeroconf.cache.get_all_by_details(service_name, const._TYPE_TXT, const._CLASS_IN)
            assert [dns_text] == all_dns_text

            # https://tools.ietf.org/html/rfc6762#section-10.2
            # Instead of merging this new record additively into the cache in addition
            # to any previous records with the same name, rrtype, and rrclass,
            # all old records with that name, rrtype, and rrclass that were received
            # more than one second ago are declared invalid,
            # and marked to expire from the cache in one second.
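            # Concretely: the flush only applies to cached copies received
            # more than one second ago, so the test sleeps 1.1s before
            # injecting the update to make the original record eligible.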
            time.sleep(1.1)

            # service updated; currently only the TXT record can be updated
            service_text = b"path=/~humingchun/"
            _inject_response(zeroconf, mock_incoming_msg(r.ServiceStateChange.Updated))
            dns_text = zeroconf.cache.get_by_details(service_name, const._TYPE_TXT, const._CLASS_IN)
            assert dns_text is not None
            assert cast(r.DNSText, dns_text).text == service_text  # service_text is b'path=/~humingchun/'

            time.sleep(1.1)

            # The split message only has a SRV and A record.
            # This should not evict TXT records from the cache
            _inject_response(zeroconf, mock_split_incoming_msg(r.ServiceStateChange.Updated))
            time.sleep(1.1)
            dns_text = zeroconf.cache.get_by_details(service_name, const._TYPE_TXT, const._CLASS_IN)
            assert dns_text is not None
            assert cast(r.DNSText, dns_text).text == service_text  # service_text is b'path=/~humingchun/'

            # service removed
            _inject_response(zeroconf, mock_incoming_msg(r.ServiceStateChange.Removed))
            dns_text = zeroconf.cache.get_by_details(service_name, const._TYPE_TXT, const._CLASS_IN)
            assert dns_text is not None
            assert dns_text.is_expired(current_time_millis() + 1000)

        finally:
            zeroconf.close()


def test_generate_service_query_set_qu_bit():
    """Test generate_service_query sets the QU bit."""

    zeroconf_registrar = Zeroconf(interfaces=["127.0.0.1"])
    desc = {"path": "/~paulsm/"}
    type_ = "._hap._tcp.local."
    registration_name = "this-host-is-not-used._hap._tcp.local."
    info = r.ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    out = zeroconf_registrar.generate_service_query(info)
    assert out.questions[0].unicast is True
    zeroconf_registrar.close()


def test_invalid_packets_ignored_and_does_not_cause_loop_exception():
    """Ensure an invalid packet cannot cause the loop to collapse."""
    zc = Zeroconf(interfaces=["127.0.0.1"])
    generated = r.DNSOutgoing(0)
    packet = generated.packets()[0]
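    # Splice garbage into the middle of the 12-byte DNS header so the
    # parser rejects the message as invalid.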
    packet = packet[:8] + b"deadbeef" + packet[8:]
    parsed = r.DNSIncoming(packet)
    assert parsed.valid is False

    # Invalid Packet
    mock_out = unittest.mock.Mock()
    mock_out.packets = lambda: [packet]
    zc.send(mock_out)

    # Invalid oversized packet
    mock_out = unittest.mock.Mock()
    mock_out.packets = lambda: [packet * 1000]
    zc.send(mock_out)

    generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    entry = r.DNSText(
        "didnotcrashincoming._crash._tcp.local.",
        const._TYPE_TXT,
        const._CLASS_IN | const._CLASS_UNIQUE,
        500,
        b"path=/~paulsm/",
    )
    assert isinstance(entry, r.DNSText)
    assert isinstance(entry, r.DNSRecord)
    assert isinstance(entry, r.DNSEntry)

    generated.add_answer_at_time(entry, 0)
    zc.send(generated)
    time.sleep(0.2)
    zc.close()
    assert zc.cache.get(entry) is not None


def test_goodbye_all_services():
    """Verify generating the goodbye query does not change with time."""
    zc = Zeroconf(interfaces=["127.0.0.1"])
    out = zc.generate_unregister_all_services()
    assert out is None
    type_ = "_http._tcp.local."
    registration_name = f"xxxyyy.{type_}"
    desc = {"path": "/~paulsm/"}
    info = r.ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    zc.registry.async_add(info)
    out = zc.generate_unregister_all_services()
    assert out is not None
    first_packet = out.packets()
    zc.registry.async_add(info)
    out2 = zc.generate_unregister_all_services()
    assert out2 is not None
    second_packet = out2.packets()
    assert second_packet == first_packet

    # Verify the registry is empty
    out3 = zc.generate_unregister_all_services()
    assert out3 is None
    assert zc.registry.async_get_service_infos() == []

    zc.close()


def test_register_service_with_custom_ttl():
    """Test a registering a service with a custom ttl."""

    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=["127.0.0.1"])

    # start a browser
    type_ = "_homeassistant._tcp.local."
    name = "MyTestHome"
    info_service = r.ServiceInfo(
        type_,
        f"{name}.{type_}",
        80,
        0,
        0,
        {"path": "/~paulsm/"},
        "ash-90.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )

    zc.register_service(info_service, ttl=3000)
    record = zc.cache.get(info_service.dns_pointer())
    assert record is not None
    assert record.ttl == 3000
    zc.close()


def test_logging_packets(caplog):
    """Test packets are only logged with debug logging."""

    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=["127.0.0.1"])

    # start a browser
    type_ = "_logging._tcp.local."
    name = "TLD"
    info_service = r.ServiceInfo(
        type_,
        f"{name}.{type_}",
        80,
        0,
        0,
        {"path": "/~paulsm/"},
        "ash-90.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )

    logging.getLogger("zeroconf").setLevel(logging.DEBUG)
    caplog.clear()
    zc.register_service(info_service, ttl=3000)
    assert "Sending to" in caplog.text
    record = zc.cache.get(info_service.dns_pointer())
    assert record is not None
    assert record.ttl == 3000
    logging.getLogger("zeroconf").setLevel(logging.INFO)
    caplog.clear()
    zc.unregister_service(info_service)
    assert "Sending to" not in caplog.text
    logging.getLogger("zeroconf").setLevel(logging.DEBUG)

    zc.close()


def test_get_service_info_failure_path():
    """Verify get_service_info return None when the underlying call returns False."""
    zc = Zeroconf(interfaces=["127.0.0.1"])
    assert zc.get_service_info("_neverused._tcp.local.", "xneverused._neverused._tcp.local.", 10) is None
    zc.close()


def test_sending_unicast():
    """Test sending unicast response."""
    zc = Zeroconf(interfaces=["127.0.0.1"])
    generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    entry = r.DNSText(
        "didnotcrashincoming._crash._tcp.local.",
        const._TYPE_TXT,
        const._CLASS_IN | const._CLASS_UNIQUE,
        500,
        b"path=/~paulsm/",
    )
    generated.add_answer_at_time(entry, 0)
    zc.send(generated, "2001:db8::1", const._MDNS_PORT)  # https://www.iana.org/go/rfc3849
    time.sleep(0.2)
    assert zc.cache.get(entry) is None

    zc.send(generated, "198.51.100.0", const._MDNS_PORT)  # Documentation (TEST-NET-2)
    time.sleep(0.2)
    assert zc.cache.get(entry) is None

    zc.send(generated)

    # Handle slow GitHub CI runners on Windows
    for _ in range(10):
        time.sleep(0.05)
        if zc.cache.get(entry) is not None:
            break

    assert zc.cache.get(entry) is not None

    zc.close()


def test_tc_bit_defers():
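    """Test that queries with the TC bit set are deferred.

    A query split across multiple packets sets the TC bit on all but the
    final packet; the responder accumulates the packets per source and only
    answers, clearing its deferral state and timers, once the last packet
    arrives.
    """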
    zc = Zeroconf(interfaces=["127.0.0.1"])
    _wait_for_start(zc)
    type_ = "_tcbitdefer._tcp.local."
    name = "knownname"
    name2 = "knownname2"
    name3 = "knownname3"

    registration_name = f"{name}.{type_}"
    registration2_name = f"{name2}.{type_}"
    registration3_name = f"{name3}.{type_}"

    desc = {"path": "/~paulsm/"}
    server_name = "ash-2.local."
    server_name2 = "ash-3.local."
    server_name3 = "ash-4.local."

    info = r.ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        server_name,
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    info2 = r.ServiceInfo(
        type_,
        registration2_name,
        80,
        0,
        0,
        desc,
        server_name2,
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    info3 = r.ServiceInfo(
        type_,
        registration3_name,
        80,
        0,
        0,
        desc,
        server_name3,
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    zc.registry.async_add(info)
    zc.registry.async_add(info2)
    zc.registry.async_add(info3)

    protocol = zc.engine.protocols[0]
    now = r.current_time_millis()
    _clear_cache(zc)

    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(type_, const._TYPE_PTR, const._CLASS_IN)
    generated.add_question(question)
    for _ in range(300):
        # Add so many answers we end up with another packet
        generated.add_answer_at_time(info.dns_pointer(), now)
    generated.add_answer_at_time(info2.dns_pointer(), now)
    generated.add_answer_at_time(info3.dns_pointer(), now)
    packets = generated.packets()
    assert len(packets) == 4
    expected_deferred = []
    source_ip = "203.0.113.13"

    next_packet = r.DNSIncoming(packets.pop(0))
    expected_deferred.append(next_packet)
    threadsafe_query(zc, protocol, next_packet, source_ip, const._MDNS_PORT, Mock(), ())
    assert protocol._deferred[source_ip] == expected_deferred
    assert source_ip in protocol._timers

    next_packet = r.DNSIncoming(packets.pop(0))
    expected_deferred.append(next_packet)
    threadsafe_query(zc, protocol, next_packet, source_ip, const._MDNS_PORT, Mock(), ())
    assert protocol._deferred[source_ip] == expected_deferred
    assert source_ip in protocol._timers
    threadsafe_query(zc, protocol, next_packet, source_ip, const._MDNS_PORT, Mock(), ())
    assert protocol._deferred[source_ip] == expected_deferred
    assert source_ip in protocol._timers

    next_packet = r.DNSIncoming(packets.pop(0))
    expected_deferred.append(next_packet)
    threadsafe_query(zc, protocol, next_packet, source_ip, const._MDNS_PORT, Mock(), ())
    assert protocol._deferred[source_ip] == expected_deferred
    assert source_ip in protocol._timers

    next_packet = r.DNSIncoming(packets.pop(0))
    expected_deferred.append(next_packet)
    threadsafe_query(zc, protocol, next_packet, source_ip, const._MDNS_PORT, Mock(), ())
    assert source_ip not in protocol._deferred
    assert source_ip not in protocol._timers

    # unregister
    zc.unregister_service(info)
    zc.close()


def test_tc_bit_defers_last_response_missing():
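    """Test deferred state is cleaned up when the final packet never arrives.

    The per-source timers should eventually fire and drop the deferred
    packets rather than leaking them.
    """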
    zc = Zeroconf(interfaces=["127.0.0.1"])
    _wait_for_start(zc)
    type_ = "_knowndefer._tcp.local."
    name = "knownname"
    name2 = "knownname2"
    name3 = "knownname3"

    registration_name = f"{name}.{type_}"
    registration2_name = f"{name2}.{type_}"
    registration3_name = f"{name3}.{type_}"

    desc = {"path": "/~paulsm/"}
    server_name = "ash-2.local."
    server_name2 = "ash-3.local."
    server_name3 = "ash-4.local."

    info = r.ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        server_name,
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    info2 = r.ServiceInfo(
        type_,
        registration2_name,
        80,
        0,
        0,
        desc,
        server_name2,
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    info3 = r.ServiceInfo(
        type_,
        registration3_name,
        80,
        0,
        0,
        desc,
        server_name3,
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    zc.registry.async_add(info)
    zc.registry.async_add(info2)
    zc.registry.async_add(info3)

    protocol = zc.engine.protocols[0]
    now = r.current_time_millis()
    _clear_cache(zc)
    source_ip = "203.0.113.12"

    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(type_, const._TYPE_PTR, const._CLASS_IN)
    generated.add_question(question)
    for _ in range(300):
        # Add so many answers we end up with another packet
        generated.add_answer_at_time(info.dns_pointer(), now)
    generated.add_answer_at_time(info2.dns_pointer(), now)
    generated.add_answer_at_time(info3.dns_pointer(), now)
    packets = generated.packets()
    assert len(packets) == 4
    expected_deferred = []

    next_packet = r.DNSIncoming(packets.pop(0))
    expected_deferred.append(next_packet)
    threadsafe_query(zc, protocol, next_packet, source_ip, const._MDNS_PORT, Mock(), ())
    assert protocol._deferred[source_ip] == expected_deferred
    timer1 = protocol._timers[source_ip]

    next_packet = r.DNSIncoming(packets.pop(0))
    expected_deferred.append(next_packet)
    threadsafe_query(zc, protocol, next_packet, source_ip, const._MDNS_PORT, Mock(), ())
    assert protocol._deferred[source_ip] == expected_deferred
    timer2 = protocol._timers[source_ip]
    assert timer1.cancelled()
    assert timer2 != timer1

    # Send the same packet again, simulating receipt on multiple interfaces
    threadsafe_query(zc, protocol, next_packet, source_ip, const._MDNS_PORT, Mock(), ())
    assert protocol._deferred[source_ip] == expected_deferred
    assert source_ip in protocol._timers
    timer3 = protocol._timers[source_ip]
    assert not timer3.cancelled()
    assert timer3 == timer2

    next_packet = r.DNSIncoming(packets.pop(0))
    expected_deferred.append(next_packet)
    threadsafe_query(zc, protocol, next_packet, source_ip, const._MDNS_PORT, Mock(), ())
    assert protocol._deferred[source_ip] == expected_deferred
    assert source_ip in protocol._timers
    timer4 = protocol._timers[source_ip]
    assert timer3.cancelled()
    assert timer4 != timer3

    for _ in range(8):
        time.sleep(0.1)
        if source_ip not in protocol._timers and source_ip not in protocol._deferred:
            break

    assert source_ip not in protocol._deferred
    assert source_ip not in protocol._timers

    # unregister
    zc.registry.async_remove(info)
    zc.close()


@pytest.mark.asyncio
async def test_open_close_twice_from_async() -> None:
    """Test we can close twice from a coroutine when using Zeroconf.

    Ideally callers switch to using AsyncZeroconf; however, there will
    be a period where they still call the sync wrapper, and we want to
    ensure it will not deadlock on shutdown.

    This test is expected to throw warnings about tasks being destroyed,
    since we force shutdown right away: we must not block the caller's
    event loop, and because they are not using the AsyncZeroconf version
    they cannot yield with an await like async_close, so we have little
    choice but to force things down.
    """
    zc = Zeroconf(interfaces=["127.0.0.1"])
    zc.close()
    zc.close()
    await asyncio.sleep(0)


@pytest.mark.asyncio
async def test_multiple_sync_instances_started_from_async_close():
    """Test we can shut down multiple sync instances from async."""

    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=["127.0.0.1"])
    zc2 = Zeroconf(interfaces=["127.0.0.1"])
    assert zc.loop is not None
    assert zc2.loop is not None

    assert zc.loop == zc2.loop
    zc.close()
    assert zc.loop.is_running()
    zc2.close()
    assert zc2.loop.is_running()

    zc3 = Zeroconf(interfaces=["127.0.0.1"])
    assert zc3.loop == zc2.loop

    zc3.close()
    assert zc3.loop.is_running()

    await asyncio.sleep(0)


def test_shutdown_while_register_in_process():
    """Test we can shutdown while registering a service in another thread."""

    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=["127.0.0.1"])

    # start a browser
    type_ = "_homeassistant._tcp.local."
    name = "MyTestHome"
    info_service = r.ServiceInfo(
        type_,
        f"{name}.{type_}",
        80,
        0,
        0,
        {"path": "/~paulsm/"},
        "ash-90.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )

    def _background_register():
        zc.register_service(info_service)

    bgthread = threading.Thread(target=_background_register, daemon=True)
    bgthread.start()
    time.sleep(0.3)

    zc.close()
    bgthread.join()


@pytest.mark.asyncio
@patch("zeroconf._core._STARTUP_TIMEOUT", 0)
@patch("zeroconf._core.AsyncEngine._async_setup", new_callable=AsyncMock)
async def test_event_loop_blocked(mock_start):
    """Test we raise NotRunningException when waiting for startup that times out."""
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    with pytest.raises(NotRunningException):
        await aiozc.zeroconf.async_wait_for_start()
    assert aiozc.zeroconf.started is False
07070100000077000081A400000000000000000000000167C7AD1600003DF9000000000000000000000000000000000000002A00000000python-zeroconf-0.146.0/tests/test_dns.py"""Unit tests for zeroconf._dns."""

from __future__ import annotations

import logging
import os
import socket
import unittest.mock

import pytest

import zeroconf as r
from zeroconf import DNSHinfo, DNSText, ServiceInfo, const, current_time_millis
from zeroconf._dns import DNSRRSet

from . import has_working_ipv6

log = logging.getLogger("zeroconf")
original_logging_level = logging.NOTSET


def setup_module():
    global original_logging_level
    original_logging_level = log.level
    log.setLevel(logging.DEBUG)


def teardown_module():
    if original_logging_level != logging.NOTSET:
        log.setLevel(original_logging_level)


class TestDunder(unittest.TestCase):
    def test_dns_text_repr(self):
        # There was an issue on Python 3 that prevented DNSText's repr
        # from working when the text was longer than 10 bytes
        text = DNSText("irrelevant", 0, 0, 0, b"12345678901")
        repr(text)

        text = DNSText("irrelevant", 0, 0, 0, b"123")
        repr(text)

    def test_dns_hinfo_repr_eq(self):
        hinfo = DNSHinfo("irrelevant", const._TYPE_HINFO, 0, 0, "cpu", "os")
        assert hinfo == hinfo
        repr(hinfo)

    def test_dns_pointer_repr(self):
        pointer = r.DNSPointer("irrelevant", const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, "123")
        repr(pointer)

    @unittest.skipIf(not has_working_ipv6(), "Requires IPv6")
    @unittest.skipIf(os.environ.get("SKIP_IPV6"), "IPv6 tests disabled")
    def test_dns_address_repr(self):
        address = r.DNSAddress("irrelevant", const._TYPE_SOA, const._CLASS_IN, 1, b"a")
        assert repr(address).endswith("b'a'")

        address_ipv4 = r.DNSAddress(
            "irrelevant",
            const._TYPE_SOA,
            const._CLASS_IN,
            1,
            socket.inet_pton(socket.AF_INET, "127.0.0.1"),
        )
        assert repr(address_ipv4).endswith("127.0.0.1")

        address_ipv6 = r.DNSAddress(
            "irrelevant",
            const._TYPE_SOA,
            const._CLASS_IN,
            1,
            socket.inet_pton(socket.AF_INET6, "::1"),
        )
        assert repr(address_ipv6).endswith("::1")

    def test_dns_question_repr(self):
        question = r.DNSQuestion("irrelevant", const._TYPE_SRV, const._CLASS_IN | const._CLASS_UNIQUE)
        repr(question)
        assert not question != question

    def test_dns_service_repr(self):
        service = r.DNSService(
            "irrelevant",
            const._TYPE_SRV,
            const._CLASS_IN,
            const._DNS_HOST_TTL,
            0,
            0,
            80,
            "a",
        )
        repr(service)

    def test_dns_record_abc(self):
        record = r.DNSRecord("irrelevant", const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL)
        self.assertRaises(r.AbstractMethodException, record.__eq__, record)
        with pytest.raises((r.AbstractMethodException, TypeError)):
            record.write(None)  # type: ignore[arg-type]

    def test_service_info_dunder(self):
        type_ = "_test-srvc-type._tcp.local."
        name = "xxxyyy"
        registration_name = f"{name}.{type_}"
        info = ServiceInfo(
            type_,
            registration_name,
            80,
            0,
            0,
            b"",
            "ash-2.local.",
            addresses=[socket.inet_aton("10.0.1.2")],
        )

        assert not info != info
        repr(info)

    def test_service_info_text_properties_not_given(self):
        type_ = "_test-srvc-type._tcp.local."
        name = "xxxyyy"
        registration_name = f"{name}.{type_}"
        info = ServiceInfo(
            type_=type_,
            name=registration_name,
            addresses=[socket.inet_aton("10.0.1.2")],
            port=80,
            server="ash-2.local.",
        )

        assert isinstance(info.text, bytes)
        repr(info)

    def test_dns_outgoing_repr(self):
        dns_outgoing = r.DNSOutgoing(const._FLAGS_QR_QUERY)
        repr(dns_outgoing)

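    # The three lifecycle tests below pin the TTL fractions used by the
    # cache: a record is "recent" for roughly the first quarter of its TTL,
    # "stale" once about half the TTL has elapsed, and expired at the
    # full TTL.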
    def test_dns_record_is_expired(self):
        record = r.DNSRecord("irrelevant", const._TYPE_SRV, const._CLASS_IN, 8)
        now = current_time_millis()
        assert record.is_expired(now) is False
        assert record.is_expired(now + (8 / 2 * 1000)) is False
        assert record.is_expired(now + (8 * 1000)) is True

    def test_dns_record_is_stale(self):
        record = r.DNSRecord("irrelevant", const._TYPE_SRV, const._CLASS_IN, 8)
        now = current_time_millis()
        assert record.is_stale(now) is False
        assert record.is_stale(now + (8 / 4.1 * 1000)) is False
        assert record.is_stale(now + (8 / 1.9 * 1000)) is True
        assert record.is_stale(now + (8 * 1000)) is True

    def test_dns_record_is_recent(self):
        now = current_time_millis()
        record = r.DNSRecord("irrelevant", const._TYPE_SRV, const._CLASS_IN, 8)
        assert record.is_recent(now + (8 / 4.2 * 1000)) is True
        assert record.is_recent(now + (8 / 3 * 1000)) is False
        assert record.is_recent(now + (8 / 2 * 1000)) is False
        assert record.is_recent(now + (8 * 1000)) is False


def test_dns_question_hashability():
    """Test DNSQuestions are hashable."""

    record1 = r.DNSQuestion("irrelevant", const._TYPE_A, const._CLASS_IN)
    record2 = r.DNSQuestion("irrelevant", const._TYPE_A, const._CLASS_IN)

    record_set = {record1, record2}
    assert len(record_set) == 1

    record_set.add(record1)
    assert len(record_set) == 1

    record3_dupe = r.DNSQuestion("irrelevant", const._TYPE_A, const._CLASS_IN)
    assert record2 == record3_dupe
    assert record2.__hash__() == record3_dupe.__hash__()

    record_set.add(record3_dupe)
    assert len(record_set) == 1

    record4_dupe = r.DNSQuestion("notsame", const._TYPE_A, const._CLASS_IN)
    assert record2 != record4_dupe
    assert record2.__hash__() != record4_dupe.__hash__()

    record_set.add(record4_dupe)
    assert len(record_set) == 2


def test_dns_record_hashability_does_not_consider_ttl():
    """Test DNSRecords are hashable and the TTL is not part of the hash."""

    # Verify the TTL is not considered in the hash
    record1 = r.DNSAddress("irrelevant", const._TYPE_A, const._CLASS_IN, const._DNS_OTHER_TTL, b"same")
    record2 = r.DNSAddress("irrelevant", const._TYPE_A, const._CLASS_IN, const._DNS_HOST_TTL, b"same")

    record_set = {record1, record2}
    assert len(record_set) == 1

    record_set.add(record1)
    assert len(record_set) == 1

    record3_dupe = r.DNSAddress("irrelevant", const._TYPE_A, const._CLASS_IN, const._DNS_HOST_TTL, b"same")
    assert record2 == record3_dupe
    assert record2.__hash__() == record3_dupe.__hash__()

    record_set.add(record3_dupe)
    assert len(record_set) == 1


def test_dns_record_hashability_does_not_consider_created():
    """Test DNSRecords are hashable and the created time is not considered."""

    # Verify the created time is not considered in the hash
    record1 = r.DNSAddress(
        "irrelevant", const._TYPE_A, const._CLASS_IN, const._DNS_HOST_TTL, b"same", created=1.0
    )
    record2 = r.DNSAddress(
        "irrelevant", const._TYPE_A, const._CLASS_IN, const._DNS_HOST_TTL, b"same", created=2.0
    )

    record_set = {record1, record2}
    assert len(record_set) == 1

    record_set.add(record1)
    assert len(record_set) == 1

    record3_dupe = r.DNSAddress(
        "irrelevant", const._TYPE_A, const._CLASS_IN, const._DNS_HOST_TTL, b"same", created=3.0
    )
    assert record2 == record3_dupe
    assert record2.__hash__() == record3_dupe.__hash__()

    record_set.add(record3_dupe)
    assert len(record_set) == 1


def test_dns_record_hashability_does_not_consider_unique():
    """Test DNSRecords are hashable and the unique flag is ignored."""

    # Verify the unique value is not considered in the hash
    record1 = r.DNSAddress(
        "irrelevant",
        const._TYPE_A,
        const._CLASS_IN | const._CLASS_UNIQUE,
        const._DNS_OTHER_TTL,
        b"same",
    )
    record2 = r.DNSAddress("irrelevant", const._TYPE_A, const._CLASS_IN, const._DNS_OTHER_TTL, b"same")

    assert record1.class_ == record2.class_
    assert record1.__hash__() == record2.__hash__()
    record_set = {record1, record2}
    assert len(record_set) == 1


def test_dns_address_record_hashability():
    """Test DNSAddress records are hashable."""
    address1 = r.DNSAddress("irrelevant", const._TYPE_A, const._CLASS_IN, 1, b"a")
    address2 = r.DNSAddress("irrelevant", const._TYPE_A, const._CLASS_IN, 1, b"b")
    address3 = r.DNSAddress("irrelevant", const._TYPE_A, const._CLASS_IN, 1, b"c")
    address4 = r.DNSAddress("irrelevant", const._TYPE_AAAA, const._CLASS_IN, 1, b"c")

    record_set = {address1, address2, address3, address4}
    assert len(record_set) == 4

    record_set.add(address1)
    assert len(record_set) == 4

    address3_dupe = r.DNSAddress("irrelevant", const._TYPE_A, const._CLASS_IN, 1, b"c")

    record_set.add(address3_dupe)
    assert len(record_set) == 4

    # Verify we can remove records
    additional_set = {address1, address2}
    record_set -= additional_set
    assert record_set == {address3, address4}


def test_dns_hinfo_record_hashability():
    """Test DNSHinfo records are hashable."""
    hinfo1 = r.DNSHinfo("irrelevant", const._TYPE_HINFO, 0, 0, "cpu1", "os")
    hinfo2 = r.DNSHinfo("irrelevant", const._TYPE_HINFO, 0, 0, "cpu2", "os")

    record_set = {hinfo1, hinfo2}
    assert len(record_set) == 2

    record_set.add(hinfo1)
    assert len(record_set) == 2

    hinfo2_dupe = r.DNSHinfo("irrelevant", const._TYPE_HINFO, 0, 0, "cpu2", "os")
    assert hinfo2 == hinfo2_dupe
    assert hinfo2.__hash__() == hinfo2_dupe.__hash__()

    record_set.add(hinfo2_dupe)
    assert len(record_set) == 2


def test_dns_pointer_record_hashability():
    """Test DNSPointer records are hashable."""
    ptr1 = r.DNSPointer("irrelevant", const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, "123")
    ptr2 = r.DNSPointer("irrelevant", const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, "456")

    record_set = {ptr1, ptr2}
    assert len(record_set) == 2

    record_set.add(ptr1)
    assert len(record_set) == 2

    ptr2_dupe = r.DNSPointer("irrelevant", const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, "456")
    assert ptr2 == ptr2_dupe
    assert ptr2.__hash__() == ptr2_dupe.__hash__()

    record_set.add(ptr2_dupe)
    assert len(record_set) == 2


def test_dns_pointer_comparison_is_case_insensitive():
    """Test DNSPointer comparison is case insensitive."""
    ptr1 = r.DNSPointer("irrelevant", const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, "123")
    ptr2 = r.DNSPointer(
        "irrelevant".upper(),
        const._TYPE_PTR,
        const._CLASS_IN,
        const._DNS_OTHER_TTL,
        "123",
    )

    assert ptr1 == ptr2


def test_dns_text_record_hashability():
    """Test DNSText records are hashable."""
    text1 = r.DNSText("irrelevant", 0, 0, const._DNS_OTHER_TTL, b"12345678901")
    text2 = r.DNSText("irrelevant", 1, 0, const._DNS_OTHER_TTL, b"12345678901")
    text3 = r.DNSText("irrelevant", 0, 1, const._DNS_OTHER_TTL, b"12345678901")
    text4 = r.DNSText("irrelevant", 0, 0, const._DNS_OTHER_TTL, b"ABCDEFGHIJK")

    record_set = {text1, text2, text3, text4}

    assert len(record_set) == 4

    record_set.add(text1)
    assert len(record_set) == 4

    text1_dupe = r.DNSText("irrelevant", 0, 0, const._DNS_OTHER_TTL, b"12345678901")
    assert text1 == text1_dupe
    assert text1.__hash__() == text1_dupe.__hash__()

    record_set.add(text1_dupe)
    assert len(record_set) == 4


def test_dns_service_record_hashability():
    """Test DNSService records are hashable."""
    srv1 = r.DNSService(
        "irrelevant",
        const._TYPE_SRV,
        const._CLASS_IN,
        const._DNS_HOST_TTL,
        0,
        0,
        80,
        "a",
    )
    srv2 = r.DNSService(
        "irrelevant",
        const._TYPE_SRV,
        const._CLASS_IN,
        const._DNS_HOST_TTL,
        0,
        1,
        80,
        "a",
    )
    srv3 = r.DNSService(
        "irrelevant",
        const._TYPE_SRV,
        const._CLASS_IN,
        const._DNS_HOST_TTL,
        0,
        0,
        81,
        "a",
    )
    srv4 = r.DNSService(
        "irrelevant",
        const._TYPE_SRV,
        const._CLASS_IN,
        const._DNS_HOST_TTL,
        0,
        0,
        80,
        "ab",
    )

    record_set = {srv1, srv2, srv3, srv4}

    assert len(record_set) == 4

    record_set.add(srv1)
    assert len(record_set) == 4

    srv1_dupe = r.DNSService(
        "irrelevant",
        const._TYPE_SRV,
        const._CLASS_IN,
        const._DNS_HOST_TTL,
        0,
        0,
        80,
        "a",
    )
    assert srv1 == srv1_dupe
    assert srv1.__hash__() == srv1_dupe.__hash__()

    record_set.add(srv1_dupe)
    assert len(record_set) == 4


def test_dns_service_server_key():
    """Test DNSService server_key is lowercase."""
    srv1 = r.DNSService(
        "X._tcp._http.local.",
        const._TYPE_SRV,
        const._CLASS_IN,
        const._DNS_HOST_TTL,
        0,
        0,
        80,
        "X.local.",
    )
    assert srv1.name == "X._tcp._http.local."
    assert srv1.key == "x._tcp._http.local."
    assert srv1.server == "X.local."
    assert srv1.server_key == "x.local."


def test_dns_service_server_comparison_is_case_insensitive():
    """Test DNSService server comparison is case insensitive."""
    srv1 = r.DNSService(
        "X._tcp._http.local.",
        const._TYPE_SRV,
        const._CLASS_IN,
        const._DNS_HOST_TTL,
        0,
        0,
        80,
        "X.local.",
    )
    srv2 = r.DNSService(
        "X._tcp._http.local.",
        const._TYPE_SRV,
        const._CLASS_IN,
        const._DNS_HOST_TTL,
        0,
        0,
        80,
        "x.local.",
    )
    assert srv1 == srv2


def test_dns_nsec_record_hashability():
    """Test DNSNsec records are hashable."""
    nsec1 = r.DNSNsec(
        "irrelevant",
        const._TYPE_PTR,
        const._CLASS_IN,
        const._DNS_OTHER_TTL,
        "irrelevant",
        [1, 2, 3],
    )
    nsec2 = r.DNSNsec(
        "irrelevant",
        const._TYPE_PTR,
        const._CLASS_IN,
        const._DNS_OTHER_TTL,
        "irrelevant",
        [1, 2],
    )

    record_set = {nsec1, nsec2}
    assert len(record_set) == 2

    record_set.add(nsec1)
    assert len(record_set) == 2

    nsec2_dupe = r.DNSNsec(
        "irrelevant",
        const._TYPE_PTR,
        const._CLASS_IN,
        const._DNS_OTHER_TTL,
        "irrelevant",
        [1, 2],
    )
    assert nsec2 == nsec2_dupe
    assert nsec2.__hash__() == nsec2_dupe.__hash__()

    record_set.add(nsec2_dupe)
    assert len(record_set) == 2


def test_rrset_does_not_consider_ttl():
    """Test DNSRRSet does not consider the ttl in the hash."""

    longarec = r.DNSAddress("irrelevant", const._TYPE_A, const._CLASS_IN, 100, b"same")
    shortarec = r.DNSAddress("irrelevant", const._TYPE_A, const._CLASS_IN, 10, b"same")
    longaaaarec = r.DNSAddress("irrelevant", const._TYPE_AAAA, const._CLASS_IN, 100, b"same")
    shortaaaarec = r.DNSAddress("irrelevant", const._TYPE_AAAA, const._CLASS_IN, 10, b"same")

    rrset = DNSRRSet([longarec, shortaaaarec])

    assert rrset.suppresses(longarec)
    assert rrset.suppresses(shortarec)
    assert not rrset.suppresses(longaaaarec)
    assert rrset.suppresses(shortaaaarec)

    verylongarec = r.DNSAddress("irrelevant", const._TYPE_A, const._CLASS_IN, 1000, b"same")
    longarec = r.DNSAddress("irrelevant", const._TYPE_A, const._CLASS_IN, 100, b"same")
    mediumarec = r.DNSAddress("irrelevant", const._TYPE_A, const._CLASS_IN, 60, b"same")
    shortarec = r.DNSAddress("irrelevant", const._TYPE_A, const._CLASS_IN, 10, b"same")

    rrset2 = DNSRRSet([mediumarec])
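    # Known-answer suppression semantics: an answer in the set suppresses a
    # matching record when its TTL is at least half of that record's TTL,
    # so the 60s record covers the 100s one but not the 1000s one.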
    assert not rrset2.suppresses(verylongarec)
    assert rrset2.suppresses(longarec)
    assert rrset2.suppresses(mediumarec)
    assert rrset2.suppresses(shortarec)
07070100000078000081A400000000000000000000000167C7AD1600000D37000000000000000000000000000000000000002D00000000python-zeroconf-0.146.0/tests/test_engine.py"""Unit tests for zeroconf._engine"""

from __future__ import annotations

import asyncio
import itertools
import logging
from unittest.mock import patch

import pytest

import zeroconf as r
from zeroconf import _engine, const
from zeroconf.asyncio import AsyncZeroconf

log = logging.getLogger("zeroconf")
original_logging_level = logging.NOTSET


def setup_module():
    global original_logging_level
    original_logging_level = log.level
    log.setLevel(logging.DEBUG)


def teardown_module():
    if original_logging_level != logging.NOTSET:
        log.setLevel(original_logging_level)


# This test uses asyncio because it needs to access the cache directly
# which is not threadsafe
@pytest.mark.asyncio
async def test_reaper():
    with patch.object(_engine, "_CACHE_CLEANUP_INTERVAL", 0.01):
        aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
        zeroconf = aiozc.zeroconf
        cache = zeroconf.cache
        original_entries = list(itertools.chain(*(cache.entries_with_name(name) for name in cache.names())))
        record_with_10s_ttl = r.DNSAddress("a", const._TYPE_SOA, const._CLASS_IN, 10, b"a")
        record_with_1s_ttl = r.DNSAddress("a", const._TYPE_SOA, const._CLASS_IN, 1, b"b")
        zeroconf.cache.async_add_records([record_with_10s_ttl, record_with_1s_ttl])
        question = r.DNSQuestion("_hap._tcp._local.", const._TYPE_PTR, const._CLASS_IN)
        now = r.current_time_millis()
        other_known_answers: set[r.DNSRecord] = {
            r.DNSPointer(
                "_hap._tcp.local.",
                const._TYPE_PTR,
                const._CLASS_IN,
                10000,
                "known-to-other._hap._tcp.local.",
            )
        }
        zeroconf.question_history.add_question_at_time(question, now, other_known_answers)
        assert zeroconf.question_history.suppresses(question, now, other_known_answers)
        entries_with_cache = list(itertools.chain(*(cache.entries_with_name(name) for name in cache.names())))
        await asyncio.sleep(1.2)
        entries = list(itertools.chain(*(cache.entries_with_name(name) for name in cache.names())))
        assert zeroconf.cache.get(record_with_1s_ttl) is None
        await aiozc.async_close()
        assert not zeroconf.question_history.suppresses(question, now, other_known_answers)
        assert entries != original_entries
        assert entries_with_cache != original_entries
        assert record_with_10s_ttl in entries
        assert record_with_1s_ttl not in entries


@pytest.mark.asyncio
async def test_reaper_aborts_when_done():
    """Ensure cache cleanup stops when zeroconf is done."""
    with patch.object(_engine, "_CACHE_CLEANUP_INTERVAL", 0.01):
        aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
        zeroconf = aiozc.zeroconf
        record_with_10s_ttl = r.DNSAddress("a", const._TYPE_SOA, const._CLASS_IN, 10, b"a")
        record_with_1s_ttl = r.DNSAddress("a", const._TYPE_SOA, const._CLASS_IN, 1, b"b")
        zeroconf.cache.async_add_records([record_with_10s_ttl, record_with_1s_ttl])
        assert zeroconf.cache.get(record_with_10s_ttl) is not None
        assert zeroconf.cache.get(record_with_1s_ttl) is not None
        await aiozc.async_close()
        await asyncio.sleep(1.2)
        assert zeroconf.cache.get(record_with_10s_ttl) is not None
        assert zeroconf.cache.get(record_with_1s_ttl) is not None
07070100000079000081A400000000000000000000000167C7AD1600001417000000000000000000000000000000000000003100000000python-zeroconf-0.146.0/tests/test_exceptions.py"""Unit tests for zeroconf._exceptions"""

from __future__ import annotations

import logging
import unittest.mock

import zeroconf as r
from zeroconf import ServiceInfo, Zeroconf

log = logging.getLogger("zeroconf")
original_logging_level = logging.NOTSET


def setup_module():
    global original_logging_level
    original_logging_level = log.level
    log.setLevel(logging.DEBUG)


def teardown_module():
    if original_logging_level != logging.NOTSET:
        log.setLevel(original_logging_level)


class Exceptions(unittest.TestCase):
    browser = None  # type: Zeroconf

    @classmethod
    def setUpClass(cls):
        cls.browser = Zeroconf(interfaces=["127.0.0.1"])

    @classmethod
    def tearDownClass(cls):
        cls.browser.close()
        del cls.browser

    def test_bad_service_info_name(self):
        self.assertRaises(r.BadTypeInNameException, self.browser.get_service_info, "type", "type_not")

    def test_bad_service_names(self):
        bad_names_to_try = (
            "",
            "local",
            "_tcp.local.",
            "_udp.local.",
            "._udp.local.",
            "_@._tcp.local.",
            "_A@._tcp.local.",
            "_x--x._tcp.local.",
            "_-x._udp.local.",
            "_x-._tcp.local.",
            "_22._udp.local.",
            "_2-2._tcp.local.",
            "\x00._x._udp.local.",
        )
        for name in bad_names_to_try:
            self.assertRaises(
                r.BadTypeInNameException,
                self.browser.get_service_info,
                name,
                "x." + name,
            )

    def test_bad_local_names_for_get_service_info(self):
        bad_names_to_try = (
            "homekitdev._nothttp._tcp.local.",
            "homekitdev._http._udp.local.",
        )
        for name in bad_names_to_try:
            self.assertRaises(
                r.BadTypeInNameException,
                self.browser.get_service_info,
                "_http._tcp.local.",
                name,
            )

    def test_good_instance_names(self):
        assert r.service_type_name(".._x._tcp.local.") == "_x._tcp.local."
        assert r.service_type_name("x.y._http._tcp.local.") == "_http._tcp.local."
        assert r.service_type_name("1.2.3._mqtt._tcp.local.") == "_mqtt._tcp.local."
        assert r.service_type_name("x.sub._http._tcp.local.") == "_http._tcp.local."
        assert (
            r.service_type_name("6d86f882b90facee9170ad3439d72a4d6ee9f511._zget._http._tcp.local.")
            == "_http._tcp.local."
        )

    def test_good_instance_names_without_protocol(self):
        good_names_to_try = (
            "Rachio-C73233.local.",
            "YeelightColorBulb-3AFD.local.",
            "YeelightTunableBulb-7220.local.",
            "AlexanderHomeAssistant 74651D.local.",
            "iSmartGate-152.local.",
            "MyQ-FGA.local.",
            "lutron-02c4392a.local.",
            "WICED-hap-3E2734.local.",
            "MyHost.local.",
            "MyHost.sub.local.",
        )
        for name in good_names_to_try:
            assert r.service_type_name(name, strict=False) == "local."

        for name in good_names_to_try:
            # Raises without strict=False
            self.assertRaises(r.BadTypeInNameException, r.service_type_name, name)

    def test_bad_types(self):
        bad_names_to_try = (
            "._x._tcp.local.",
            "a" * 64 + "._sub._http._tcp.local.",
            "a" * 62 + "â._sub._http._tcp.local.",
        )
        for name in bad_names_to_try:
            self.assertRaises(r.BadTypeInNameException, r.service_type_name, name)

    def test_bad_sub_types(self):
        bad_names_to_try = (
            "_sub._http._tcp.local.",
            "._sub._http._tcp.local.",
            "\x7f._sub._http._tcp.local.",
            "\x1f._sub._http._tcp.local.",
        )
        for name in bad_names_to_try:
            self.assertRaises(r.BadTypeInNameException, r.service_type_name, name)

    def test_good_service_names(self):
        good_names_to_try = (
            ("_x._tcp.local.", "_x._tcp.local."),
            ("_x._udp.local.", "_x._udp.local."),
            ("_12345-67890-abc._udp.local.", "_12345-67890-abc._udp.local."),
            ("x._sub._http._tcp.local.", "_http._tcp.local."),
            ("a" * 63 + "._sub._http._tcp.local.", "_http._tcp.local."),
            ("a" * 61 + "â._sub._http._tcp.local.", "_http._tcp.local."),
        )

        for name, result in good_names_to_try:
            assert r.service_type_name(name) == result

        assert r.service_type_name("_one_two._tcp.local.", strict=False) == "_one_two._tcp.local."

    def test_invalid_addresses(self):
        type_ = "_test-srvc-type._tcp.local."
        name = "xxxyyy"
        registration_name = f"{name}.{type_}"

        bad = (b"127.0.0.1", b"::1")
        for addr in bad:
            self.assertRaisesRegex(
                TypeError,
                "Addresses must either ",
                ServiceInfo,
                type_,
                registration_name,
                port=80,
                addresses=[addr],
            )
0707010000007A000081A400000000000000000000000167C7AD160001261C000000000000000000000000000000000000002F00000000python-zeroconf-0.146.0/tests/test_handlers.py"""Unit tests for zeroconf._handlers"""

from __future__ import annotations

import asyncio
import logging
import os
import socket
import time
import unittest
import unittest.mock
from typing import cast
from unittest.mock import patch

import pytest

import zeroconf as r
from zeroconf import ServiceInfo, Zeroconf, const, current_time_millis
from zeroconf._handlers.multicast_outgoing_queue import (
    MulticastOutgoingQueue,
    construct_outgoing_multicast_answers,
)
from zeroconf._utils.time import millis_to_seconds
from zeroconf.asyncio import AsyncZeroconf

from . import _clear_cache, _inject_response, has_working_ipv6

log = logging.getLogger("zeroconf")
original_logging_level = logging.NOTSET


def setup_module():
    global original_logging_level
    original_logging_level = log.level
    log.setLevel(logging.DEBUG)


def teardown_module():
    if original_logging_level != logging.NOTSET:
        log.setLevel(original_logging_level)


class TestRegistrar(unittest.TestCase):
    def test_ttl(self):
        # instantiate a zeroconf instance
        zc = Zeroconf(interfaces=["127.0.0.1"])

        # service definition
        type_ = "_test-srvc-type._tcp.local."
        name = "xxxyyy"
        registration_name = f"{name}.{type_}"

        desc = {"path": "/~paulsm/"}
        info = ServiceInfo(
            type_,
            registration_name,
            80,
            0,
            0,
            desc,
            "ash-2.local.",
            addresses=[socket.inet_aton("10.0.1.2")],
        )

        nbr_answers = nbr_additionals = nbr_authorities = 0

        def get_ttl(record_type):
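            # expected_ttl, when set, overrides the default: host records
            # (A/SRV/NSEC) default to _DNS_HOST_TTL, everything else to
            # _DNS_OTHER_TTL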
            if expected_ttl is not None:
                return expected_ttl
            if record_type in [const._TYPE_A, const._TYPE_SRV, const._TYPE_NSEC]:
                return const._DNS_HOST_TTL
            return const._DNS_OTHER_TTL

        def _process_outgoing_packet(out):
            """Sends an outgoing packet."""
            nonlocal nbr_answers, nbr_additionals, nbr_authorities

            for answer, _ in out.answers:
                nbr_answers += 1
                assert answer.ttl == get_ttl(answer.type)
            for answer in out.additionals:
                nbr_additionals += 1
                assert answer.ttl == get_ttl(answer.type)
            for answer in out.authorities:
                nbr_authorities += 1
                assert answer.ttl == get_ttl(answer.type)

        # register service with default TTL
        expected_ttl = None
        for _ in range(3):
            _process_outgoing_packet(zc.generate_service_query(info))
        zc.registry.async_add(info)
        for _ in range(3):
            _process_outgoing_packet(zc.generate_service_broadcast(info, None))
        assert nbr_answers == 15 and nbr_additionals == 0 and nbr_authorities == 3
        nbr_answers = nbr_additionals = nbr_authorities = 0

        # query
        query = r.DNSOutgoing(const._FLAGS_QR_QUERY | const._FLAGS_AA)
        assert query.is_query() is True
        query.add_question(r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN))
        query.add_question(r.DNSQuestion(info.name, const._TYPE_SRV, const._CLASS_IN))
        query.add_question(r.DNSQuestion(info.name, const._TYPE_TXT, const._CLASS_IN))
        query.add_question(r.DNSQuestion(info.server or info.name, const._TYPE_A, const._CLASS_IN))
        question_answers = zc.query_handler.async_response(
            [r.DNSIncoming(packet) for packet in query.packets()], False
        )
        assert question_answers
        _process_outgoing_packet(construct_outgoing_multicast_answers(question_answers.mcast_aggregate))

        # The additionals should all be suppressed since they are all in the answers section.
        # There will be one NSEC additional to indicate the lack of an AAAA record.
        assert nbr_answers == 4 and nbr_additionals == 1 and nbr_authorities == 0
        nbr_answers = nbr_additionals = nbr_authorities = 0

        # unregister
        expected_ttl = 0
        zc.registry.async_remove(info)
        for _ in range(3):
            _process_outgoing_packet(zc.generate_service_broadcast(info, 0))
        assert nbr_answers == 15 and nbr_additionals == 0 and nbr_authorities == 0
        nbr_answers = nbr_additionals = nbr_authorities = 0

        expected_ttl = None
        for _ in range(3):
            _process_outgoing_packet(zc.generate_service_query(info))
        zc.registry.async_add(info)
        # register service with custom TTL
        expected_ttl = const._DNS_HOST_TTL * 2
        assert expected_ttl != const._DNS_HOST_TTL
        for _ in range(3):
            _process_outgoing_packet(zc.generate_service_broadcast(info, expected_ttl))
        assert nbr_answers == 15 and nbr_additionals == 0 and nbr_authorities == 3
        nbr_answers = nbr_additionals = nbr_authorities = 0

        # query
        expected_ttl = None
        query = r.DNSOutgoing(const._FLAGS_QR_QUERY | const._FLAGS_AA)
        query.add_question(r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN))
        query.add_question(r.DNSQuestion(info.name, const._TYPE_SRV, const._CLASS_IN))
        query.add_question(r.DNSQuestion(info.name, const._TYPE_TXT, const._CLASS_IN))
        query.add_question(r.DNSQuestion(info.server or info.name, const._TYPE_A, const._CLASS_IN))
        question_answers = zc.query_handler.async_response(
            [r.DNSIncoming(packet) for packet in query.packets()], False
        )
        assert question_answers
        _process_outgoing_packet(construct_outgoing_multicast_answers(question_answers.mcast_aggregate))

        # There will be one NSEC additional to indicate the lack of an AAAA record
        assert nbr_answers == 4 and nbr_additionals == 1 and nbr_authorities == 0
        nbr_answers = nbr_additionals = nbr_authorities = 0

        # unregister
        expected_ttl = 0
        zc.registry.async_remove(info)
        for _ in range(3):
            _process_outgoing_packet(zc.generate_service_broadcast(info, 0))
        assert nbr_answers == 15 and nbr_additionals == 0 and nbr_authorities == 0
        nbr_answers = nbr_additionals = nbr_authorities = 0
        zc.close()

    def test_name_conflicts(self):
        # instantiate a zeroconf instance
        zc = Zeroconf(interfaces=["127.0.0.1"])
        type_ = "_homeassistant._tcp.local."
        name = "Home"
        registration_name = f"{name}.{type_}"

        info = ServiceInfo(
            type_,
            name=registration_name,
            server="random123.local.",
            addresses=[socket.inet_pton(socket.AF_INET, "1.2.3.4")],
            port=80,
            properties={"version": "1.0"},
        )
        zc.register_service(info)

        conflicting_info = ServiceInfo(
            type_,
            name=registration_name,
            server="random456.local.",
            addresses=[socket.inet_pton(socket.AF_INET, "4.5.6.7")],
            port=80,
            properties={"version": "1.0"},
        )
        with pytest.raises(r.NonUniqueNameException):
            zc.register_service(conflicting_info)
        zc.close()

    def test_register_and_lookup_type_by_uppercase_name(self):
        # instantiate a zeroconf instance
        zc = Zeroconf(interfaces=["127.0.0.1"])
        type_ = "_mylowertype._tcp.local."
        name = "Home"
        registration_name = f"{name}.{type_}"

        info = ServiceInfo(
            type_,
            name=registration_name,
            server="random123.local.",
            addresses=[socket.inet_pton(socket.AF_INET, "1.2.3.4")],
            port=80,
            properties={"version": "1.0"},
        )
        zc.register_service(info)
        _clear_cache(zc)
        info = ServiceInfo(type_, registration_name)
        info.load_from_cache(zc)
        assert info.addresses == []

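        # Query using the upper-case form of the type; matching must be
        # case-insensitive, and our own response should repopulate the cache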
        out = r.DNSOutgoing(const._FLAGS_QR_QUERY)
        out.add_question(r.DNSQuestion(type_.upper(), const._TYPE_PTR, const._CLASS_IN))
        zc.send(out)
        time.sleep(1)
        info = ServiceInfo(type_, registration_name)
        info.load_from_cache(zc)
        assert info.addresses == [socket.inet_pton(socket.AF_INET, "1.2.3.4")]
        assert info.properties == {b"version": b"1.0"}
        zc.close()


def test_ptr_optimization():
    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=["127.0.0.1"])

    # service definition
    type_ = "_test-srvc-type._tcp.local."
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"

    desc = {"path": "/~paulsm/"}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )

    # register
    zc.register_service(info)

    # Verify we won't respond again within 1s, since the same answer was just multicast
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    query.add_question(r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN))
    question_answers = zc.query_handler.async_response(
        [r.DNSIncoming(packet) for packet in query.packets()], False
    )
    assert question_answers
    assert not question_answers.ucast
    assert not question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    # Since we multicast the PTR within the last second, the answers
    # should end up in the "delayed at least one second" bucket
    assert question_answers.mcast_aggregate_last_second

    # Clear the cache to allow responding again
    _clear_cache(zc)

    # Verify we will now respond
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    query.add_question(r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN))
    question_answers = zc.query_handler.async_response(
        [r.DNSIncoming(packet) for packet in query.packets()], False
    )
    assert question_answers
    assert not question_answers.ucast
    assert not question_answers.mcast_now
    assert not question_answers.mcast_aggregate_last_second
    has_srv = has_txt = has_a = False
    nbr_additionals = 0
    nbr_answers = len(question_answers.mcast_aggregate)
    additionals = set().union(*question_answers.mcast_aggregate.values())
    for answer in additionals:
        nbr_additionals += 1
        if answer.type == const._TYPE_SRV:
            has_srv = True
        elif answer.type == const._TYPE_TXT:
            has_txt = True
        elif answer.type == const._TYPE_A:
            has_a = True
    # The four additionals are SRV, TXT, A, and one NSEC to indicate the lack of an AAAA record
    assert nbr_answers == 1 and nbr_additionals == 4

    assert has_srv and has_txt and has_a

    # unregister
    zc.unregister_service(info)
    zc.close()


@unittest.skipIf(not has_working_ipv6(), "Requires IPv6")
@unittest.skipIf(os.environ.get("SKIP_IPV6"), "IPv6 tests disabled")
def test_any_query_for_ptr():
    """Test that queries for ANY will return PTR records and the response is aggregated."""
    zc = Zeroconf(interfaces=["127.0.0.1"])
    type_ = "_anyptr._tcp.local."
    name = "knownname"
    registration_name = f"{name}.{type_}"
    desc = {"path": "/~paulsm/"}
    server_name = "ash-2.local."
    ipv6_address = socket.inet_pton(socket.AF_INET6, "2001:db8::1")
    info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, server_name, addresses=[ipv6_address])
    zc.registry.async_add(info)

    _clear_cache(zc)
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(type_, const._TYPE_ANY, const._CLASS_IN)
    generated.add_question(question)
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert question_answers
    mcast_answers = list(question_answers.mcast_aggregate)
    assert mcast_answers[0].name == type_
    assert mcast_answers[0].alias == registration_name  # type: ignore[attr-defined]
    # unregister
    zc.registry.async_remove(info)
    zc.close()


@unittest.skipIf(not has_working_ipv6(), "Requires IPv6")
@unittest.skipIf(os.environ.get("SKIP_IPV6"), "IPv6 tests disabled")
def test_aaaa_query():
    """Test that queries for AAAA records work and should respond right away."""
    zc = Zeroconf(interfaces=["127.0.0.1"])
    type_ = "_knownaaaservice._tcp.local."
    name = "knownname"
    registration_name = f"{name}.{type_}"
    desc = {"path": "/~paulsm/"}
    server_name = "ash-2.local."
    ipv6_address = socket.inet_pton(socket.AF_INET6, "2001:db8::1")
    info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, server_name, addresses=[ipv6_address])
    zc.registry.async_add(info)

    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(server_name, const._TYPE_AAAA, const._CLASS_IN)
    generated.add_question(question)
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert question_answers
    mcast_answers = list(question_answers.mcast_now)
    assert mcast_answers[0].address == ipv6_address  # type: ignore[attr-defined]
    # unregister
    zc.registry.async_remove(info)
    zc.close()


@unittest.skipIf(not has_working_ipv6(), "Requires IPv6")
@unittest.skipIf(os.environ.get("SKIP_IPV6"), "IPv6 tests disabled")
def test_aaaa_query_upper_case():
    """Test that queries for AAAA records work and should respond right away with an upper case name."""
    zc = Zeroconf(interfaces=["127.0.0.1"])
    type_ = "_knownaaaservice._tcp.local."
    name = "knownname"
    registration_name = f"{name}.{type_}"
    desc = {"path": "/~paulsm/"}
    server_name = "ash-2.local."
    ipv6_address = socket.inet_pton(socket.AF_INET6, "2001:db8::1")
    info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, server_name, addresses=[ipv6_address])
    zc.registry.async_add(info)

    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(server_name.upper(), const._TYPE_AAAA, const._CLASS_IN)
    generated.add_question(question)
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert question_answers
    mcast_answers = list(question_answers.mcast_now)
    assert mcast_answers[0].address == ipv6_address  # type: ignore[attr-defined]
    # unregister
    zc.registry.async_remove(info)
    zc.close()


@unittest.skipIf(not has_working_ipv6(), "Requires IPv6")
@unittest.skipIf(os.environ.get("SKIP_IPV6"), "IPv6 tests disabled")
def test_a_and_aaaa_record_fate_sharing():
    """Test that queries for AAAA always return A records in the additionals and should respond right away."""
    zc = Zeroconf(interfaces=["127.0.0.1"])
    type_ = "_a-and-aaaa-service._tcp.local."
    name = "knownname"
    registration_name = f"{name}.{type_}"
    desc = {"path": "/~paulsm/"}
    server_name = "ash-2.local."
    ipv6_address = socket.inet_pton(socket.AF_INET6, "2001:db8::1")
    ipv4_address = socket.inet_aton("10.0.1.2")
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        server_name,
        addresses=[ipv6_address, ipv4_address],
    )
    aaaa_record = info.dns_addresses(version=r.IPVersion.V6Only)[0]
    a_record = info.dns_addresses(version=r.IPVersion.V4Only)[0]

    zc.registry.async_add(info)

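    # mDNS "fate sharing": a response to an A question should carry the AAAA
    # record in the additionals, and vice versa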
    # Test AAAA query
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(server_name, const._TYPE_AAAA, const._CLASS_IN)
    generated.add_question(question)
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert question_answers
    additionals = set().union(*question_answers.mcast_now.values())
    assert aaaa_record in question_answers.mcast_now
    assert a_record in additionals
    assert len(question_answers.mcast_now) == 1
    assert len(additionals) == 1

    # Test A query
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(server_name, const._TYPE_A, const._CLASS_IN)
    generated.add_question(question)
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert question_answers
    additionals = set().union(*question_answers.mcast_now.values())
    assert a_record in question_answers.mcast_now
    assert aaaa_record in additionals
    assert len(question_answers.mcast_now) == 1
    assert len(additionals) == 1

    # unregister
    zc.registry.async_remove(info)
    zc.close()


def test_unicast_response():
    """Ensure we send a unicast response when the source port is not the MDNS port."""
    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=["127.0.0.1"])

    # service definition
    type_ = "_test-srvc-type._tcp.local."
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"
    desc = {"path": "/~paulsm/"}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    # register
    zc.registry.async_add(info)
    _clear_cache(zc)

    # query
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    query.add_question(r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN))
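    # The second argument (True) marks the query as arriving from a non-mDNS
    # source port, so a unicast response is generated alongside the multicast one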
    question_answers = zc.query_handler.async_response(
        [r.DNSIncoming(packet) for packet in query.packets()], True
    )
    assert question_answers
    for answers in (question_answers.ucast, question_answers.mcast_aggregate):
        has_srv = has_txt = has_a = has_aaaa = has_nsec = False
        nbr_additionals = 0
        nbr_answers = len(answers)
        additionals = set().union(*answers.values())
        for answer in additionals:
            nbr_additionals += 1
            if answer.type == const._TYPE_SRV:
                has_srv = True
            elif answer.type == const._TYPE_TXT:
                has_txt = True
            elif answer.type == const._TYPE_A:
                has_a = True
            elif answer.type == const._TYPE_AAAA:
                has_aaaa = True
            elif answer.type == const._TYPE_NSEC:
                has_nsec = True
        # There will be one NSEC additional to indicate the lack of an AAAA record
        assert nbr_answers == 1 and nbr_additionals == 4
        assert has_srv and has_txt and has_a and has_nsec
        assert not has_aaaa

    # unregister
    zc.registry.async_remove(info)
    zc.close()


@pytest.mark.asyncio
async def test_probe_answered_immediately():
    """Verify probes are responded to immediately."""
    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=["127.0.0.1"])

    # service definition
    type_ = "_test-srvc-type._tcp.local."
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"
    desc = {"path": "/~paulsm/"}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    zc.registry.async_add(info)
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN)
    query.add_question(question)
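    # Including an authoritative answer makes this query a probe; probes are
    # answered immediately instead of being aggregated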
    query.add_authorative_answer(info.dns_pointer())
    question_answers = zc.query_handler.async_response(
        [r.DNSIncoming(packet) for packet in query.packets()], False
    )
    assert question_answers
    assert not question_answers.ucast
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second
    assert question_answers.mcast_now

    query = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN)
    question.unicast = True
    query.add_question(question)
    query.add_authorative_answer(info.dns_pointer())
    question_answers = zc.query_handler.async_response(
        [r.DNSIncoming(packet) for packet in query.packets()], False
    )
    assert question_answers
    assert question_answers.ucast
    assert question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second
    zc.close()


@pytest.mark.asyncio
async def test_probe_answered_immediately_with_uppercase_name():
    """Verify probes are responded to immediately with an uppercase name."""
    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=["127.0.0.1"])

    # service definition
    type_ = "_test-srvc-type._tcp.local."
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"
    desc = {"path": "/~paulsm/"}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    zc.registry.async_add(info)
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(info.type.upper(), const._TYPE_PTR, const._CLASS_IN)
    query.add_question(question)
    query.add_authorative_answer(info.dns_pointer())
    question_answers = zc.query_handler.async_response(
        [r.DNSIncoming(packet) for packet in query.packets()], False
    )
    assert question_answers
    assert not question_answers.ucast
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second
    assert question_answers.mcast_now

    query = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN)
    question.unicast = True
    query.add_question(question)
    query.add_authorative_answer(info.dns_pointer())
    question_answers = zc.query_handler.async_response(
        [r.DNSIncoming(packet) for packet in query.packets()], False
    )
    assert question_answers
    assert question_answers.ucast
    assert question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second
    zc.close()


def test_qu_response():
    """Handle multicast incoming with the QU bit set."""
    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=["127.0.0.1"])

    # service definition
    type_ = "_test-srvc-type._tcp.local."
    other_type_ = "_notthesame._tcp.local."
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"
    registration_name2 = f"{name}.{other_type_}"
    desc = {"path": "/~paulsm/"}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    info2 = ServiceInfo(
        other_type_,
        registration_name2,
        80,
        0,
        0,
        desc,
        "ash-other.local.",
        addresses=[socket.inet_aton("10.0.4.2")],
    )
    # register
    zc.register_service(info)

    def _validate_complete_response(answers):
        has_srv = has_txt = has_a = has_aaaa = has_nsec = False
        nbr_answers = len(answers)
        additionals = set().union(*answers.values())
        nbr_additionals = len(additionals)

        for answer in additionals:
            if answer.type == const._TYPE_SRV:
                has_srv = True
            elif answer.type == const._TYPE_TXT:
                has_txt = True
            elif answer.type == const._TYPE_A:
                has_a = True
            elif answer.type == const._TYPE_AAAA:
                has_aaaa = True
            elif answer.type == const._TYPE_NSEC:
                has_nsec = True
        assert nbr_answers == 1 and nbr_additionals == 4
        assert has_srv and has_txt and has_a and has_nsec
        assert not has_aaaa

    # With QU set we should respond via unicast only when the answer has recently been multicast
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN)
    question.unicast = True  # Set the QU bit
    assert question.unicast is True
    query.add_question(question)

    question_answers = zc.query_handler.async_response(
        [r.DNSIncoming(packet) for packet in query.packets()], False
    )
    assert question_answers
    _validate_complete_response(question_answers.ucast)
    assert not question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second

    _clear_cache(zc)
    # With QU set we should respond via multicast only, since the answer has not
    # been multicast recently (within 75% of its TTL)
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN)
    question.unicast = True  # Set the QU bit
    assert question.unicast is True
    query.add_question(question)
    question_answers = zc.query_handler.async_response(
        [r.DNSIncoming(packet) for packet in query.packets()], False
    )
    assert question_answers
    assert not question_answers.ucast
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second
    _validate_complete_response(question_answers.mcast_now)

    # With QU set and an authoritative answer (probe) we should respond to both unicast
    # and multicast, since the answer hasn't been multicast recently (within 75% of its TTL)
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN)
    question.unicast = True  # Set the QU bit
    assert question.unicast is True
    query.add_question(question)
    query.add_authorative_answer(info2.dns_pointer())
    question_answers = zc.query_handler.async_response(
        [r.DNSIncoming(packet) for packet in query.packets()], False
    )
    assert question_answers
    _validate_complete_response(question_answers.ucast)
    _validate_complete_response(question_answers.mcast_now)

    _inject_response(
        zc,
        r.DNSIncoming(construct_outgoing_multicast_answers(question_answers.mcast_now).packets()[0]),
    )
    # With the cache repopulated, we should respond via unicast only since the answer was recently multicast
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN)
    question.unicast = True  # Set the QU bit
    assert question.unicast is True
    query.add_question(question)
    question_answers = zc.query_handler.async_response(
        [r.DNSIncoming(packet) for packet in query.packets()], False
    )
    assert question_answers
    assert not question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second
    _validate_complete_response(question_answers.ucast)
    # unregister
    zc.unregister_service(info)
    zc.close()


def test_known_answer_supression():
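    """Verify known-answer suppression: answers already present in the query are not sent again."""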
    zc = Zeroconf(interfaces=["127.0.0.1"])
    type_ = "_knownanswersv8._tcp.local."
    name = "knownname"
    registration_name = f"{name}.{type_}"
    desc = {"path": "/~paulsm/"}
    server_name = "ash-2.local."
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        server_name,
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    zc.registry.async_add(info)

    now = current_time_millis()
    _clear_cache(zc)
    # Test PTR suppression
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(type_, const._TYPE_PTR, const._CLASS_IN)
    generated.add_question(question)
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert question_answers
    assert not question_answers.ucast
    assert not question_answers.mcast_now
    assert question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second

    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(type_, const._TYPE_PTR, const._CLASS_IN)
    generated.add_question(question)
    generated.add_answer_at_time(info.dns_pointer(), now)
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert question_answers
    assert not question_answers.ucast
    assert not question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second

    # Test A suppression
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(server_name, const._TYPE_A, const._CLASS_IN)
    generated.add_question(question)
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert question_answers
    assert not question_answers.ucast
    assert question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second

    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(server_name, const._TYPE_A, const._CLASS_IN)
    generated.add_question(question)
    for dns_address in info.dns_addresses():
        generated.add_answer_at_time(dns_address, now)
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert question_answers
    assert not question_answers.ucast
    assert not question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second

    # Test that an NSEC record is returned when we explicitly ask for an AAAA record that does not exist
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(server_name, const._TYPE_AAAA, const._CLASS_IN)
    generated.add_question(question)
    for dns_address in info.dns_addresses():
        generated.add_answer_at_time(dns_address, now)
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert question_answers
    assert not question_answers.ucast
    expected_nsec_record = cast(r.DNSNsec, next(iter(question_answers.mcast_now)))
    assert const._TYPE_A not in expected_nsec_record.rdtypes
    assert const._TYPE_AAAA in expected_nsec_record.rdtypes
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second

    # Test SRV suppression
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(registration_name, const._TYPE_SRV, const._CLASS_IN)
    generated.add_question(question)
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert question_answers
    assert not question_answers.ucast
    assert question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second

    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(registration_name, const._TYPE_SRV, const._CLASS_IN)
    generated.add_question(question)
    generated.add_answer_at_time(info.dns_service(), now)
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert question_answers
    assert not question_answers.ucast
    assert not question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second

    # Test TXT suppression
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(registration_name, const._TYPE_TXT, const._CLASS_IN)
    generated.add_question(question)
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert question_answers
    assert not question_answers.ucast
    assert not question_answers.mcast_now
    assert question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second

    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(registration_name, const._TYPE_TXT, const._CLASS_IN)
    generated.add_question(question)
    generated.add_answer_at_time(info.dns_text(), now)
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert question_answers
    assert not question_answers.ucast
    assert not question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second

    # unregister
    zc.registry.async_remove(info)
    zc.close()


def test_multi_packet_known_answer_supression():
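    """Verify known-answer suppression is honored across all packets of a multi-packet query."""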
    zc = Zeroconf(interfaces=["127.0.0.1"])
    type_ = "_handlermultis._tcp.local."
    name = "knownname"
    name2 = "knownname2"
    name3 = "knownname3"

    registration_name = f"{name}.{type_}"
    registration2_name = f"{name2}.{type_}"
    registration3_name = f"{name3}.{type_}"

    desc = {"path": "/~paulsm/"}
    server_name = "ash-2.local."
    server_name2 = "ash-3.local."
    server_name3 = "ash-4.local."

    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        server_name,
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    info2 = ServiceInfo(
        type_,
        registration2_name,
        80,
        0,
        0,
        desc,
        server_name2,
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    info3 = ServiceInfo(
        type_,
        registration3_name,
        80,
        0,
        0,
        desc,
        server_name3,
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    zc.registry.async_add(info)
    zc.registry.async_add(info2)
    zc.registry.async_add(info3)

    now = current_time_millis()
    _clear_cache(zc)
    # Test PTR suppression
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(type_, const._TYPE_PTR, const._CLASS_IN)
    generated.add_question(question)
    for _ in range(1000):
        # Add so many answers we end up with another packet
        generated.add_answer_at_time(info.dns_pointer(), now)
    generated.add_answer_at_time(info2.dns_pointer(), now)
    generated.add_answer_at_time(info3.dns_pointer(), now)
    packets = generated.packets()
    assert len(packets) > 1
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert question_answers
    assert not question_answers.ucast
    assert not question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second
    # unregister
    zc.registry.async_remove(info)
    zc.registry.async_remove(info2)
    zc.registry.async_remove(info3)
    zc.close()


def test_known_answer_supression_service_type_enumeration_query():
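    """Verify known-answer suppression for the service type enumeration query."""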
    zc = Zeroconf(interfaces=["127.0.0.1"])
    type_ = "_otherknown._tcp.local."
    name = "knownname"
    registration_name = f"{name}.{type_}"
    desc = {"path": "/~paulsm/"}
    server_name = "ash-2.local."
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        server_name,
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    zc.registry.async_add(info)

    type_2 = "_otherknown2._tcp.local."
    name = "knownname"
    registration_name2 = f"{name}.{type_2}"
    desc = {"path": "/~paulsm/"}
    server_name2 = "ash-3.local."
    info2 = ServiceInfo(
        type_2,
        registration_name2,
        80,
        0,
        0,
        desc,
        server_name2,
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    zc.registry.async_add(info2)
    now = current_time_millis()
    _clear_cache(zc)

    # Test PTR suppression
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(const._SERVICE_TYPE_ENUMERATION_NAME, const._TYPE_PTR, const._CLASS_IN)
    generated.add_question(question)
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert question_answers
    assert not question_answers.ucast
    assert not question_answers.mcast_now
    assert question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second

    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(const._SERVICE_TYPE_ENUMERATION_NAME, const._TYPE_PTR, const._CLASS_IN)
    generated.add_question(question)
    generated.add_answer_at_time(
        r.DNSPointer(
            const._SERVICE_TYPE_ENUMERATION_NAME,
            const._TYPE_PTR,
            const._CLASS_IN,
            const._DNS_OTHER_TTL,
            type_,
        ),
        now,
    )
    generated.add_answer_at_time(
        r.DNSPointer(
            const._SERVICE_TYPE_ENUMERATION_NAME,
            const._TYPE_PTR,
            const._CLASS_IN,
            const._DNS_OTHER_TTL,
            type_2,
        ),
        now,
    )
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert question_answers
    assert not question_answers.ucast
    assert not question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second

    # unregister
    zc.registry.async_remove(info)
    zc.registry.async_remove(info2)
    zc.close()


def test_upper_case_enumeration_query():
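    """Verify the service type enumeration query is matched case-insensitively."""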
    zc = Zeroconf(interfaces=["127.0.0.1"])
    type_ = "_otherknown._tcp.local."
    name = "knownname"
    registration_name = f"{name}.{type_}"
    desc = {"path": "/~paulsm/"}
    server_name = "ash-2.local."
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        server_name,
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    zc.registry.async_add(info)

    type_2 = "_otherknown2._tcp.local."
    name = "knownname"
    registration_name2 = f"{name}.{type_2}"
    desc = {"path": "/~paulsm/"}
    server_name2 = "ash-3.local."
    info2 = ServiceInfo(
        type_2,
        registration_name2,
        80,
        0,
        0,
        desc,
        server_name2,
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    zc.registry.async_add(info2)
    _clear_cache(zc)

    # Test PTR suppression
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(const._SERVICE_TYPE_ENUMERATION_NAME.upper(), const._TYPE_PTR, const._CLASS_IN)
    generated.add_question(question)
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert question_answers
    assert not question_answers.ucast
    assert not question_answers.mcast_now
    assert question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second
    # unregister
    zc.registry.async_remove(info)
    zc.registry.async_remove(info2)
    zc.close()


def test_enumeration_query_with_no_registered_services():
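    """Verify an enumeration query produces no answers when no services are registered."""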
    zc = Zeroconf(interfaces=["127.0.0.1"])
    _clear_cache(zc)
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(const._SERVICE_TYPE_ENUMERATION_NAME.upper(), const._TYPE_PTR, const._CLASS_IN)
    generated.add_question(question)
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert not question_answers
    zc.close()


# This test uses asyncio because it needs to access the cache directly
# which is not threadsafe
@pytest.mark.asyncio
async def test_qu_response_only_sends_additionals_if_sends_answer():
    """Test that a QU response does not send additionals unless it sends the answer as well."""
    # instantiate a zeroconf instance
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    zc = aiozc.zeroconf

    type_ = "_addtest1._tcp.local."
    name = "knownname"
    registration_name = f"{name}.{type_}"
    desc = {"path": "/~paulsm/"}
    server_name = "ash-2.local."
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        server_name,
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    zc.registry.async_add(info)

    type_2 = "_addtest2._tcp.local."
    name = "knownname"
    registration_name2 = f"{name}.{type_2}"
    desc = {"path": "/~paulsm/"}
    server_name2 = "ash-3.local."
    info2 = ServiceInfo(
        type_2,
        registration_name2,
        80,
        0,
        0,
        desc,
        server_name2,
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    zc.registry.async_add(info2)

    ptr_record = info.dns_pointer()

    # Add the PTR record to the cache
    zc.cache.async_add_records([ptr_record])

    # Add the A record to the cache with 50% ttl remaining
    a_record = info.dns_addresses()[0]
    zc.cache._async_set_created_ttl(a_record, current_time_millis() - (a_record.ttl * 1000 / 2), a_record.ttl)
    assert not a_record.is_recent(current_time_millis())
    info._dns_address_cache = None  # we are mutating the record so clear the cache

    # With QU set we should respond via unicast only when the answer has recently
    # been multicast, even if the additional has not been recently multicast
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN)
    question.unicast = True  # Set the QU bit
    assert question.unicast is True
    query.add_question(question)

    question_answers = zc.query_handler.async_response(
        [r.DNSIncoming(packet) for packet in query.packets()], False
    )
    assert question_answers
    assert not question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second

    additionals = set().union(*question_answers.ucast.values())
    assert a_record in additionals
    assert ptr_record in question_answers.ucast

    # Remove the 50% A record and add a 100% A record
    zc.cache.async_remove_records([a_record])
    a_record = info.dns_addresses()[0]
    assert a_record.is_recent(current_time_millis())
    zc.cache.async_add_records([a_record])
    # With QU set we should respond via unicast only when the answer has recently
    # been multicast, even if the additional has not been recently multicast
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN)
    question.unicast = True  # Set the QU bit
    assert question.unicast is True
    query.add_question(question)

    question_answers = zc.query_handler.async_response(
        [r.DNSIncoming(packet) for packet in query.packets()], False
    )
    assert question_answers
    assert not question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second
    additionals = set().union(*question_answers.ucast.values())
    assert a_record in additionals
    assert ptr_record in question_answers.ucast

    # Remove the 100% PTR record and add a 50% PTR record
    zc.cache.async_remove_records([ptr_record])
    zc.cache._async_set_created_ttl(
        ptr_record, current_time_millis() - (ptr_record.ttl * 1000 / 2), ptr_record.ttl
    )
    assert not ptr_record.is_recent(current_time_millis())
    # With QU set we should respond via multicast only, since the PTR record
    # has less than 75% of its ttl remaining
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN)
    question.unicast = True  # Set the QU bit
    assert question.unicast is True
    query.add_question(question)

    question_answers = zc.query_handler.async_response(
        [r.DNSIncoming(packet) for packet in query.packets()], False
    )
    assert question_answers
    assert not question_answers.ucast
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second
    additionals = set().union(*question_answers.mcast_now.values())
    assert a_record in additionals
    assert info.dns_text() in additionals
    assert info.dns_service() in additionals
    assert ptr_record in question_answers.mcast_now

    # Ask 2 QU questions: for info the PTR is at 50% TTL, for info2 it is at 100%.
    # We should get back a unicast reply for info2, but the answer for info
    # should be multicast since it has less than 75% of its TTL remaining
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN)
    question.unicast = True  # Set the QU bit
    assert question.unicast is True
    query.add_question(question)

    question = r.DNSQuestion(info2.type, const._TYPE_PTR, const._CLASS_IN)
    question.unicast = True  # Set the QU bit
    assert question.unicast is True
    query.add_question(question)
    zc.cache.async_add_records([info2.dns_pointer()])  # Add 100% TTL for info2 to the cache

    question_answers = zc.query_handler.async_response(
        [r.DNSIncoming(packet) for packet in query.packets()], False
    )
    assert question_answers
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second

    mcast_now_additionals = set().union(*question_answers.mcast_now.values())
    assert a_record in mcast_now_additionals
    assert info.dns_text() in mcast_now_additionals
    assert info.dns_addresses()[0] in mcast_now_additionals
    assert info.dns_pointer() in question_answers.mcast_now

    ucast_additionals = set().union(*question_answers.ucast.values())
    assert info2.dns_pointer() in question_answers.ucast
    assert info2.dns_text() in ucast_additionals
    assert info2.dns_service() in ucast_additionals
    assert info2.dns_addresses()[0] in ucast_additionals

    # unregister
    zc.registry.async_remove(info)
    await aiozc.async_close()


# This test uses asyncio because it needs to access the cache directly
# which is not threadsafe
@pytest.mark.asyncio
async def test_cache_flush_bit():
    """Test that the cache flush bit sets the TTL to one for matching records."""
    # instantiate a zeroconf instance
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    zc = aiozc.zeroconf

    type_ = "_cacheflush._tcp.local."
    name = "knownname"
    registration_name = f"{name}.{type_}"
    desc = {"path": "/~paulsm/"}
    server_name = "server-uu1.local."
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        server_name,
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    a_record = info.dns_addresses()[0]
    zc.cache.async_add_records([info.dns_pointer(), a_record, info.dns_text(), info.dns_service()])

    info.addresses = [socket.inet_aton("10.0.1.5"), socket.inet_aton("10.0.1.6")]
    new_records = info.dns_addresses()
    for new_record in new_records:
        assert new_record.unique is True

    original_a_record = zc.cache.async_get_unique(a_record)
    # Do the run within 1s of creation to verify the original record does not get expired
    out = r.DNSOutgoing(const._FLAGS_QR_RESPONSE | const._FLAGS_AA, multicast=True)
    for answer in new_records:
        out.add_answer_at_time(answer, 0)
    for packet in out.packets():
        zc.record_manager.async_updates_from_response(r.DNSIncoming(packet))
    assert zc.cache.async_get_unique(a_record) is original_a_record
    assert original_a_record is not None
    assert original_a_record.ttl != 1
    for record in new_records:
        assert zc.cache.async_get_unique(record) is not None

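    # Backdate the original record so it is now more than 1s old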
    original_a_record.created = current_time_millis() - 1500

    # Now that the original record is more than 1s old, the cache-flush bit should set its TTL to 1
    out = r.DNSOutgoing(const._FLAGS_QR_RESPONSE | const._FLAGS_AA, multicast=True)
    for answer in new_records:
        out.add_answer_at_time(answer, 0)
    for packet in out.packets():
        zc.record_manager.async_updates_from_response(r.DNSIncoming(packet))
    assert original_a_record.ttl == 1
    for record in new_records:
        assert zc.cache.async_get_unique(record) is not None

    cached_record_group = [
        zc.cache.async_all_by_details(record.name, record.type, record.class_) for record in new_records
    ]
    for cached_records in cached_record_group:
        for cached_record in cached_records:
            assert cached_record is not None
            cached_record.created = current_time_millis() - 1500

    fresh_address = socket.inet_aton("4.4.4.4")
    info.addresses = [fresh_address]
    # Do the run within 1s to verify the two new records get marked as expired
    out = r.DNSOutgoing(const._FLAGS_QR_RESPONSE | const._FLAGS_AA, multicast=True)
    for answer in info.dns_addresses():
        out.add_answer_at_time(answer, 0)
    for packet in out.packets():
        zc.record_manager.async_updates_from_response(r.DNSIncoming(packet))

    cached_record_group = [
        zc.cache.async_all_by_details(record.name, record.type, record.class_) for record in new_records
    ]
    for cached_records in cached_record_group:
        for cached_record in cached_records:
            # the fresh record (still bound to `answer` from the loop above) should not be set to 1
            if cached_record == answer:
                assert cached_record.ttl != 1
                continue
            assert cached_record is not None
            assert cached_record.ttl == 1

    for entry in zc.cache.async_all_by_details(server_name, const._TYPE_A, const._CLASS_IN):
        assert isinstance(entry, r.DNSAddress)
        if entry.address == fresh_address:
            assert entry.ttl > 1
        else:
            assert entry.ttl == 1

    # Wait for the ttl 1 records to expire
    await asyncio.sleep(1.1)

    loaded_info = r.ServiceInfo(type_, registration_name)
    loaded_info.load_from_cache(zc)
    assert loaded_info.addresses == info.addresses

    await aiozc.async_close()


# This test uses asyncio because it needs to access the cache directly
# which is not threadsafe
@pytest.mark.asyncio
async def test_record_update_manager_add_listener_callsback_existing_records():
    """Test that the RecordUpdateManager will callback existing records."""

    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    zc: Zeroconf = aiozc.zeroconf
    updated = []

    class MyListener(r.RecordUpdateListener):
        """A RecordUpdateListener that does not implement update_records."""

        def async_update_records(self, zc: Zeroconf, now: float, records: list[r.RecordUpdate]) -> None:
            """Update multiple records in one shot."""
            updated.extend(records)

    type_ = "_cacheflush._tcp.local."
    name = "knownname"
    registration_name = f"{name}.{type_}"
    desc = {"path": "/~paulsm/"}
    server_name = "server-uu1.local."
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        server_name,
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    a_record = info.dns_addresses()[0]
    ptr_record = info.dns_pointer()
    zc.cache.async_add_records([ptr_record, a_record, info.dns_text(), info.dns_service()])

    listener = MyListener()

    zc.add_listener(
        listener,
        [
            r.DNSQuestion(type_, const._TYPE_PTR, const._CLASS_IN),
            r.DNSQuestion(server_name, const._TYPE_A, const._CLASS_IN),
        ],
    )
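    # add_listener should immediately schedule callbacks for matching records
    # that are already in the cache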
    await asyncio.sleep(0)  # flush out the call_soon_threadsafe

    assert {record.new for record in updated} == {ptr_record, a_record}

    # The old records should be None so we trigger Add events
    # in service browsers instead of Update events
    assert {record.old for record in updated} == {None}

    await aiozc.async_close()


@pytest.mark.asyncio
async def test_questions_query_handler_populates_the_question_history_from_qm_questions():
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    zc = aiozc.zeroconf
    now = current_time_millis()
    _clear_cache(zc)

    aiozc.zeroconf.registry.async_add(
        ServiceInfo(
            "_hap._tcp.local.",
            "other._hap._tcp.local.",
            80,
            0,
            0,
            {"md": "known"},
            "ash-2.local.",
            addresses=[socket.inet_aton("1.2.3.4")],
        )
    )
    services = aiozc.zeroconf.registry.async_get_infos_type("_hap._tcp.local.")
    assert len(services) == 1
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion("_hap._tcp.local.", const._TYPE_PTR, const._CLASS_IN)
    question.unicast = False
    known_answer = r.DNSPointer(
        "_hap._tcp.local.",
        const._TYPE_PTR,
        const._CLASS_IN,
        10000,
        "known-to-other._hap._tcp.local.",
    )
    generated.add_question(question)
    generated.add_answer_at_time(known_answer, 0)
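    # QM questions (unicast=False) are recorded in the question history along
    # with their known answers so duplicate questions can be suppressed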
    now = r.current_time_millis()
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert question_answers
    assert not question_answers.ucast
    assert not question_answers.mcast_now
    assert question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second
    assert zc.question_history.suppresses(question, now, {known_answer})

    await aiozc.async_close()


@pytest.mark.asyncio
async def test_questions_query_handler_does_not_put_qu_questions_in_history():
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    zc = aiozc.zeroconf
    now = current_time_millis()
    _clear_cache(zc)
    info = ServiceInfo(
        "_hap._tcp.local.",
        "qu._hap._tcp.local.",
        80,
        0,
        0,
        {"md": "known"},
        "ash-2.local.",
        addresses=[socket.inet_aton("1.2.3.4")],
    )
    aiozc.zeroconf.registry.async_add(info)
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion("_hap._tcp.local.", const._TYPE_PTR, const._CLASS_IN)
    question.unicast = True
    known_answer = r.DNSPointer(
        "_hap._tcp.local.",
        const._TYPE_PTR,
        const._CLASS_IN,
        10000,
        "notqu._hap._tcp.local.",
    )
    generated.add_question(question)
    generated.add_answer_at_time(known_answer, 0)
    now = r.current_time_millis()
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert question_answers
    assert "qu._hap._tcp.local." in str(question_answers)
    assert not question_answers.ucast  # answered via multicast since we have not multicast recently
    assert question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second
    assert not zc.question_history.suppresses(question, now, {known_answer})

    await aiozc.async_close()


@pytest.mark.asyncio
async def test_guard_against_low_ptr_ttl():
    """Ensure we enforce a min for PTR record ttls to avoid excessive refresh queries from ServiceBrowsers.

    Some poorly designed IoT devices can set excessively low PTR
    TTLs would will cause ServiceBrowsers to flood the network
    with excessive refresh queries.
    """
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    zc = aiozc.zeroconf
    # Apple uses a 15s minimum TTL; however, we do not have the same
    # level of rate limiting and safeguards, so we use 1/4 of the recommended value.
    answer_with_low_ttl = r.DNSPointer(
        "myservicelow_tcp._tcp.local.",
        const._TYPE_PTR,
        const._CLASS_IN | const._CLASS_UNIQUE,
        2,
        "low.local.",
    )
    answer_with_normal_ttl = r.DNSPointer(
        "myservicelow_tcp._tcp.local.",
        const._TYPE_PTR,
        const._CLASS_IN | const._CLASS_UNIQUE,
        const._DNS_OTHER_TTL,
        "normal.local.",
    )
    good_bye_answer = r.DNSPointer(
        "myservicelow_tcp._tcp.local.",
        const._TYPE_PTR,
        const._CLASS_IN | const._CLASS_UNIQUE,
        0,
        "goodbye.local.",
    )
    # TTL should be adjusted to a safe value
    response = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    response.add_answer_at_time(answer_with_low_ttl, 0)
    response.add_answer_at_time(answer_with_normal_ttl, 0)
    response.add_answer_at_time(good_bye_answer, 0)
    incoming = r.DNSIncoming(response.packets()[0])
    zc.record_manager.async_updates_from_response(incoming)

    incoming_answer_low = zc.cache.async_get_unique(answer_with_low_ttl)
    assert incoming_answer_low is not None
    assert incoming_answer_low.ttl == const._DNS_PTR_MIN_TTL
    incoming_answer_normal = zc.cache.async_get_unique(answer_with_normal_ttl)
    assert incoming_answer_normal is not None
    assert incoming_answer_normal.ttl == const._DNS_OTHER_TTL
    assert zc.cache.async_get_unique(good_bye_answer) is None
    await aiozc.async_close()


@pytest.mark.asyncio
async def test_duplicate_goodbye_answers_in_packet():
    """Ensure we do not throw an exception when there are duplicate goodbye records in a packet."""
    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    zc = aiozc.zeroconf
    answer_with_normal_ttl = r.DNSPointer(
        "myservicelow_tcp._tcp.local.",
        const._TYPE_PTR,
        const._CLASS_IN | const._CLASS_UNIQUE,
        const._DNS_OTHER_TTL,
        "host.local.",
    )
    good_bye_answer = r.DNSPointer(
        "myservicelow_tcp._tcp.local.",
        const._TYPE_PTR,
        const._CLASS_IN | const._CLASS_UNIQUE,
        0,
        "host.local.",
    )
    response = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    response.add_answer_at_time(answer_with_normal_ttl, 0)
    incoming = r.DNSIncoming(response.packets()[0])
    zc.record_manager.async_updates_from_response(incoming)

    response = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    response.add_answer_at_time(good_bye_answer, 0)
    response.add_answer_at_time(good_bye_answer, 0)
    incoming = r.DNSIncoming(response.packets()[0])
    zc.record_manager.async_updates_from_response(incoming)
    await aiozc.async_close()


@pytest.mark.asyncio
async def test_response_aggregation_timings(run_isolated):
    """Verify multicast responses are aggregated."""
    type_ = "_mservice._tcp.local."
    type_2 = "_mservice2._tcp.local."
    type_3 = "_mservice3._tcp.local."

    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    await aiozc.zeroconf.async_wait_for_start()

    name = "xxxyyy"
    registration_name = f"{name}.{type_}"
    registration_name2 = f"{name}.{type_2}"
    registration_name3 = f"{name}.{type_3}"

    desc = {"path": "/~paulsm/"}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    info2 = ServiceInfo(
        type_2,
        registration_name2,
        80,
        0,
        0,
        desc,
        "ash-4.local.",
        addresses=[socket.inet_aton("10.0.1.3")],
    )
    info3 = ServiceInfo(
        type_3,
        registration_name3,
        80,
        0,
        0,
        desc,
        "ash-4.local.",
        addresses=[socket.inet_aton("10.0.1.3")],
    )
    aiozc.zeroconf.registry.async_add(info)
    aiozc.zeroconf.registry.async_add(info2)
    aiozc.zeroconf.registry.async_add(info3)

    query = r.DNSOutgoing(const._FLAGS_QR_QUERY, multicast=True)
    question = r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN)
    query.add_question(question)

    query2 = r.DNSOutgoing(const._FLAGS_QR_QUERY, multicast=True)
    question2 = r.DNSQuestion(info2.type, const._TYPE_PTR, const._CLASS_IN)
    query2.add_question(question2)

    query3 = r.DNSOutgoing(const._FLAGS_QR_QUERY, multicast=True)
    question3 = r.DNSQuestion(info3.type, const._TYPE_PTR, const._CLASS_IN)
    query3.add_question(question3)

    query4 = r.DNSOutgoing(const._FLAGS_QR_QUERY, multicast=True)
    query4.add_question(question)
    query4.add_question(question2)

    zc = aiozc.zeroconf
    protocol = zc.engine.protocols[0]

    with patch.object(aiozc.zeroconf, "async_send") as send_mock:
        protocol.datagram_received(query.packets()[0], ("127.0.0.1", const._MDNS_PORT))
        protocol.datagram_received(query2.packets()[0], ("127.0.0.1", const._MDNS_PORT))
        protocol.datagram_received(query.packets()[0], ("127.0.0.1", const._MDNS_PORT))
        await asyncio.sleep(0.7)

        # Should aggregate into a single answer with up to a 500ms + 120ms delay
        calls = send_mock.mock_calls
        assert len(calls) == 1
        outgoing = send_mock.call_args[0][0]
        incoming = r.DNSIncoming(outgoing.packets()[0])
        zc.record_manager.async_updates_from_response(incoming)
        assert info.dns_pointer() in incoming.answers()
        assert info2.dns_pointer() in incoming.answers()
        send_mock.reset_mock()

        protocol.datagram_received(query3.packets()[0], ("127.0.0.1", const._MDNS_PORT))
        await asyncio.sleep(0.3)

        # Should send within 120ms since there are no other
        # answers to aggregate with
        calls = send_mock.mock_calls
        assert len(calls) == 1
        outgoing = send_mock.call_args[0][0]
        incoming = r.DNSIncoming(outgoing.packets()[0])
        zc.record_manager.async_updates_from_response(incoming)
        assert info3.dns_pointer() in incoming.answers()
        send_mock.reset_mock()

        # Because the response was sent in the last second we need to make
        # sure the next answer is delayed at least a second
        aiozc.zeroconf.engine.protocols[0].datagram_received(
            query4.packets()[0], ("127.0.0.1", const._MDNS_PORT)
        )
        await asyncio.sleep(0.5)

        # After 0.5 seconds it should not have been sent
        # Protect the network against excessive packet flooding
        # https://datatracker.ietf.org/doc/html/rfc6762#section-14
        calls = send_mock.mock_calls
        assert len(calls) == 0
        send_mock.reset_mock()

        await asyncio.sleep(1.2)
        calls = send_mock.mock_calls
        assert len(calls) == 1
        outgoing = send_mock.call_args[0][0]
        incoming = r.DNSIncoming(outgoing.packets()[0])
        assert info.dns_pointer() in incoming.answers()

    await aiozc.async_close()


@pytest.mark.asyncio
async def test_response_aggregation_timings_multiple(run_isolated, disable_duplicate_packet_suppression):
    """Verify multicast responses that are aggregated do not take longer than 620ms to send.

    620ms is the maximum: a 120ms random delay plus up to 500ms of additional aggregation delay."""
    type_2 = "_mservice2._tcp.local."

    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    await aiozc.zeroconf.async_wait_for_start()

    name = "xxxyyy"
    registration_name2 = f"{name}.{type_2}"

    desc = {"path": "/~paulsm/"}
    info2 = ServiceInfo(
        type_2,
        registration_name2,
        80,
        0,
        0,
        desc,
        "ash-4.local.",
        addresses=[socket.inet_aton("10.0.1.3")],
    )
    aiozc.zeroconf.registry.async_add(info2)

    query2 = r.DNSOutgoing(const._FLAGS_QR_QUERY, multicast=True)
    question2 = r.DNSQuestion(info2.type, const._TYPE_PTR, const._CLASS_IN)
    query2.add_question(question2)

    zc = aiozc.zeroconf
    protocol = zc.engine.protocols[0]

    with patch.object(aiozc.zeroconf, "async_send") as send_mock:
        send_mock.reset_mock()
        protocol.datagram_received(query2.packets()[0], ("127.0.0.1", const._MDNS_PORT))
        protocol.last_time = 0  # manually reset the last time to avoid duplicate packet suppression
        await asyncio.sleep(0.2)
        calls = send_mock.mock_calls
        assert len(calls) == 1
        outgoing = send_mock.call_args[0][0]
        incoming = r.DNSIncoming(outgoing.packets()[0])
        zc.record_manager.async_updates_from_response(incoming)
        assert info2.dns_pointer() in incoming.answers()

        send_mock.reset_mock()
        protocol.datagram_received(query2.packets()[0], ("127.0.0.1", const._MDNS_PORT))
        protocol.last_time = 0  # manually reset the last time to avoid duplicate packet suppression
        await asyncio.sleep(1.2)
        calls = send_mock.mock_calls
        assert len(calls) == 1
        outgoing = send_mock.call_args[0][0]
        incoming = r.DNSIncoming(outgoing.packets()[0])
        zc.record_manager.async_updates_from_response(incoming)
        assert info2.dns_pointer() in incoming.answers()

        send_mock.reset_mock()
        protocol.datagram_received(query2.packets()[0], ("127.0.0.1", const._MDNS_PORT))
        protocol.last_time = 0  # manually reset the last time to avoid duplicate packet suppression
        protocol.datagram_received(query2.packets()[0], ("127.0.0.1", const._MDNS_PORT))
        protocol.last_time = 0  # manually reset the last time to avoid duplicate packet suppression
        # With two packets the delay should increase; 900ms is already beyond
        # the maximum aggregation delay when there is no network protection
        # delay, so nothing should have been sent yet.
        await asyncio.sleep(0.9)
        calls = send_mock.mock_calls
        assert len(calls) == 0

        # 1000ms  (1s network protection delay)
        # - 900ms (already slept)
        # + 120ms (maximum random delay)
        # + 200ms (maximum protected aggregation delay)
        # +  20ms (execution time)
        await asyncio.sleep(millis_to_seconds(1000 - 900 + 120 + 200 + 20))
        calls = send_mock.mock_calls
        assert len(calls) == 1
        outgoing = send_mock.call_args[0][0]
        incoming = r.DNSIncoming(outgoing.packets()[0])
        zc.record_manager.async_updates_from_response(incoming)
        assert info2.dns_pointer() in incoming.answers()


@pytest.mark.asyncio
async def test_response_aggregation_random_delay():
    """Verify the random delay for outgoing multicast will coalesce into a single group

    When the random delay is shorter than the last outgoing group,
    the groups should be combined.
    """
    type_ = "_mservice._tcp.local."
    type_2 = "_mservice2._tcp.local."
    type_3 = "_mservice3._tcp.local."
    type_4 = "_mservice4._tcp.local."
    type_5 = "_mservice5._tcp.local."

    name = "xxxyyy"
    registration_name = f"{name}.{type_}"
    registration_name2 = f"{name}.{type_2}"
    registration_name3 = f"{name}.{type_3}"
    registration_name4 = f"{name}.{type_4}"
    registration_name5 = f"{name}.{type_5}"

    desc = {"path": "/~paulsm/"}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-1.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    info2 = ServiceInfo(
        type_2,
        registration_name2,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.3")],
    )
    info3 = ServiceInfo(
        type_3,
        registration_name3,
        80,
        0,
        0,
        desc,
        "ash-3.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    info4 = ServiceInfo(
        type_4,
        registration_name4,
        80,
        0,
        0,
        desc,
        "ash-4.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    info5 = ServiceInfo(
        type_5,
        registration_name5,
        80,
        0,
        0,
        desc,
        "ash-5.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    mocked_zc = unittest.mock.MagicMock()
    outgoing_queue = MulticastOutgoingQueue(mocked_zc, 0, 500)
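    # Each async_add schedules its answers to go out between
    # _multicast_delay_random_min and _multicast_delay_random_max milliseconds
    # from `now`.  A new group is appended only when its send time lands after
    # the last queued group; otherwise the answers are coalesced into that
    # group, which is what the assertions below exercise.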

    now = current_time_millis()
    outgoing_queue._multicast_delay_random_min = 500
    outgoing_queue._multicast_delay_random_max = 600
    outgoing_queue.async_add(now, {info.dns_pointer(): set()})

    # The second group should always be coalesced into the first group since it will always come before it
    outgoing_queue._multicast_delay_random_min = 300
    outgoing_queue._multicast_delay_random_max = 400
    outgoing_queue.async_add(now, {info2.dns_pointer(): set()})

    # The third group should always be coalesced into the first group since it will always come before it
    outgoing_queue._multicast_delay_random_min = 100
    outgoing_queue._multicast_delay_random_max = 200
    outgoing_queue.async_add(now, {info3.dns_pointer(): set(), info4.dns_pointer(): set()})

    assert len(outgoing_queue.queue) == 1
    assert info.dns_pointer() in outgoing_queue.queue[0].answers
    assert info2.dns_pointer() in outgoing_queue.queue[0].answers
    assert info3.dns_pointer() in outgoing_queue.queue[0].answers
    assert info4.dns_pointer() in outgoing_queue.queue[0].answers

    # The fourth group should not be coalesced because it is scheduled after the last group in the queue
    outgoing_queue._multicast_delay_random_min = 700
    outgoing_queue._multicast_delay_random_max = 800
    outgoing_queue.async_add(now, {info5.dns_pointer(): set()})

    assert len(outgoing_queue.queue) == 2
    assert info.dns_pointer() not in outgoing_queue.queue[1].answers
    assert info2.dns_pointer() not in outgoing_queue.queue[1].answers
    assert info3.dns_pointer() not in outgoing_queue.queue[1].answers
    assert info4.dns_pointer() not in outgoing_queue.queue[1].answers
    assert info5.dns_pointer() in outgoing_queue.queue[1].answers


@pytest.mark.asyncio
async def test_future_answers_are_removed_on_send():
    """Verify any future answers scheduled to be sent are removed when we send."""
    type_ = "_mservice._tcp.local."
    type_2 = "_mservice2._tcp.local."
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"
    registration_name2 = f"{name}.{type_2}"

    desc = {"path": "/~paulsm/"}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-1.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    info2 = ServiceInfo(
        type_2,
        registration_name2,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.3")],
    )
    mocked_zc = unittest.mock.MagicMock()
    outgoing_queue = MulticastOutgoingQueue(mocked_zc, 0, 0)
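    # When the queue sends a group, copies of the same answers still scheduled
    # in later groups are dropped as well, so a record is not multicast twice
    # in quick succession; the assertions below rely on this behavior.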

    now = current_time_millis()
    outgoing_queue._multicast_delay_random_min = 1
    outgoing_queue._multicast_delay_random_max = 1
    outgoing_queue.async_add(now, {info.dns_pointer(): set()})

    assert len(outgoing_queue.queue) == 1

    outgoing_queue._multicast_delay_random_min = 2
    outgoing_queue._multicast_delay_random_max = 2
    outgoing_queue.async_add(now, {info.dns_pointer(): set()})

    assert len(outgoing_queue.queue) == 2

    outgoing_queue._multicast_delay_random_min = 1000
    outgoing_queue._multicast_delay_random_max = 1000
    outgoing_queue.async_add(now, {info2.dns_pointer(): set()})
    outgoing_queue.async_add(now, {info.dns_pointer(): set()})

    assert len(outgoing_queue.queue) == 3

    await asyncio.sleep(0.1)
    outgoing_queue.async_ready()

    assert len(outgoing_queue.queue) == 1
    # The answer should get removed because we just sent it
    assert info.dns_pointer() not in outgoing_queue.queue[0].answers

    # But the one we have not sent yet should still go out later
    assert info2.dns_pointer() in outgoing_queue.queue[0].answers


@pytest.mark.asyncio
async def test_add_listener_warns_when_not_using_record_update_listener(caplog):
    """Log when a listener is added that is not using RecordUpdateListener as a base class."""

    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    zc: Zeroconf = aiozc.zeroconf
    updated = []

    class MyListener:
        """A RecordUpdateListener that does not implement update_records."""

        def async_update_records(self, zc: Zeroconf, now: float, records: list[r.RecordUpdate]) -> None:
            """Update multiple records in one shot."""
            updated.extend(records)

    zc.add_listener(MyListener(), None)  # type: ignore[arg-type]
    await asyncio.sleep(0)  # flush out any call soons
    assert (
        "listeners passed to async_add_listener must inherit from RecordUpdateListener" in caplog.text
        or "TypeError: Argument 'listener' has incorrect type" in caplog.text
    )

    await aiozc.async_close()


@pytest.mark.asyncio
async def test_async_updates_iteration_safe():
    """Ensure we can safely iterate over the async_updates."""

    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    zc: Zeroconf = aiozc.zeroconf
    updated = []
    good_bye_answer = r.DNSPointer(
        "myservicelow_tcp._tcp.local.",
        const._TYPE_PTR,
        const._CLASS_IN | const._CLASS_UNIQUE,
        0,
        "goodbye.local.",
    )

    class OtherListener(r.RecordUpdateListener):
        """A RecordUpdateListener that does not implement update_records."""

        def async_update_records(self, zc: Zeroconf, now: float, records: list[r.RecordUpdate]) -> None:
            """Update multiple records in one shot."""
            updated.extend(records)

    other = OtherListener()

    class ListenerThatAddsListener(r.RecordUpdateListener):
        """A RecordUpdateListener that does not implement update_records."""

        def async_update_records(self, zc: Zeroconf, now: float, records: list[r.RecordUpdate]) -> None:
            """Update multiple records in one shot."""
            updated.extend(records)
            zc.async_add_listener(other, None)

    zc.async_add_listener(ListenerThatAddsListener(), None)
    await asyncio.sleep(0)  # flush out any call soons

    # This should not raise RuntimeError: set changed size during iteration
    zc.record_manager.async_updates(
        now=current_time_millis(), records=[r.RecordUpdate(good_bye_answer, None)]
    )

    assert len(updated) == 1
    await aiozc.async_close()


@pytest.mark.asyncio
async def test_async_updates_complete_iteration_safe():
    """Ensure we can safely iterate over the async_updates_complete."""

    aiozc = AsyncZeroconf(interfaces=["127.0.0.1"])
    zc: Zeroconf = aiozc.zeroconf

    class OtherListener(r.RecordUpdateListener):
        """A RecordUpdateListener that does not implement update_records."""

        def async_update_records_complete(self) -> None:
            """Update multiple records in one shot."""

    other = OtherListener()

    class ListenerThatAddsListener(r.RecordUpdateListener):
        """A RecordUpdateListener that does not implement update_records."""

        def async_update_records_complete(self) -> None:
            """Update multiple records in one shot."""
            zc.async_add_listener(other, None)

    zc.async_add_listener(ListenerThatAddsListener(), None)
    await asyncio.sleep(0)  # flush out any call soons

    # This should not raise RuntimeError: set changed size during iteration
    zc.record_manager.async_updates_complete(False)
    await aiozc.async_close()
0707010000007B000081A400000000000000000000000167C7AD1600000AB0000000000000000000000000000000000000002E00000000python-zeroconf-0.146.0/tests/test_history.py"""Unit tests for _history.py."""

from __future__ import annotations

import zeroconf as r
from zeroconf import const
from zeroconf._history import QuestionHistory


def test_question_suppression():
    history = QuestionHistory()
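    # Duplicate question suppression (RFC 6762 section 7.1): a question seen
    # within roughly the last second is suppressed only when the known answers
    # recorded with it are a subset of the known answers we hold ourselves.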

    question = r.DNSQuestion("_hap._tcp._local.", const._TYPE_PTR, const._CLASS_IN)
    now = r.current_time_millis()
    other_known_answers: set[r.DNSRecord] = {
        r.DNSPointer(
            "_hap._tcp.local.",
            const._TYPE_PTR,
            const._CLASS_IN,
            10000,
            "known-to-other._hap._tcp.local.",
        )
    }
    our_known_answers: set[r.DNSRecord] = {
        r.DNSPointer(
            "_hap._tcp.local.",
            const._TYPE_PTR,
            const._CLASS_IN,
            10000,
            "known-to-us._hap._tcp.local.",
        )
    }

    history.add_question_at_time(question, now, other_known_answers)

    # Verify the question is suppressed if the known answers are the same
    assert history.suppresses(question, now, other_known_answers)

    # Verify the question is suppressed if we know the answer to all the known answers
    assert history.suppresses(question, now, other_known_answers | our_known_answers)

    # Verify the question is not suppressed if our known answers do not include the ones in the last question
    assert not history.suppresses(question, now, set())

    # Verify the question is not suppressed if our known answers do not include the ones in the last question
    assert not history.suppresses(question, now, our_known_answers)

    # Verify the question is no longer suppressed after 1s
    assert not history.suppresses(question, now + 1000, other_known_answers)


def test_question_expire():
    history = QuestionHistory()

    now = r.current_time_millis()
    question = r.DNSQuestion("_hap._tcp._local.", const._TYPE_PTR, const._CLASS_IN)
    other_known_answers: set[r.DNSRecord] = {
        r.DNSPointer(
            "_hap._tcp.local.",
            const._TYPE_PTR,
            const._CLASS_IN,
            10000,
            "known-to-other._hap._tcp.local.",
            created=now,
        )
    }
    history.add_question_at_time(question, now, other_known_answers)
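    # async_expire drops entries once they are older than the duplicate
    # question window (about one second), so the same question stops being
    # suppressed after the second expiry call below.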

    # Verify the question is suppressed if the known answers are the same
    assert history.suppresses(question, now, other_known_answers)

    history.async_expire(now)

    # Verify the question is suppressed if the known answers are the same since the cache hasn't expired
    assert history.suppresses(question, now, other_known_answers)

    history.async_expire(now + 1000)

    # Verify the question is no longer suppressed since the cache has expired
    assert not history.suppresses(question, now, other_known_answers)
0707010000007C000081A400000000000000000000000167C7AD1600001A07000000000000000000000000000000000000002B00000000python-zeroconf-0.146.0/tests/test_init.py"""Unit tests for zeroconf.py"""

from __future__ import annotations

import logging
import socket
import time
import unittest.mock
from unittest.mock import patch

import zeroconf as r
from zeroconf import ServiceInfo, Zeroconf, const

from . import _inject_responses

log = logging.getLogger("zeroconf")
original_logging_level = logging.NOTSET


def setup_module():
    global original_logging_level
    original_logging_level = log.level
    log.setLevel(logging.DEBUG)


def teardown_module():
    if original_logging_level != logging.NOTSET:
        log.setLevel(original_logging_level)


class Names(unittest.TestCase):
    def test_long_name(self):
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        question = r.DNSQuestion(
            "this.is.a.very.long.name.with.lots.of.parts.in.it.local.",
            const._TYPE_SRV,
            const._CLASS_IN,
        )
        generated.add_question(question)
        r.DNSIncoming(generated.packets()[0])

    def test_exceedingly_long_name(self):
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        name = f"{'part.' * 1000}local."
        question = r.DNSQuestion(name, const._TYPE_SRV, const._CLASS_IN)
        generated.add_question(question)
        r.DNSIncoming(generated.packets()[0])

    def test_extra_exceedingly_long_name(self):
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        name = f"{'part.' * 4000}local."
        question = r.DNSQuestion(name, const._TYPE_SRV, const._CLASS_IN)
        generated.add_question(question)
        r.DNSIncoming(generated.packets()[0])

    def test_exceedingly_long_name_part(self):
        name = f"{'a' * 1000}.local."
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        question = r.DNSQuestion(name, const._TYPE_SRV, const._CLASS_IN)
        generated.add_question(question)
        self.assertRaises(r.NamePartTooLongException, generated.packets)

    def test_same_name(self):
        name = "paired.local."
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        question = r.DNSQuestion(name, const._TYPE_SRV, const._CLASS_IN)
        generated.add_question(question)
        generated.add_question(question)
        r.DNSIncoming(generated.packets()[0])

    def test_verify_name_change_with_lots_of_names(self):
        # instantiate a zeroconf instance
        zc = Zeroconf(interfaces=["127.0.0.1"])

        # create a bunch of servers
        type_ = "_my-service._tcp.local."
        name = "a wonderful service"
        server_count = 300
        self.generate_many_hosts(zc, type_, name, server_count)

        # verify that name changing works
        self.verify_name_change(zc, type_, name, server_count)

        zc.close()

    def test_large_packet_exception_log_handling(self):
        """Verify we downgrade debug after warning."""

        # instantiate a zeroconf instance
        zc = Zeroconf(interfaces=["127.0.0.1"])

        with (
            patch("zeroconf._logger.log.warning") as mocked_log_warn,
            patch("zeroconf._logger.log.debug") as mocked_log_debug,
        ):
            # now that we have a long packet in our possession, let's verify the
            # exception handling.
            out = r.DNSOutgoing(const._FLAGS_QR_RESPONSE | const._FLAGS_AA)
            out.data.append(b"\0" * 10000)

            # mock the zeroconf logger and check for the correct logging backoff
            call_counts = mocked_log_warn.call_count, mocked_log_debug.call_count
            # try to send an oversized packet
            zc.send(out)
            assert mocked_log_warn.call_count == call_counts[0]
            zc.send(out)
            assert mocked_log_warn.call_count == call_counts[0]

            # mock the zeroconf logger and check for the correct logging backoff
            call_counts = mocked_log_warn.call_count, mocked_log_debug.call_count
            # force receive of an oversized packet
            zc.send(out, const._MDNS_ADDR, const._MDNS_PORT)
            zc.send(out, const._MDNS_ADDR, const._MDNS_PORT)
            time.sleep(0.3)
            r.log.debug(
                "warn %d debug %d was %s",
                mocked_log_warn.call_count,
                mocked_log_debug.call_count,
                call_counts,
            )
            assert mocked_log_debug.call_count > call_counts[0]

        # close our zeroconf which will close the sockets
        zc.close()

    def verify_name_change(self, zc, type_, name, number_hosts):
        desc = {"path": "/~paulsm/"}
        info_service = ServiceInfo(
            type_,
            f"{name}.{type_}",
            80,
            0,
            0,
            desc,
            "ash-2.local.",
            addresses=[socket.inet_aton("10.0.1.2")],
        )

        # verify name conflict
        self.assertRaises(r.NonUniqueNameException, zc.register_service, info_service)

        # verify no name conflict https://tools.ietf.org/html/rfc6762#section-6.6
        zc.register_service(info_service, cooperating_responders=True)

        # Create a new object since allow_name_change will mutate the
        # original object and then we will have the wrong service
        # in the registry
        info_service2 = ServiceInfo(
            type_,
            f"{name}.{type_}",
            80,
            0,
            0,
            desc,
            "ash-2.local.",
            addresses=[socket.inet_aton("10.0.1.2")],
        )
        zc.register_service(info_service2, allow_name_change=True)
        assert info_service2.name.split(".")[0] == f"{name}-{number_hosts + 1}"

    def generate_many_hosts(self, zc, type_, name, number_hosts):
        block_size = 25
        number_hosts = int((number_hosts - 1) / block_size + 1) * block_size
        out = r.DNSOutgoing(const._FLAGS_QR_RESPONSE | const._FLAGS_AA)
        for i in range(1, number_hosts + 1):
            next_name = name if i == 1 else f"{name}-{i}"
            self.generate_host(out, next_name, type_)

        _inject_responses(zc, [r.DNSIncoming(packet) for packet in out.packets()])

    @staticmethod
    def generate_host(out, host_name, type_):
        name = ".".join((host_name, type_))
        out.add_answer_at_time(
            r.DNSPointer(type_, const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, name),
            0,
        )
        out.add_answer_at_time(
            r.DNSService(
                type_,
                const._TYPE_SRV,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_HOST_TTL,
                0,
                0,
                80,
                name,
            ),
            0,
        )
0707010000007D000081A400000000000000000000000167C7AD16000022BD000000000000000000000000000000000000002F00000000python-zeroconf-0.146.0/tests/test_listener.py"""Unit tests for zeroconf._listener"""

from __future__ import annotations

import logging
import unittest
import unittest.mock
from unittest.mock import MagicMock, patch

import zeroconf as r
from zeroconf import (
    ServiceInfo,
    Zeroconf,
    _engine,
    _listener,
    const,
    current_time_millis,
)
from zeroconf._protocol import outgoing
from zeroconf._protocol.incoming import DNSIncoming

from . import QuestionHistoryWithoutSuppression

log = logging.getLogger("zeroconf")
original_logging_level = logging.NOTSET


def setup_module():
    global original_logging_level
    original_logging_level = log.level
    log.setLevel(logging.DEBUG)


def teardown_module():
    if original_logging_level != logging.NOTSET:
        log.setLevel(original_logging_level)


def test_guard_against_oversized_packets():
    """Ensure we do not process oversized packets.

    These packets can quickly overwhelm the system.
    """
    zc = Zeroconf(interfaces=["127.0.0.1"])

    generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)

    for i in range(5000):
        generated.add_answer_at_time(
            r.DNSText(
                f"packet{i}.local.",
                const._TYPE_TXT,
                const._CLASS_IN | const._CLASS_UNIQUE,
                500,
                b"path=/~paulsm/",
            ),
            0,
        )

    try:
        # We are patching to generate an oversized packet
        with (
            patch.object(outgoing, "_MAX_MSG_ABSOLUTE", 100000),
            patch.object(outgoing, "_MAX_MSG_TYPICAL", 100000),
        ):
            over_sized_packet = generated.packets()[0]
            assert len(over_sized_packet) > const._MAX_MSG_ABSOLUTE
    except AttributeError:
        # cannot patch with cython
        zc.close()
        return

    generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    okpacket_record = r.DNSText(
        "okpacket.local.",
        const._TYPE_TXT,
        const._CLASS_IN | const._CLASS_UNIQUE,
        500,
        b"path=/~paulsm/",
    )

    generated.add_answer_at_time(
        okpacket_record,
        0,
    )
    ok_packet = generated.packets()[0]

    # We cannot test through the network interface as some operating systems
    # will guard against the oversized packet and we won't see it.
    listener = _listener.AsyncListener(zc)
    listener.transport = unittest.mock.MagicMock()

    listener.datagram_received(ok_packet, ("127.0.0.1", const._MDNS_PORT))
    assert zc.cache.async_get_unique(okpacket_record) is not None

    listener.datagram_received(over_sized_packet, ("127.0.0.1", const._MDNS_PORT))
    assert (
        zc.cache.async_get_unique(
            r.DNSText(
                "packet0.local.",
                const._TYPE_TXT,
                const._CLASS_IN | const._CLASS_UNIQUE,
                500,
                b"path=/~paulsm/",
            )
        )
        is None
    )

    logging.getLogger("zeroconf").setLevel(logging.INFO)

    listener.datagram_received(over_sized_packet, ("::1", const._MDNS_PORT, 1, 1))
    assert (
        zc.cache.async_get_unique(
            r.DNSText(
                "packet0.local.",
                const._TYPE_TXT,
                const._CLASS_IN | const._CLASS_UNIQUE,
                500,
                b"path=/~paulsm/",
            )
        )
        is None
    )

    zc.close()


def test_guard_against_duplicate_packets():
    """Ensure we do not process duplicate packets.
    These packets can quickly overwhelm the system.
    """
    zc = Zeroconf(interfaces=["127.0.0.1"])
    zc.registry.async_add(
        ServiceInfo(
            "_http._tcp.local.",
            "Test._http._tcp.local.",
            server="Test._http._tcp.local.",
            port=4,
        )
    )
    zc.question_history = QuestionHistoryWithoutSuppression()
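    # The listener remembers the last packet it processed and drops an
    # identical packet that arrives again within about one second; QU
    # questions are exempt since they request unicast replies.  The calls
    # below replay packets and walk the clock forward to exercise both paths.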

    class SubListener(_listener.AsyncListener):
        def handle_query_or_defer(
            self,
            msg: DNSIncoming,
            addr: str,
            port: int,
            transport: _engine._WrappedTransport,
            v6_flow_scope: tuple[()] | tuple[int, int] = (),
        ) -> None:
            """Handle a query or defer it for later processing."""
            super().handle_query_or_defer(msg, addr, port, transport, v6_flow_scope)

    listener = SubListener(zc)
    listener.transport = MagicMock()

    query = r.DNSOutgoing(const._FLAGS_QR_QUERY, multicast=True)
    question = r.DNSQuestion("x._http._tcp.local.", const._TYPE_PTR, const._CLASS_IN)
    query.add_question(question)
    packet_with_qm_question = query.packets()[0]

    query3 = r.DNSOutgoing(const._FLAGS_QR_QUERY, multicast=True)
    question3 = r.DNSQuestion("x._ay._tcp.local.", const._TYPE_PTR, const._CLASS_IN)
    query3.add_question(question3)
    packet_with_qm_question2 = query3.packets()[0]

    query2 = r.DNSOutgoing(const._FLAGS_QR_QUERY, multicast=True)
    question2 = r.DNSQuestion("x._http._tcp.local.", const._TYPE_PTR, const._CLASS_IN)
    question2.unicast = True
    query2.add_question(question2)
    packet_with_qu_question = query2.packets()[0]

    addrs = ("1.2.3.4", 43)

    with patch.object(listener, "handle_query_or_defer") as _handle_query_or_defer:
        start_time = current_time_millis()

        listener._process_datagram_at_time(
            False,
            len(packet_with_qm_question),
            start_time,
            packet_with_qm_question,
            addrs,
        )
        _handle_query_or_defer.assert_called_once()
        _handle_query_or_defer.reset_mock()

        # Now call with the same packet again and handle_query_or_defer should not fire
        listener._process_datagram_at_time(
            False,
            len(packet_with_qm_question),
            start_time,
            packet_with_qm_question,
            addrs,
        )
        _handle_query_or_defer.assert_not_called()
        _handle_query_or_defer.reset_mock()

        # Now walk time forward 1100 milliseconds
        new_time = start_time + 1100
        # Now call with the same packet again and handle_query_or_defer should fire
        listener._process_datagram_at_time(
            False,
            len(packet_with_qm_question),
            new_time,
            packet_with_qm_question,
            addrs,
        )
        _handle_query_or_defer.assert_called_once()
        _handle_query_or_defer.reset_mock()

        # Now call with the different packet and handle_query_or_defer should fire
        listener._process_datagram_at_time(
            False,
            len(packet_with_qm_question2),
            new_time,
            packet_with_qm_question2,
            addrs,
        )
        _handle_query_or_defer.assert_called_once()
        _handle_query_or_defer.reset_mock()

        # Now call with the different packet and handle_query_or_defer should fire
        listener._process_datagram_at_time(
            False,
            len(packet_with_qm_question),
            new_time,
            packet_with_qm_question,
            addrs,
        )
        _handle_query_or_defer.assert_called_once()
        _handle_query_or_defer.reset_mock()

        # Now call with the different packet with qu question and handle_query_or_defer should fire
        listener._process_datagram_at_time(
            False,
            len(packet_with_qu_question),
            new_time,
            packet_with_qu_question,
            addrs,
        )
        _handle_query_or_defer.assert_called_once()
        _handle_query_or_defer.reset_mock()

        # Now call again with the same packet that has a qu question and handle_query_or_defer should fire
        listener._process_datagram_at_time(
            False,
            len(packet_with_qu_question),
            new_time,
            packet_with_qu_question,
            addrs,
        )
        _handle_query_or_defer.assert_called_once()
        _handle_query_or_defer.reset_mock()

        log.setLevel(logging.WARNING)

        # Call with the QM packet again
        listener._process_datagram_at_time(
            False,
            len(packet_with_qm_question),
            new_time,
            packet_with_qm_question,
            addrs,
        )
        _handle_query_or_defer.assert_called_once()
        _handle_query_or_defer.reset_mock()

        # Now call with the same packet again and handle_query_or_defer should not fire
        listener._process_datagram_at_time(
            False,
            len(packet_with_qm_question),
            new_time,
            packet_with_qm_question,
            addrs,
        )
        _handle_query_or_defer.assert_not_called()
        _handle_query_or_defer.reset_mock()

        # Now call with garbage
        listener._process_datagram_at_time(False, len(b"garbage"), new_time, b"garbage", addrs)
        _handle_query_or_defer.assert_not_called()
        _handle_query_or_defer.reset_mock()

    zc.close()
0707010000007E000081A400000000000000000000000167C7AD1600000D92000000000000000000000000000000000000002D00000000python-zeroconf-0.146.0/tests/test_logger.py"""Unit tests for logger.py."""

from __future__ import annotations

import logging
from unittest.mock import call, patch

from zeroconf._logger import QuietLogger, set_logger_level_if_unset


def test_loading_logger():
    """Test loading logger does not change level unless it is unset."""
    log = logging.getLogger("zeroconf")
    log.setLevel(logging.CRITICAL)
    set_logger_level_if_unset()
    log = logging.getLogger("zeroconf")
    assert log.level == logging.CRITICAL

    log = logging.getLogger("zeroconf")
    log.setLevel(logging.NOTSET)
    set_logger_level_if_unset()
    log = logging.getLogger("zeroconf")
    assert log.level == logging.WARNING


def test_log_warning_once():
    """Test we only log with warning level once."""
    QuietLogger._seen_logs = {}
    quiet_logger = QuietLogger()
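    # QuietLogger keeps a record of messages it has already emitted in
    # _seen_logs; the first occurrence is logged at warning level and repeats
    # are downgraded to debug, which the two patched blocks below assert.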
    with (
        patch("zeroconf._logger.log.warning") as mock_log_warning,
        patch("zeroconf._logger.log.debug") as mock_log_debug,
    ):
        quiet_logger.log_warning_once("the warning")

    assert mock_log_warning.mock_calls
    assert not mock_log_debug.mock_calls

    with (
        patch("zeroconf._logger.log.warning") as mock_log_warning,
        patch("zeroconf._logger.log.debug") as mock_log_debug,
    ):
        quiet_logger.log_warning_once("the warning")

    assert not mock_log_warning.mock_calls
    assert mock_log_debug.mock_calls


def test_log_exception_warning():
    """Test we only log with warning level once."""
    QuietLogger._seen_logs = {}
    quiet_logger = QuietLogger()
    with (
        patch("zeroconf._logger.log.warning") as mock_log_warning,
        patch("zeroconf._logger.log.debug") as mock_log_debug,
    ):
        quiet_logger.log_exception_warning("the exception warning")

    assert mock_log_warning.mock_calls
    assert not mock_log_debug.mock_calls

    with (
        patch("zeroconf._logger.log.warning") as mock_log_warning,
        patch("zeroconf._logger.log.debug") as mock_log_debug,
    ):
        quiet_logger.log_exception_warning("the exception warning")

    assert not mock_log_warning.mock_calls
    assert mock_log_debug.mock_calls


def test_log_exception_debug():
    """Test we only log the traceback once."""
    QuietLogger._seen_logs = {}
    quiet_logger = QuietLogger()
    with patch("zeroconf._logger.log.debug") as mock_log_debug:
        quiet_logger.log_exception_debug("the exception")

    assert mock_log_debug.mock_calls == [call("the exception", exc_info=True)]

    with patch("zeroconf._logger.log.debug") as mock_log_debug:
        quiet_logger.log_exception_debug("the exception")

    assert mock_log_debug.mock_calls == [call("the exception", exc_info=False)]


def test_log_exception_once():
    """Test we only log with warning level once."""
    QuietLogger._seen_logs = {}
    quiet_logger = QuietLogger()
    exc = Exception()
    with (
        patch("zeroconf._logger.log.warning") as mock_log_warning,
        patch("zeroconf._logger.log.debug") as mock_log_debug,
    ):
        quiet_logger.log_exception_once(exc, "the exceptional exception warning")

    assert mock_log_warning.mock_calls
    assert not mock_log_debug.mock_calls

    with (
        patch("zeroconf._logger.log.warning") as mock_log_warning,
        patch("zeroconf._logger.log.debug") as mock_log_debug,
    ):
        quiet_logger.log_exception_once(exc, "the exceptional exception warning")

    assert not mock_log_warning.mock_calls
    assert mock_log_debug.mock_calls
0707010000007F000081A400000000000000000000000167C7AD160000AD0E000000000000000000000000000000000000002F00000000python-zeroconf-0.146.0/tests/test_protocol.py"""Unit tests for zeroconf._protocol"""

from __future__ import annotations

import copy
import logging
import os
import socket
import struct
import unittest.mock
from typing import cast

import pytest

import zeroconf as r
from zeroconf import DNSHinfo, DNSIncoming, DNSText, const, current_time_millis

from . import has_working_ipv6

log = logging.getLogger("zeroconf")
original_logging_level = logging.NOTSET


def setup_module():
    global original_logging_level
    original_logging_level = log.level
    log.setLevel(logging.DEBUG)


def teardown_module():
    if original_logging_level != logging.NOTSET:
        log.setLevel(original_logging_level)


class PacketGeneration(unittest.TestCase):
    def test_parse_own_packet_simple(self):
        generated = r.DNSOutgoing(0)
        r.DNSIncoming(generated.packets()[0])

    def test_parse_own_packet_simple_unicast(self):
        generated = r.DNSOutgoing(0, False)
        r.DNSIncoming(generated.packets()[0])

    def test_parse_own_packet_flags(self):
        generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
        r.DNSIncoming(generated.packets()[0])

    def test_parse_own_packet_question(self):
        generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
        generated.add_question(r.DNSQuestion("testname.local.", const._TYPE_SRV, const._CLASS_IN))
        r.DNSIncoming(generated.packets()[0])

    def test_parse_own_packet_nsec(self):
        answer = r.DNSNsec(
            "eufy HomeBase2-2464._hap._tcp.local.",
            const._TYPE_NSEC,
            const._CLASS_IN | const._CLASS_UNIQUE,
            const._DNS_OTHER_TTL,
            "eufy HomeBase2-2464._hap._tcp.local.",
            [const._TYPE_TXT, const._TYPE_SRV],
        )

        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        generated.add_answer_at_time(answer, 0)
        parsed = r.DNSIncoming(generated.packets()[0])
        assert answer in parsed.answers()

        # Now with the higher RD type first
        answer = r.DNSNsec(
            "eufy HomeBase2-2464._hap._tcp.local.",
            const._TYPE_NSEC,
            const._CLASS_IN | const._CLASS_UNIQUE,
            const._DNS_OTHER_TTL,
            "eufy HomeBase2-2464._hap._tcp.local.",
            [const._TYPE_SRV, const._TYPE_TXT],
        )

        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        generated.add_answer_at_time(answer, 0)
        parsed = r.DNSIncoming(generated.packets()[0])
        assert answer in parsed.answers()

        # Types > 255 should raise an exception
        answer_invalid_types = r.DNSNsec(
            "eufy HomeBase2-2464._hap._tcp.local.",
            const._TYPE_NSEC,
            const._CLASS_IN | const._CLASS_UNIQUE,
            const._DNS_OTHER_TTL,
            "eufy HomeBase2-2464._hap._tcp.local.",
            [const._TYPE_TXT, const._TYPE_SRV, 1000],
        )
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        generated.add_answer_at_time(answer_invalid_types, 0)
        with pytest.raises(ValueError, match="rdtype 1000 is too large for NSEC"):
            generated.packets()

        # Empty rdtypes are not allowed
        answer_invalid_types = r.DNSNsec(
            "eufy HomeBase2-2464._hap._tcp.local.",
            const._TYPE_NSEC,
            const._CLASS_IN | const._CLASS_UNIQUE,
            const._DNS_OTHER_TTL,
            "eufy HomeBase2-2464._hap._tcp.local.",
            [],
        )
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        generated.add_answer_at_time(answer_invalid_types, 0)
        with pytest.raises(ValueError, match="NSEC must have at least one rdtype"):
            generated.packets()

    def test_parse_own_packet_response(self):
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        generated.add_answer_at_time(
            r.DNSService(
                "æøå.local.",
                const._TYPE_SRV,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_HOST_TTL,
                0,
                0,
                80,
                "foo.local.",
            ),
            0,
        )
        parsed = r.DNSIncoming(generated.packets()[0])
        assert len(generated.answers) == 1
        assert len(generated.answers) == len(parsed.answers())

    def test_adding_empty_answer(self):
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        generated.add_answer_at_time(
            None,
            0,
        )
        generated.add_answer_at_time(
            r.DNSService(
                "æøå.local.",
                const._TYPE_SRV,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_HOST_TTL,
                0,
                0,
                80,
                "foo.local.",
            ),
            0,
        )
        parsed = r.DNSIncoming(generated.packets()[0])
        assert len(generated.answers) == 1
        assert len(generated.answers) == len(parsed.answers())

    def test_adding_expired_answer(self):
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        generated.add_answer_at_time(
            r.DNSService(
                "æøå.local.",
                const._TYPE_SRV,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_HOST_TTL,
                0,
                0,
                80,
                "foo.local.",
            ),
            current_time_millis() + 1000000,
        )
        parsed = r.DNSIncoming(generated.packets()[0])
        assert len(generated.answers) == 0
        assert len(generated.answers) == len(parsed.answers())

    def test_match_question(self):
        generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
        question = r.DNSQuestion("testname.local.", const._TYPE_SRV, const._CLASS_IN)
        generated.add_question(question)
        parsed = r.DNSIncoming(generated.packets()[0])
        assert len(generated.questions) == 1
        assert len(generated.questions) == len(parsed.questions)
        assert question == parsed.questions[0]

    def test_suppress_answer(self):
        query_generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
        question = r.DNSQuestion("testname.local.", const._TYPE_SRV, const._CLASS_IN)
        query_generated.add_question(question)
        answer1 = r.DNSService(
            "testname1.local.",
            const._TYPE_SRV,
            const._CLASS_IN | const._CLASS_UNIQUE,
            const._DNS_HOST_TTL,
            0,
            0,
            80,
            "foo.local.",
        )
        staleanswer2 = r.DNSService(
            "testname2.local.",
            const._TYPE_SRV,
            const._CLASS_IN | const._CLASS_UNIQUE,
            const._DNS_HOST_TTL / 2,
            0,
            0,
            80,
            "foo.local.",
        )
        answer2 = r.DNSService(
            "testname2.local.",
            const._TYPE_SRV,
            const._CLASS_IN | const._CLASS_UNIQUE,
            const._DNS_HOST_TTL,
            0,
            0,
            80,
            "foo.local.",
        )
        query_generated.add_answer_at_time(answer1, 0)
        query_generated.add_answer_at_time(staleanswer2, 0)
        query = r.DNSIncoming(query_generated.packets()[0])

        # Should be suppressed
        response = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        response.add_answer(query, answer1)
        assert len(response.answers) == 0

        # Should not be suppressed, TTL in query is too short
        response.add_answer(query, answer2)
        assert len(response.answers) == 1

        # Should not be suppressed, name is different
        tmp = copy.copy(answer1)
        tmp.key = "testname3.local."
        tmp.name = "testname3.local."
        response.add_answer(query, tmp)
        assert len(response.answers) == 2

        # Should not be suppressed, type is different
        tmp = copy.copy(answer1)
        tmp.type = const._TYPE_A
        response.add_answer(query, tmp)
        assert len(response.answers) == 3

        # Should not be suppressed, class is different
        tmp = copy.copy(answer1)
        tmp.class_ = const._CLASS_NONE
        response.add_answer(query, tmp)
        assert len(response.answers) == 4

        # ::TODO:: could add additional tests for DNSAddress, DNSHinfo, DNSPointer, DNSText, DNSService

    def test_dns_hinfo(self):
        generated = r.DNSOutgoing(0)
        generated.add_additional_answer(DNSHinfo("irrelevant", const._TYPE_HINFO, 0, 0, "cpu", "os"))
        parsed = r.DNSIncoming(generated.packets()[0])
        answer = cast(r.DNSHinfo, parsed.answers()[0])
        assert answer.cpu == "cpu"
        assert answer.os == "os"

        generated = r.DNSOutgoing(0)
        generated.add_additional_answer(DNSHinfo("irrelevant", const._TYPE_HINFO, 0, 0, "cpu", "x" * 257))
        self.assertRaises(r.NamePartTooLongException, generated.packets)

    def test_many_questions(self):
        """Test many questions get separated into multiple packets."""
        generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
        questions = []
        for i in range(100):
            question = r.DNSQuestion(f"testname{i}.local.", const._TYPE_SRV, const._CLASS_IN)
            generated.add_question(question)
            questions.append(question)
        assert len(generated.questions) == 100

        packets = generated.packets()
        assert len(packets) == 2
        assert len(packets[0]) < const._MAX_MSG_TYPICAL
        assert len(packets[1]) < const._MAX_MSG_TYPICAL

        parsed1 = r.DNSIncoming(packets[0])
        assert len(parsed1.questions) == 85
        parsed2 = r.DNSIncoming(packets[1])
        assert len(parsed2.questions) == 15

    def test_many_questions_with_many_known_answers(self):
        """Test many questions and known answers get separated into multiple packets."""
        generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
        questions = []
        for _ in range(30):
            question = r.DNSQuestion("_hap._tcp.local.", const._TYPE_PTR, const._CLASS_IN)
            generated.add_question(question)
            questions.append(question)
        assert len(generated.questions) == 30
        now = current_time_millis()
        for _ in range(200):
            known_answer = r.DNSPointer(
                "myservice{i}_tcp._tcp.local.",
                const._TYPE_PTR,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_OTHER_TTL,
                "123.local.",
            )
            generated.add_answer_at_time(known_answer, now)
        packets = generated.packets()
        assert len(packets) == 3
        assert len(packets[0]) <= const._MAX_MSG_TYPICAL
        assert len(packets[1]) <= const._MAX_MSG_TYPICAL
        assert len(packets[2]) <= const._MAX_MSG_TYPICAL

        parsed1 = r.DNSIncoming(packets[0])
        assert len(parsed1.questions) == 30
        assert len(parsed1.answers()) == 88
        assert parsed1.truncated
        parsed2 = r.DNSIncoming(packets[1])
        assert len(parsed2.questions) == 0
        assert len(parsed2.answers()) == 101
        assert parsed2.truncated
        parsed3 = r.DNSIncoming(packets[2])
        assert len(parsed3.questions) == 0
        assert len(parsed3.answers()) == 11
        assert not parsed3.truncated

    def test_massive_probe_packet_split(self):
        """Test probe with many authoritative answers."""
        generated = r.DNSOutgoing(const._FLAGS_QR_QUERY | const._FLAGS_AA)
        questions = []
        for _ in range(30):
            question = r.DNSQuestion(
                "_hap._tcp.local.",
                const._TYPE_PTR,
                const._CLASS_IN | const._CLASS_UNIQUE,
            )
            generated.add_question(question)
            questions.append(question)
        assert len(generated.questions) == 30
        for _ in range(200):
            authorative_answer = r.DNSPointer(
                "myservice{i}_tcp._tcp.local.",
                const._TYPE_PTR,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_OTHER_TTL,
                "123.local.",
            )
            generated.add_authorative_answer(authorative_answer)
        packets = generated.packets()
        assert len(packets) == 3
        assert len(packets[0]) <= const._MAX_MSG_TYPICAL
        assert len(packets[1]) <= const._MAX_MSG_TYPICAL
        assert len(packets[2]) <= const._MAX_MSG_TYPICAL

        parsed1 = r.DNSIncoming(packets[0])
        assert parsed1.questions[0].unicast is True
        assert len(parsed1.questions) == 30
        assert parsed1.num_questions == 30
        assert parsed1.num_authorities == 88
        assert parsed1.truncated
        parsed2 = r.DNSIncoming(packets[1])
        assert len(parsed2.questions) == 0
        assert parsed2.num_authorities == 101
        assert parsed2.truncated
        parsed3 = r.DNSIncoming(packets[2])
        assert len(parsed3.questions) == 0
        assert parsed3.num_authorities == 11
        assert not parsed3.truncated

    def test_only_one_answer_can_be_large(self):
        """Test that only the first answer in each packet can be large.

        https://datatracker.ietf.org/doc/html/rfc6762#section-17
        """
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        query = r.DNSIncoming(r.DNSOutgoing(const._FLAGS_QR_QUERY).packets()[0])
        for _i in range(3):
            generated.add_answer(
                query,
                r.DNSText(
                    "zoom._hap._tcp.local.",
                    const._TYPE_TXT,
                    const._CLASS_IN | const._CLASS_UNIQUE,
                    1200,
                    b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==" * 100,
                ),
            )
        generated.add_answer(
            query,
            r.DNSService(
                "testname1.local.",
                const._TYPE_SRV,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_HOST_TTL,
                0,
                0,
                80,
                "foo.local.",
            ),
        )
        assert len(generated.answers) == 4

        packets = generated.packets()
        assert len(packets) == 4
        assert len(packets[0]) <= const._MAX_MSG_ABSOLUTE
        assert len(packets[0]) > const._MAX_MSG_TYPICAL

        assert len(packets[1]) <= const._MAX_MSG_ABSOLUTE
        assert len(packets[1]) > const._MAX_MSG_TYPICAL

        assert len(packets[2]) <= const._MAX_MSG_ABSOLUTE
        assert len(packets[2]) > const._MAX_MSG_TYPICAL

        assert len(packets[3]) <= const._MAX_MSG_TYPICAL

        for packet in packets:
            parsed = r.DNSIncoming(packet)
            assert len(parsed.answers()) == 1

    def test_questions_do_not_end_up_every_packet(self):
        """Test that questions are not sent again when multiple packets are needed.

        https://datatracker.ietf.org/doc/html/rfc6762#section-7.2
        Sometimes a Multicast DNS querier will already have too many answers
        to fit in the Known-Answer Section of its query packets....  It MUST
        immediately follow the packet with another query packet containing no
        questions and as many more Known-Answer records as will fit.
        """

        generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
        for i in range(35):
            question = r.DNSQuestion(f"testname{i}.local.", const._TYPE_SRV, const._CLASS_IN)
            generated.add_question(question)
            answer = r.DNSService(
                f"testname{i}.local.",
                const._TYPE_SRV,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_HOST_TTL,
                0,
                0,
                80,
                f"foo{i}.local.",
            )
            generated.add_answer_at_time(answer, 0)

        assert len(generated.questions) == 35
        assert len(generated.answers) == 35

        packets = generated.packets()
        assert len(packets) == 2
        assert len(packets[0]) <= const._MAX_MSG_TYPICAL
        assert len(packets[1]) <= const._MAX_MSG_TYPICAL

        parsed1 = r.DNSIncoming(packets[0])
        assert len(parsed1.questions) == 35
        assert len(parsed1.answers()) == 33

        parsed2 = r.DNSIncoming(packets[1])
        assert len(parsed2.questions) == 0
        assert len(parsed2.answers()) == 2


class PacketForm(unittest.TestCase):
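    """Validate the raw wire header produced by DNSOutgoing.

    Header layout per RFC 1035 section 4.1.1: bytes 0-1 carry the ID,
    bytes 2-3 the flag word, and bytes 4-11 four big-endian counts
    (questions, answers, authorities, additionals), which is why the
    tests below unpack "!4H" at offset 4.
    """
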
    def test_transaction_id(self):
        """ID must be zero in a DNS-SD packet"""
        generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
        packet = generated.packets()[0]
        id_ = packet[0] << 8 | packet[1]
        assert id_ == 0

    def test_setting_id(self):
        """Test setting id in the constructor"""
        generated = r.DNSOutgoing(const._FLAGS_QR_QUERY, id_=4444)
        assert generated.id == 4444

    def test_query_header_bits(self):
        generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
        packet = generated.packets()[0]
        flags = packet[2] << 8 | packet[3]
        assert flags == 0x0

    def test_response_header_bits(self):
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        packet = generated.packets()[0]
        flags = packet[2] << 8 | packet[3]
        assert flags == 0x8000

    def test_numbers(self):
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        packet = generated.packets()[0]
        (num_questions, num_answers, num_authorities, num_additionals) = struct.unpack("!4H", packet[4:12])
        assert num_questions == 0
        assert num_answers == 0
        assert num_authorities == 0
        assert num_additionals == 0

    def test_numbers_questions(self):
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        question = r.DNSQuestion("testname.local.", const._TYPE_SRV, const._CLASS_IN)
        for _i in range(10):
            generated.add_question(question)
        packet = generated.packets()[0]
        (num_questions, num_answers, num_authorities, num_additionals) = struct.unpack("!4H", packet[4:12])
        assert num_questions == 10
        assert num_answers == 0
        assert num_authorities == 0
        assert num_additionals == 0


class TestDnsIncoming(unittest.TestCase):
    def test_incoming_exception_handling(self):
        generated = r.DNSOutgoing(0)
        packet = generated.packets()[0]
        packet = packet[:8] + b"deadbeef" + packet[8:]
        parsed = r.DNSIncoming(packet)
        assert parsed.valid is False

    def test_incoming_unknown_type(self):
        generated = r.DNSOutgoing(0)
        answer = r.DNSAddress("a", const._TYPE_SOA, const._CLASS_IN, 1, b"a")
        generated.add_additional_answer(answer)
        packet = generated.packets()[0]
        parsed = r.DNSIncoming(packet)
        assert len(parsed.answers()) == 0
        assert parsed.is_query() != parsed.is_response()

    def test_incoming_circular_reference(self):
        assert not r.DNSIncoming(
            bytes.fromhex(
                "01005e0000fb542a1bf0577608004500006897934000ff11d81bc0a86a31e00000fb"
                "14e914e90054f9b2000084000000000100000000095f7365727669636573075f646e"
                "732d7364045f756470056c6f63616c00000c0001000011940018105f73706f746966"
                "792d636f6e6e656374045f746370c023"
            )
        ).valid

    @unittest.skipIf(not has_working_ipv6(), "Requires IPv6")
    @unittest.skipIf(os.environ.get("SKIP_IPV6"), "IPv6 tests disabled")
    def test_incoming_ipv6(self):
        addr = "2606:2800:220:1:248:1893:25c8:1946"  # example.com
        packed = socket.inet_pton(socket.AF_INET6, addr)
        generated = r.DNSOutgoing(0)
        answer = r.DNSAddress("domain", const._TYPE_AAAA, const._CLASS_IN | const._CLASS_UNIQUE, 1, packed)
        generated.add_additional_answer(answer)
        packet = generated.packets()[0]
        parsed = r.DNSIncoming(packet)
        record = parsed.answers()[0]
        assert isinstance(record, r.DNSAddress)
        assert record.address == packed


def test_dns_compression_rollback_for_corruption():
    """Verify rolling back does not lead to dns compression corruption."""
    out = r.DNSOutgoing(const._FLAGS_QR_RESPONSE | const._FLAGS_AA)
    address = socket.inet_pton(socket.AF_INET, "192.168.208.5")

    additionals = [
        {
            "name": "HASS Bridge ZJWH FF5137._hap._tcp.local.",
            "address": address,
            "port": 51832,
            "text": b"\x13md=HASS Bridge"
            b" ZJWH\x06pv=1.0\x14id=01:6B:30:FF:51:37\x05c#=12\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=L0m/aQ==",
        },
        {
            "name": "HASS Bridge 3K9A C2582A._hap._tcp.local.",
            "address": address,
            "port": 51834,
            "text": b"\x13md=HASS Bridge"
            b" 3K9A\x06pv=1.0\x14id=E2:AA:5B:C2:58:2A\x05c#=12\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=b2CnzQ==",
        },
        {
            "name": "Master Bed TV CEDB27._hap._tcp.local.",
            "address": address,
            "port": 51830,
            "text": b"\x10md=Master Bed"
            b" TV\x06pv=1.0\x14id=9E:B7:44:CE:DB:27\x05c#=18\x04s#=1\x04ff=0\x05"
            b"ci=31\x04sf=0\x0bsh=CVj1kw==",
        },
        {
            "name": "Living Room TV 921B77._hap._tcp.local.",
            "address": address,
            "port": 51833,
            "text": b"\x11md=Living Room"
            b" TV\x06pv=1.0\x14id=11:61:E7:92:1B:77\x05c#=17\x04s#=1\x04ff=0\x05"
            b"ci=31\x04sf=0\x0bsh=qU77SQ==",
        },
        {
            "name": "HASS Bridge ZC8X FF413D._hap._tcp.local.",
            "address": address,
            "port": 51829,
            "text": b"\x13md=HASS Bridge"
            b" ZC8X\x06pv=1.0\x14id=96:14:45:FF:41:3D\x05c#=12\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=b0QZlg==",
        },
        {
            "name": "HASS Bridge WLTF 4BE61F._hap._tcp.local.",
            "address": address,
            "port": 51837,
            "text": b"\x13md=HASS Bridge"
            b" WLTF\x06pv=1.0\x14id=E0:E7:98:4B:E6:1F\x04c#=2\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=ahAISA==",
        },
        {
            "name": "FrontdoorCamera 8941D1._hap._tcp.local.",
            "address": address,
            "port": 54898,
            "text": b"\x12md=FrontdoorCamera\x06pv=1.0\x14id=9F:B7:DC:89:41:D1\x04c#=2\x04"
            b"s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=0+MXmA==",
        },
        {
            "name": "HASS Bridge W9DN 5B5CC5._hap._tcp.local.",
            "address": address,
            "port": 51836,
            "text": b"\x13md=HASS Bridge"
            b" W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=6fLM5A==",
        },
        {
            "name": "HASS Bridge Y9OO EFF0A7._hap._tcp.local.",
            "address": address,
            "port": 51838,
            "text": b"\x13md=HASS Bridge"
            b" Y9OO\x06pv=1.0\x14id=D3:FE:98:EF:F0:A7\x04c#=2\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=u3bdfw==",
        },
        {
            "name": "Snooze Room TV 6B89B0._hap._tcp.local.",
            "address": address,
            "port": 51835,
            "text": b"\x11md=Snooze Room"
            b" TV\x06pv=1.0\x14id=5F:D5:70:6B:89:B0\x05c#=17\x04s#=1\x04ff=0\x05"
            b"ci=31\x04sf=0\x0bsh=xNTqsg==",
        },
        {
            "name": "AlexanderHomeAssistant 74651D._hap._tcp.local.",
            "address": address,
            "port": 54811,
            "text": b"\x19md=AlexanderHomeAssistant\x06pv=1.0\x14id=59:8A:0B:74:65:1D\x05"
            b"c#=14\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=ccZLPA==",
        },
        {
            "name": "HASS Bridge OS95 39C053._hap._tcp.local.",
            "address": address,
            "port": 51831,
            "text": b"\x13md=HASS Bridge"
            b" OS95\x06pv=1.0\x14id=7E:8C:E6:39:C0:53\x05c#=12\x04s#=1\x04ff=0\x04ci=2"
            b"\x04sf=0\x0bsh=Xfe5LQ==",
        },
    ]

    out.add_answer_at_time(
        DNSText(
            "HASS Bridge W9DN 5B5CC5._hap._tcp.local.",
            const._TYPE_TXT,
            const._CLASS_IN | const._CLASS_UNIQUE,
            const._DNS_OTHER_TTL,
            b"\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1"
            b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==",
        ),
        0,
    )

    for record in additionals:
        out.add_additional_answer(
            r.DNSService(
                record["name"],  # type: ignore
                const._TYPE_SRV,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_HOST_TTL,
                0,
                0,
                record["port"],  # type: ignore
                record["name"],  # type: ignore
            )
        )
        out.add_additional_answer(
            r.DNSText(
                record["name"],  # type: ignore
                const._TYPE_TXT,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_OTHER_TTL,
                record["text"],  # type: ignore
            )
        )
        out.add_additional_answer(
            r.DNSAddress(
                record["name"],  # type: ignore
                const._TYPE_A,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_HOST_TTL,
                record["address"],  # type: ignore
            )
        )

    for packet in out.packets():
        # Verify we can process the packets we created to
        # ensure there is no corruption with the dns compression
        incoming = r.DNSIncoming(packet)
        assert incoming.valid is True
        assert (
            len(incoming.answers())
            == incoming.num_answers + incoming.num_authorities + incoming.num_additionals
        )


def test_tc_bit_in_query_packet():
    """Verify the TC bit is set when known answers exceed the packet size."""
    out = r.DNSOutgoing(const._FLAGS_QR_QUERY | const._FLAGS_AA)
    type_ = "_hap._tcp.local."
    out.add_question(r.DNSQuestion(type_, const._TYPE_PTR, const._CLASS_IN))

    for i in range(30):
        out.add_answer_at_time(
            DNSText(
                f"HASS Bridge W9DN {i}._hap._tcp.local.",
                const._TYPE_TXT,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_OTHER_TTL,
                b"\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1"
                b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==",
            ),
            0,
        )

    packets = out.packets()
    assert len(packets) == 3

    first_packet = r.DNSIncoming(packets[0])
    assert first_packet.truncated
    assert first_packet.valid is True

    second_packet = r.DNSIncoming(packets[1])
    assert second_packet.truncated
    assert second_packet.valid is True

    third_packet = r.DNSIncoming(packets[2])
    assert not third_packet.truncated
    assert third_packet.valid is True


def test_tc_bit_not_set_in_answer_packet():
    """Verify the TC bit is not set when there are no questions and answers exceed the packet size."""
    out = r.DNSOutgoing(const._FLAGS_QR_RESPONSE | const._FLAGS_AA)
    for i in range(30):
        out.add_answer_at_time(
            DNSText(
                f"HASS Bridge W9DN {i}._hap._tcp.local.",
                const._TYPE_TXT,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_OTHER_TTL,
                b"\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1"
                b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==",
            ),
            0,
        )

    packets = out.packets()
    assert len(packets) == 3

    first_packet = r.DNSIncoming(packets[0])
    assert not first_packet.truncated
    assert first_packet.valid is True

    second_packet = r.DNSIncoming(packets[1])
    assert not second_packet.truncated
    assert second_packet.valid is True

    third_packet = r.DNSIncoming(packets[2])
    assert not third_packet.truncated
    assert third_packet.valid is True


# MDNS	76	Standard query 0xffc4 PTR _raop._tcp.local, "QM" question
def test_qm_packet_parser():
    """Test we can parse a query packet with the QM bit."""
    qm_packet = (
        b"\xff\xc4\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x05_raop\x04_tcp\x05local\x00\x00\x0c\x00\x01"
    )
    parsed = DNSIncoming(qm_packet)
    assert parsed.questions[0].unicast is False
    assert ",QM," in str(parsed.questions[0])


# MDNS	115	Standard query 0x0000 PTR _companion-link._tcp.local, "QU" question OPT
def test_qu_packet_parser():
    """Test we can parse a query packet with the QU bit."""
    qu_packet = (
        b"\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x01\x0f_companion-link\x04_tcp\x05local"
        b"\x00\x00\x0c\x80\x01\x00\x00)\x05\xa0\x00\x00\x11\x94\x00\x12\x00\x04\x00\x0e\x00dz{\x8a6\x9czF\x84,\xcaQ\xff"
    )
    parsed = DNSIncoming(qu_packet)
    assert parsed.questions[0].unicast is True
    assert ",QU," in str(parsed.questions[0])


def test_parse_packet_with_nsec_record():
    """Test we can parse a packet with an NSEC record."""
    nsec_packet = (
        b"\x00\x00\x84\x00\x00\x00\x00\x01\x00\x00\x00\x03\x08_meshcop\x04_udp\x05local\x00\x00\x0c\x00"
        b"\x01\x00\x00\x11\x94\x00\x0f\x0cMyHome54 (2)\xc0\x0c\xc0+\x00\x10\x80\x01\x00\x00\x11\x94\x00"
        b")\x0bnn=MyHome54\x13xp=695034D148CC4784\x08tv=0.0.0\xc0+\x00!\x80\x01\x00\x00\x00x\x00\x15\x00"
        b"\x00\x00\x00\xc0'\x0cMaster-Bed-2\xc0\x1a\xc0+\x00/\x80\x01\x00\x00\x11\x94\x00\t\xc0+\x00\x05"
        b"\x00\x00\x80\x00@"
    )
    parsed = DNSIncoming(nsec_packet)
    nsec_record = cast(r.DNSNsec, parsed.answers()[3])
    assert "nsec," in str(nsec_record)
    assert nsec_record.rdtypes == [16, 33]
    assert nsec_record.next_name == "MyHome54 (2)._meshcop._udp.local."


def test_records_same_packet_share_fate():
    """Test records in the same packet all have the same created time."""
    out = r.DNSOutgoing(const._FLAGS_QR_QUERY | const._FLAGS_AA)
    type_ = "_hap._tcp.local."
    out.add_question(r.DNSQuestion(type_, const._TYPE_PTR, const._CLASS_IN))

    for i in range(30):
        out.add_answer_at_time(
            DNSText(
                f"HASS Bridge W9DN {i}._hap._tcp.local.",
                const._TYPE_TXT,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_OTHER_TTL,
                b"\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1"
                b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==",
            ),
            0,
        )

    for packet in out.packets():
        dnsin = DNSIncoming(packet)
        first_time = dnsin.answers()[0].created
        for answer in dnsin.answers():
            assert answer.created == first_time


def test_dns_compression_invalid_skips_bad_name_compress_in_question():
    """Test our wire parser can skip bad compression in questions."""
    packet = (
        b"\x00\x00\x00\x00\x00\x04\x00\x00\x00\x07\x00\x00\x11homeassistant1128\x05l"
        b"ocal\x00\x00\xff\x00\x014homeassistant1128 [534a4794e5ed41879ecf012252d3e02"
        b"a]\x0c_workstation\x04_tcp\xc0\x1e\x00\xff\x00\x014homeassistant1127 [534a47"
        b"94e5ed41879ecf012252d3e02a]\xc0^\x00\xff\x00\x014homeassistant1123 [534a479"
        b"4e5ed41879ecf012252d3e02a]\xc0^\x00\xff\x00\x014homeassistant1118 [534a4794"
        b"e5ed41879ecf012252d3e02a]\xc0^\x00\xff\x00\x01\xc0\x0c\x00\x01\x80"
        b"\x01\x00\x00\x00x\x00\x04\xc0\xa8<\xc3\xc0v\x00\x10\x80\x01\x00\x00\x00"
        b"x\x00\x01\x00\xc0v\x00!\x80\x01\x00\x00\x00x\x00\x1f\x00\x00\x00\x00"
        b"\x00\x00\x11homeassistant1127\x05local\x00\xc0\xb1\x00\x10\x80"
        b"\x01\x00\x00\x00x\x00\x01\x00\xc0\xb1\x00!\x80\x01\x00\x00\x00x\x00\x1f"
        b"\x00\x00\x00\x00\x00\x00\x11homeassistant1123\x05local\x00\xc0)\x00\x10\x80"
        b"\x01\x00\x00\x00x\x00\x01\x00\xc0)\x00!\x80\x01\x00\x00\x00x\x00\x1f"
        b"\x00\x00\x00\x00\x00\x00\x11homeassistant1128\x05local\x00"
    )
    parsed = r.DNSIncoming(packet)
    assert len(parsed.questions) == 4


def test_dns_compression_all_invalid(caplog):
    """Test our wire parser can skip all invalid data."""
    packet = (
        b"\x00\x00\x84\x00\x00\x00\x00\x01\x00\x00\x00\x00!roborock-vacuum-s5e_miio416"
        b"112328\x00\x00/\x80\x01\x00\x00\x00x\x00\t\xc0P\x00\x05@\x00\x00\x00\x00"
    )
    parsed = r.DNSIncoming(packet, ("2.4.5.4", 5353))
    assert len(parsed.questions) == 0
    assert len(parsed.answers()) == 0

    assert " Unable to parse; skipping record" in caplog.text


def test_invalid_next_name_ignored():
    """Test our wire parser does not throw an an invalid next name.

    The RFC states it should be ignored when used with mDNS.
    """
    packet = (
        b"\x00\x00\x00\x00\x00\x01\x00\x02\x00\x00\x00\x00\x07Android\x05local\x00\x00"
        b"\xff\x00\x01\xc0\x0c\x00/\x00\x01\x00\x00\x00x\x00\x08\xc02\x00\x04@"
        b"\x00\x00\x08\xc0\x0c\x00\x01\x00\x01\x00\x00\x00x\x00\x04\xc0\xa8X<"
    )
    parsed = r.DNSIncoming(packet)
    assert len(parsed.questions) == 1
    assert len(parsed.answers()) == 2


def test_dns_compression_invalid_skips_record():
    """Test our wire parser can skip records we do not know how to parse."""
    packet = (
        b"\x00\x00\x84\x00\x00\x00\x00\x06\x00\x00\x00\x00\x04_hap\x04_tcp\x05local\x00\x00\x0c"
        b"\x00\x01\x00\x00\x11\x94\x00\x16\x13eufy HomeBase2-2464\xc0\x0c\x04Eufy\xc0\x16\x00/"
        b"\x80\x01\x00\x00\x00x\x00\x08\xc0\xa6\x00\x04@\x00\x00\x08\xc0'\x00/\x80\x01\x00\x00"
        b"\x11\x94\x00\t\xc0'\x00\x05\x00\x00\x80\x00@\xc0=\x00\x01\x80\x01\x00\x00\x00x\x00\x04"
        b"\xc0\xa8Dp\xc0'\x00!\x80\x01\x00\x00\x00x\x00\x08\x00\x00\x00\x00\xd1_\xc0=\xc0'\x00"
        b"\x10\x80\x01\x00\x00\x11\x94\x00K\x04c#=1\x04ff=2\x14id=38:71:4F:6B:76:00\x08md=T8010"
        b"\x06pv=1.1\x05s#=75\x04sf=1\x04ci=2\x0bsh=xaQk4g=="
    )
    parsed = r.DNSIncoming(packet)
    answer = r.DNSNsec(
        "eufy HomeBase2-2464._hap._tcp.local.",
        const._TYPE_NSEC,
        const._CLASS_IN | const._CLASS_UNIQUE,
        const._DNS_OTHER_TTL,
        "eufy HomeBase2-2464._hap._tcp.local.",
        [const._TYPE_TXT, const._TYPE_SRV],
    )
    assert answer in parsed.answers()


def test_dns_compression_points_forward():
    """Test our wire parser can unpack nsec records with compression."""
    packet = (
        b"\x00\x00\x84\x00\x00\x00\x00\x07\x00\x00\x00\x00\x0eTV Beneden (2)"
        b"\x10_androidtvremote\x04_tcp\x05local\x00\x00\x10\x80\x01\x00\x00\x11"
        b"\x94\x00\x15\x14bt=D8:13:99:AC:98:F1\xc0\x0c\x00/\x80\x01\x00\x00\x11"
        b"\x94\x00\t\xc0\x0c\x00\x05\x00\x00\x80\x00@\tAndroid-3\xc01\x00/\x80"
        b"\x01\x00\x00\x00x\x00\x08\xc0\x9c\x00\x04@\x00\x00\x08\xc0l\x00\x01\x80"
        b"\x01\x00\x00\x00x\x00\x04\xc0\xa8X\x0f\xc0\x0c\x00!\x80\x01\x00\x00\x00"
        b"x\x00\x08\x00\x00\x00\x00\x19B\xc0l\xc0\x1b\x00\x0c\x00\x01\x00\x00\x11"
        b"\x94\x00\x02\xc0\x0c\t_services\x07_dns-sd\x04_udp\xc01\x00\x0c\x00\x01"
        b"\x00\x00\x11\x94\x00\x02\xc0\x1b"
    )
    parsed = r.DNSIncoming(packet)
    answer = r.DNSNsec(
        "TV Beneden (2)._androidtvremote._tcp.local.",
        const._TYPE_NSEC,
        const._CLASS_IN | const._CLASS_UNIQUE,
        const._DNS_OTHER_TTL,
        "TV Beneden (2)._androidtvremote._tcp.local.",
        [const._TYPE_TXT, const._TYPE_SRV],
    )
    assert answer in parsed.answers()
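

# DNS name compression (RFC 1035 section 4.1.4): a length byte with the two
# high bits set (0xc0) starts a two-byte pointer whose low 14 bits are an
# offset from the start of the message. The next few tests hand the parser
# pointers that refer to themselves, point past the end of the packet, or
# form a loop, and verify that parsing terminates instead of recursing.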


def test_dns_compression_points_to_itself():
    """Test our wire parser does not loop forever when a compression pointer points to itself."""
    packet = (
        b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x06domain\x05local\x00\x00\x01"
        b"\x80\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x05\xc0(\x00\x01\x80\x01\x00\x00\x00"
        b"\x01\x00\x04\xc0\xa8\xd0\x06"
    )
    parsed = r.DNSIncoming(packet)
    assert len(parsed.answers()) == 1


def test_dns_compression_points_beyond_packet():
    """Test our wire parser does not fail when the compression pointer points beyond the packet."""
    packet = (
        b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x06domain\x05local\x00\x00\x01"
        b"\x80\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x05\xe7\x0f\x00\x01\x80\x01\x00\x00"
        b"\x00\x01\x00\x04\xc0\xa8\xd0\x06"
    )
    parsed = r.DNSIncoming(packet)
    assert len(parsed.answers()) == 1


def test_dns_compression_generic_failure(caplog):
    """Test our wire parser does not loop forever when dns compression is corrupt."""
    packet = (
        b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x06domain\x05local\x00\x00\x01"
        b"\x80\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x05-\x0c\x00\x01\x80\x01\x00\x00"
        b"\x00\x01\x00\x04\xc0\xa8\xd0\x06"
    )
    parsed = r.DNSIncoming(packet, ("1.2.3.4", 5353))
    assert len(parsed.answers()) == 1
    assert "Received invalid packet from ('1.2.3.4', 5353)" in caplog.text


def test_label_length_attack():
    """Test our wire parser does not loop forever when the name exceeds 253 chars."""
    packet = (
        b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x01d\x01d\x01d\x01d\x01d\x01d"
        b"\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d"
        b"\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d"
        b"\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d"
        b"\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d"
        b"\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d"
        b"\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d"
        b"\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d"
        b"\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x00\x00\x01\x80"
        b"\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x05\xc0\x0c\x00\x01\x80\x01\x00\x00\x00"
        b"\x01\x00\x04\xc0\xa8\xd0\x06"
    )
    parsed = r.DNSIncoming(packet)
    assert len(parsed.answers()) == 0


def test_label_compression_attack():
    """Test our wire parser does not loop forever when exceeding the maximum number of labels."""
    packet = (
        b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x03atk\x00\x00\x01\x80"
        b"\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x05\x03atk\x03atk\x03atk\x03atk\x03"
        b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03"
        b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03"
        b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03"
        b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03"
        b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03"
        b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03"
        b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03"
        b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03"
        b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03"
        b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03"
        b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03"
        b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03"
        b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03"
        b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03"
        b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03"
        b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03"
        b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03"
        b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03"
        b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\xc0"
        b"\x0c\x00\x01\x80\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x06"
    )
    parsed = r.DNSIncoming(packet)
    assert len(parsed.answers()) == 1


def test_dns_compression_loop_attack():
    """Test our wire parser does not loop forever when dns compression is in a loop."""
    packet = (
        b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x03atk\x03dns\x05loc"
        b"al\xc0\x10\x00\x01\x80\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x05\x04a"
        b"tk2\x04dns2\xc0\x14\x00\x01\x80\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x05"
        b"\x04atk3\xc0\x10\x00\x01\x80\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0"
        b"\x05\x04atk4\x04dns5\xc0\x14\x00\x01\x80\x01\x00\x00\x00\x01\x00\x04\xc0"
        b"\xa8\xd0\x05\x04atk5\x04dns2\xc0^\x00\x01\x80\x01\x00\x00\x00\x01\x00"
        b"\x04\xc0\xa8\xd0\x05\xc0s\x00\x01\x80\x01\x00\x00\x00\x01\x00"
        b"\x04\xc0\xa8\xd0\x05\xc0s\x00\x01\x80\x01\x00\x00\x00\x01\x00"
        b"\x04\xc0\xa8\xd0\x05"
    )
    parsed = r.DNSIncoming(packet)
    assert len(parsed.answers()) == 0


def test_txt_after_invalid_nsec_name_still_usable():
    """Test that we can see the txt record after the invalid nsec record."""
    packet = (
        b"\x00\x00\x84\x00\x00\x00\x00\x06\x00\x00\x00\x00\x06_sonos\x04_tcp\x05loc"
        b"al\x00\x00\x0c\x00\x01\x00\x00\x11\x94\x00\x15\x12Sonos-542A1BC9220E"
        b"\xc0\x0c\x12Sonos-542A1BC9220E\xc0\x18\x00/\x80\x01\x00\x00\x00x\x00"
        b"\x08\xc1t\x00\x04@\x00\x00\x08\xc0)\x00/\x80\x01\x00\x00\x11\x94\x00"
        b"\t\xc0)\x00\x05\x00\x00\x80\x00@\xc0)\x00!\x80\x01\x00\x00\x00x"
        b"\x00\x08\x00\x00\x00\x00\x05\xa3\xc0>\xc0>\x00\x01\x80\x01\x00\x00\x00x"
        b"\x00\x04\xc0\xa8\x02:\xc0)\x00\x10\x80\x01\x00\x00\x11\x94\x01*2info=/api"
        b"/v1/players/RINCON_542A1BC9220E01400/info\x06vers=3\x10protovers=1.24.1\nbo"
        b"otseq=11%hhid=Sonos_rYn9K9DLXJe0f3LP9747lbvFvh;mhhid=Sonos_rYn9K9DLXJe0f3LP9"
        b"747lbvFvh.Q45RuMaeC07rfXh7OJGm<location=http://192.168.2.58:1400/xml/device_"
        b"description.xml\x0csslport=1443\x0ehhsslport=1843\tvariant=2\x0emdnssequen"
        b"ce=0"
    )
    parsed = r.DNSIncoming(packet)
    txt_record = cast(r.DNSText, parsed.answers()[4])
    # The NSEC record with the invalid name compression should be skipped
    assert txt_record.text == (
        b"2info=/api/v1/players/RINCON_542A1BC9220E01400/info\x06vers=3\x10protovers"
        b"=1.24.1\nbootseq=11%hhid=Sonos_rYn9K9DLXJe0f3LP9747lbvFvh;mhhid=Sonos_rYn"
        b"9K9DLXJe0f3LP9747lbvFvh.Q45RuMaeC07rfXh7OJGm<location=http://192.168.2.58:14"
        b"00/xml/device_description.xml\x0csslport=1443\x0ehhsslport=1843\tvarian"
        b"t=2\x0emdnssequence=0"
    )
    assert len(parsed.answers()) == 5


def test_parse_matter_packet():
    """Test our wire parser can handle a packet from matter."""
    packet_hex = (
        "000084000000000a00000000075f6d6174746572045f746370056c6f63"
        "616c00000c000100001194002421413336303441463533314638364442"
        "372d30303030303030303030303030303636c00cc00c000c0001000011"
        "94002421333346353633363743453244333646302d3030303030303030"
        "3444423341334541c00cc00c000c000100001194002421414531313941"
        "304130374145304632302d34383742343631363639333638413332c00c"
        "c00c000c00010000119400242141333630344146353331463836444237"
        "2d30303030303030303030303030303237c00cc00c000c000100001194"
        "002421413336303441463533314638364442372d303030303030303030"
        "30303030303637c00cc00c000c00010000119400242133334635363336"
        "3743453244333646302d30303030303030304243363637324136c00cc0"
        "0c000c000100001194002421414531313941304130374145304632302d"
        "39464534383646413645373730464433c00cc00c000c00010000119400"
        "2421413336303441463533314638364442372d30303030303030303030"
        "303030303434c00cc00c000c0001000011940024213935374431413839"
        "44463239343033312d41423337393041444346434231423239c00cc00c"
        "000c000100001194002421413336303441463533314638364442372d30"
        "303030303030303030303030303638c00c"
    )
    parsed = r.DNSIncoming(bytes.fromhex(packet_hex))
    assert len(parsed.answers()) == 10
07070100000080000081A400000000000000000000000167C7AD1600002555000000000000000000000000000000000000002F00000000python-zeroconf-0.146.0/tests/test_services.py"""Unit tests for zeroconf._services."""

from __future__ import annotations

import logging
import os
import socket
import time
import unittest
from threading import Event
from typing import Any

import pytest

import zeroconf as r
from zeroconf import Zeroconf
from zeroconf._services.info import ServiceInfo

from . import _clear_cache, has_working_ipv6

log = logging.getLogger("zeroconf")
original_logging_level = logging.NOTSET


def setup_module():
    global original_logging_level
    original_logging_level = log.level
    log.setLevel(logging.DEBUG)


def teardown_module():
    if original_logging_level != logging.NOTSET:
        log.setLevel(original_logging_level)


class ListenerTest(unittest.TestCase):
    def test_integration_with_listener_class(self):
        sub_service_added = Event()
        service_added = Event()
        service_removed = Event()
        sub_service_updated = Event()
        duplicate_service_added = Event()

        subtype_name = "_printer"
        type_ = "_http._tcp.local."
        subtype = subtype_name + "._sub." + type_
        name = "UPPERxxxyyyæøå"
        registration_name = f"{name}.{subtype}"

        class MyListener(r.ServiceListener):
            def add_service(self, zeroconf, type, name):
                zeroconf.get_service_info(type, name)
                service_added.set()

            def remove_service(self, zeroconf, type, name):
                service_removed.set()

            def update_service(self, zeroconf, type, name):
                pass

        class DuplicateListener(r.ServiceListener):
            def add_service(self, zeroconf, type, name):
                duplicate_service_added.set()

            def remove_service(self, zeroconf, type, name):
                pass

            def update_service(self, zeroconf, type, name):
                pass

        class MySubListener(r.ServiceListener):
            def add_service(self, zeroconf, type, name):
                sub_service_added.set()

            def remove_service(self, zeroconf, type, name):
                pass

            def update_service(self, zeroconf, type, name):
                sub_service_updated.set()

        listener = MyListener()
        zeroconf_browser = Zeroconf(interfaces=["127.0.0.1"])
        zeroconf_browser.add_service_listener(type_, listener)

        properties = {
            "prop_none": None,
            "prop_string": b"a_prop",
            "prop_float": 1.0,
            "prop_blank": b"a blanked string",
            "prop_true": 1,
            "prop_false": 0,
        }
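        # Values that are not bytes get serialized into the TXT record:
        # numbers become their string form (1.0 -> b"1.0", 1 -> b"1",
        # 0 -> b"0") and None becomes a key with no value, as asserted below.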

        zeroconf_registrar = Zeroconf(interfaces=["127.0.0.1"])
        desc: dict[str, Any] = {"path": "/~paulsm/"}
        desc.update(properties)
        addresses = [socket.inet_aton("10.0.1.2")]
        if has_working_ipv6() and not os.environ.get("SKIP_IPV6"):
            addresses.append(socket.inet_pton(socket.AF_INET6, "6001:db8::1"))
            addresses.append(socket.inet_pton(socket.AF_INET6, "2001:db8::1"))
        info_service = ServiceInfo(
            subtype,
            registration_name,
            port=80,
            properties=desc,
            server="ash-2.local.",
            addresses=addresses,
        )
        zeroconf_registrar.register_service(info_service)

        try:
            service_added.wait(1)
            assert service_added.is_set()

            # short pause to allow multicast timers to expire
            time.sleep(3)

            zeroconf_browser.add_service_listener(type_, DuplicateListener())
            duplicate_service_added.wait(
                1
            )  # Ensure a listener for the same type calls back right away from cache

            # clear the answer cache to force query
            _clear_cache(zeroconf_browser)

            cached_info = ServiceInfo(type_, registration_name)
            cached_info.load_from_cache(zeroconf_browser)
            assert cached_info.properties == {}

            # get service info without answer cache
            info = zeroconf_browser.get_service_info(type_, registration_name)
            assert info is not None
            assert info.properties[b"prop_none"] is None
            assert info.properties[b"prop_string"] == properties["prop_string"]
            assert info.properties[b"prop_float"] == b"1.0"
            assert info.properties[b"prop_blank"] == properties["prop_blank"]
            assert info.properties[b"prop_true"] == b"1"
            assert info.properties[b"prop_false"] == b"0"

            assert info.decoded_properties["prop_none"] is None
            assert info.decoded_properties["prop_string"] == b"a_prop".decode("utf-8")
            assert info.decoded_properties["prop_float"] == "1.0"
            assert info.decoded_properties["prop_blank"] == b"a blanked string".decode("utf-8")
            assert info.decoded_properties["prop_true"] == "1"
            assert info.decoded_properties["prop_false"] == "0"

            assert info.addresses == addresses[:1]  # no V6 by default
            assert set(info.addresses_by_version(r.IPVersion.All)) == set(addresses)

            cached_info = ServiceInfo(type_, registration_name)
            cached_info.load_from_cache(zeroconf_browser)
            assert cached_info.properties is not None

            # Populate the cache
            zeroconf_browser.get_service_info(subtype, registration_name)

            # get service info with only the cache
            cached_info = ServiceInfo(subtype, registration_name)
            cached_info.load_from_cache(zeroconf_browser)
            assert cached_info.properties is not None
            assert cached_info.properties[b"prop_float"] == b"1.0"

            # get service info with only the cache with the lowercase name
            cached_info = ServiceInfo(subtype, registration_name.lower())
            cached_info.load_from_cache(zeroconf_browser)
            # Ensure uppercase output is preserved
            assert cached_info.name == registration_name
            assert cached_info.key == registration_name.lower()
            assert cached_info.properties is not None
            assert cached_info.properties[b"prop_float"] == b"1.0"

            info = zeroconf_browser.get_service_info(subtype, registration_name)
            assert info is not None
            assert info.properties is not None
            assert info.properties[b"prop_none"] is None

            cached_info = ServiceInfo(subtype, registration_name.lower())
            cached_info.load_from_cache(zeroconf_browser)
            assert cached_info.properties is not None
            assert cached_info.properties[b"prop_none"] is None

            # test TXT record update
            sublistener = MySubListener()

            zeroconf_browser.add_service_listener(subtype, sublistener)

            properties["prop_blank"] = b"an updated string"
            desc.update(properties)
            info_service = ServiceInfo(
                subtype,
                registration_name,
                80,
                0,
                0,
                desc,
                "ash-2.local.",
                addresses=[socket.inet_aton("10.0.1.2")],
            )
            zeroconf_registrar.update_service(info_service)

            sub_service_added.wait(1)  # we cleared the cache above
            assert sub_service_added.is_set()

            info = zeroconf_browser.get_service_info(type_, registration_name)
            assert info is not None
            assert info.properties[b"prop_blank"] == properties["prop_blank"]
            assert info.decoded_properties["prop_blank"] == b"an updated string".decode("utf-8")

            cached_info = ServiceInfo(subtype, registration_name)
            cached_info.load_from_cache(zeroconf_browser)
            assert cached_info.properties is not None
            assert cached_info.properties[b"prop_blank"] == properties["prop_blank"]
            assert cached_info.decoded_properties["prop_blank"] == b"an updated string".decode("utf-8")

            zeroconf_registrar.unregister_service(info_service)
            service_removed.wait(1)
            assert service_removed.is_set()

        finally:
            zeroconf_registrar.close()
            zeroconf_browser.remove_service_listener(listener)
            zeroconf_browser.close()


def test_servicelisteners_raise_not_implemented():
    """Verify service listeners raise when one of the methods is not implemented."""

    class MyPartialListener(r.ServiceListener):
        """A listener that does not implement anything."""

    zc = r.Zeroconf(interfaces=["127.0.0.1"])

    with pytest.raises(NotImplementedError):
        MyPartialListener().add_service(
            zc, "_tivo-videostream._tcp.local.", "Tivo1._tivo-videostream._tcp.local."
        )
    with pytest.raises(NotImplementedError):
        MyPartialListener().remove_service(
            zc, "_tivo-videostream._tcp.local.", "Tivo1._tivo-videostream._tcp.local."
        )
    with pytest.raises(NotImplementedError):
        MyPartialListener().update_service(
            zc, "_tivo-videostream._tcp.local.", "Tivo1._tivo-videostream._tcp.local."
        )

    zc.close()


def test_signal_registration_interface():
    """Test adding and removing from the SignalRegistrationInterface."""

    interface = r.SignalRegistrationInterface([])

    def dummy():
        pass

    interface.register_handler(dummy)
    interface.unregister_handler(dummy)

    with pytest.raises(ValueError):
        interface.unregister_handler(dummy)
07070100000081000081A400000000000000000000000167C7AD1600000AF9000000000000000000000000000000000000002E00000000python-zeroconf-0.146.0/tests/test_updates.py"""Unit tests for zeroconf._updates."""

from __future__ import annotations

import logging
import socket
import time

import pytest

import zeroconf as r
from zeroconf import Zeroconf, const
from zeroconf._record_update import RecordUpdate
from zeroconf._services.browser import ServiceBrowser
from zeroconf._services.info import ServiceInfo

log = logging.getLogger("zeroconf")
original_logging_level = logging.NOTSET


def setup_module():
    global original_logging_level
    original_logging_level = log.level
    log.setLevel(logging.DEBUG)


def teardown_module():
    if original_logging_level != logging.NOTSET:
        log.setLevel(original_logging_level)


def test_legacy_record_update_listener():
    """Test a RecordUpdateListener that does not implement update_records."""

    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=["127.0.0.1"])

    with pytest.raises(RuntimeError):
        r.RecordUpdateListener().update_record(
            zc,
            0,
            r.DNSRecord("irrelevant", const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL),
        )

    updates = []

    class LegacyRecordUpdateListener(r.RecordUpdateListener):
        """A RecordUpdateListener that does not implement update_records."""

        def update_record(self, zc: Zeroconf, now: float, record: r.DNSRecord) -> None:
            nonlocal updates
            updates.append(record)

    listener = LegacyRecordUpdateListener()

    zc.add_listener(listener, None)

    # dummy service callback
    def on_service_state_change(zeroconf, service_type, state_change, name):
        pass

    # start a browser
    type_ = "_homeassistant._tcp.local."
    name = "MyTestHome"
    browser = ServiceBrowser(zc, type_, [on_service_state_change])

    info_service = ServiceInfo(
        type_,
        f"{name}.{type_}",
        80,
        0,
        0,
        {"path": "/~paulsm/"},
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )

    zc.register_service(info_service)

    time.sleep(0.001)

    browser.cancel()

    assert len(updates)
    assert any(isinstance(update, r.DNSPointer) and update.name == type_ for update in updates)

    zc.remove_listener(listener)
    # Removing a second time should not throw
    zc.remove_listener(listener)

    zc.close()


def test_record_update_compat():
    """Test a RecordUpdate can fetch by index."""
    new = r.DNSPointer("irrelevant", const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, "new")
    old = r.DNSPointer("irrelevant", const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, "old")
    update = RecordUpdate(new, old)
    assert update[0] == new
    assert update[1] == old
    with pytest.raises(IndexError):
        update[2]
    assert update.new == new
    assert update.old == old
07070100000082000041ED00000000000000000000000267C7AD1600000000000000000000000000000000000000000000002400000000python-zeroconf-0.146.0/tests/utils07070100000083000081A400000000000000000000000167C7AD16000003B2000000000000000000000000000000000000003000000000python-zeroconf-0.146.0/tests/utils/__init__.py"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine

This module provides a framework for the use of DNS Service Discovery
using IP multicast.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""

from __future__ import annotations
07070100000084000081A400000000000000000000000167C7AD1600001408000000000000000000000000000000000000003400000000python-zeroconf-0.146.0/tests/utils/test_asyncio.py"""Unit tests for zeroconf._utils.asyncio."""

from __future__ import annotations

import asyncio
import concurrent.futures
import contextlib
import threading
import time
from unittest.mock import patch

import pytest

from zeroconf import EventLoopBlocked
from zeroconf._engine import _CLOSE_TIMEOUT
from zeroconf._utils import asyncio as aioutils
from zeroconf.const import _LOADED_SYSTEM_TIMEOUT


@pytest.mark.asyncio
async def test_async_get_all_tasks() -> None:
    """Test we can get all tasks in the event loop.

    We make sure we handle RuntimeError here as
    this is not thread safe under PyPy
    """
    loop = aioutils.get_running_loop()
    assert loop is not None
    await aioutils._async_get_all_tasks(loop)
    if not hasattr(asyncio, "all_tasks"):
        return
    with patch("zeroconf._utils.asyncio.asyncio.all_tasks", side_effect=RuntimeError):
        await aioutils._async_get_all_tasks(loop)


@pytest.mark.asyncio
async def test_get_running_loop_from_async() -> None:
    """Test we can get the event loop."""
    assert isinstance(aioutils.get_running_loop(), asyncio.AbstractEventLoop)


def test_get_running_loop_no_loop() -> None:
    """Test we get None when there is no loop running."""
    assert aioutils.get_running_loop() is None


@pytest.mark.asyncio
async def test_wait_future_or_timeout_times_out() -> None:
    """Test wait_future_or_timeout will timeout."""
    loop = asyncio.get_running_loop()
    test_future = loop.create_future()
    await aioutils.wait_future_or_timeout(test_future, 0.1)

    task = asyncio.ensure_future(test_future)
    await asyncio.sleep(0.1)

    async def _async_wait_or_timeout():
        await aioutils.wait_future_or_timeout(test_future, 0.1)

    # Test high lock contention
    await asyncio.gather(*[_async_wait_or_timeout() for _ in range(100)])

    task.cancel()
    with contextlib.suppress(asyncio.CancelledError):
        await task


def test_shutdown_loop() -> None:
    """Test shutting down an event loop."""
    loop = None
    loop_thread_ready = threading.Event()
    runcoro_thread_ready = threading.Event()

    def _run_loop() -> None:
        nonlocal loop
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        loop_thread_ready.set()
        loop.run_forever()

    loop_thread = threading.Thread(target=_run_loop, daemon=True)
    loop_thread.start()
    loop_thread_ready.wait()

    async def _still_running():
        await asyncio.sleep(5)

    def _run_coro() -> None:
        runcoro_thread_ready.set()
        assert loop is not None
        with contextlib.suppress(concurrent.futures.TimeoutError):
            asyncio.run_coroutine_threadsafe(_still_running(), loop).result(1)

    runcoro_thread = threading.Thread(target=_run_coro, daemon=True)
    runcoro_thread.start()
    runcoro_thread_ready.wait()

    time.sleep(0.1)
    assert loop is not None
    aioutils.shutdown_loop(loop)
    for _ in range(5):
        if not loop.is_running():
            break
        time.sleep(0.05)

    assert loop.is_running() is False
    runcoro_thread.join()


def test_cumulative_timeouts_less_than_close_plus_buffer():
    """Test that the combined async timeouts are shorter than the close timeout with the buffer.

    We want to make sure that the close timeout is the one that gets
    raised if something goes wrong.
    """
    assert (
        aioutils._TASK_AWAIT_TIMEOUT + aioutils._GET_ALL_TASKS_TIMEOUT + aioutils._WAIT_FOR_LOOP_TASKS_TIMEOUT
    ) < 1 + _CLOSE_TIMEOUT + _LOADED_SYSTEM_TIMEOUT


@pytest.mark.asyncio
async def test_run_coro_with_timeout() -> None:
    """Test running a coroutine with a timeout raises EventLoopBlocked."""
    loop = asyncio.get_running_loop()
    task: asyncio.Task | None = None

    async def _saved_sleep_task():
        nonlocal task
        task = asyncio.create_task(asyncio.sleep(0.2))
        assert task is not None
        await task

    def _run_in_loop():
        aioutils.run_coro_with_timeout(_saved_sleep_task(), loop, 0.1)

    with pytest.raises(EventLoopBlocked), patch.object(aioutils, "_LOADED_SYSTEM_TIMEOUT", 0.0):
        await loop.run_in_executor(None, _run_in_loop)

    assert task is not None
    # ensure the thread is shutdown
    task.cancel()
    await asyncio.sleep(0)
    await _shutdown_default_executor(loop)


# Remove this when we drop support for older python versions
# since we can use loop.shutdown_default_executor() in 3.9+
async def _shutdown_default_executor(loop: asyncio.AbstractEventLoop) -> None:
    """Backport of cpython 3.9 schedule the shutdown of the default executor."""
    future = loop.create_future()

    def _do_shutdown() -> None:
        try:
            loop._default_executor.shutdown(wait=True)  # type: ignore  # pylint: disable=protected-access
            loop.call_soon_threadsafe(future.set_result, None)
        except Exception as ex:  # pylint: disable=broad-except
            loop.call_soon_threadsafe(future.set_exception, ex)

    thread = threading.Thread(target=_do_shutdown)
    thread.start()
    try:
        await future
    finally:
        thread.join()
07070100000085000081A400000000000000000000000167C7AD1600000CC6000000000000000000000000000000000000003600000000python-zeroconf-0.146.0/tests/utils/test_ipaddress.py"""Unit tests for zeroconf._utils.ipaddress."""

from __future__ import annotations

from zeroconf import const
from zeroconf._dns import DNSAddress
from zeroconf._utils import ipaddress


def test_cached_ip_addresses_wrapper():
    """Test the cached_ip_addresses_wrapper."""
    assert ipaddress.cached_ip_addresses("") is None
    assert ipaddress.cached_ip_addresses("foo") is None
    assert (
        str(ipaddress.cached_ip_addresses(b"&\x06(\x00\x02 \x00\x01\x02H\x18\x93%\xc8\x19F"))
        == "2606:2800:220:1:248:1893:25c8:1946"
    )
    loop_back_ipv6 = ipaddress.cached_ip_addresses("::1")
    assert loop_back_ipv6 == ipaddress.IPv6Address("::1")
    assert loop_back_ipv6.is_loopback is True

    assert hash(loop_back_ipv6) == hash(ipaddress.IPv6Address("::1"))

    loop_back_ipv4 = ipaddress.cached_ip_addresses("127.0.0.1")
    assert loop_back_ipv4 == ipaddress.IPv4Address("127.0.0.1")
    assert loop_back_ipv4.is_loopback is True

    assert hash(loop_back_ipv4) == hash(ipaddress.IPv4Address("127.0.0.1"))

    ipv4 = ipaddress.cached_ip_addresses("169.254.0.0")
    assert ipv4 is not None
    assert ipv4.is_link_local is True
    assert ipv4.is_unspecified is False

    ipv4 = ipaddress.cached_ip_addresses("0.0.0.0")
    assert ipv4 is not None
    assert ipv4.is_link_local is False
    assert ipv4.is_unspecified is True

    ipv6 = ipaddress.cached_ip_addresses("fe80::1")
    assert ipv6 is not None
    assert ipv6.is_link_local is True
    assert ipv6.is_unspecified is False

    ipv6 = ipaddress.cached_ip_addresses("0:0:0:0:0:0:0:0")
    assert ipv6 is not None
    assert ipv6.is_link_local is False
    assert ipv6.is_unspecified is True


def test_get_ip_address_object_from_record():
    """Test the get_ip_address_object_from_record."""
    # not link local
    packed = b"&\x06(\x00\x02 \x00\x01\x02H\x18\x93%\xc8\x19F"
    record = DNSAddress(
        "domain.local",
        const._TYPE_AAAA,
        const._CLASS_IN | const._CLASS_UNIQUE,
        1,
        packed,
        scope_id=3,
    )
    assert record.scope_id == 3
    assert ipaddress.get_ip_address_object_from_record(record) == ipaddress.IPv6Address(
        "2606:2800:220:1:248:1893:25c8:1946"
    )

    # link local
    packed = b"\xfe\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01"
    record = DNSAddress(
        "domain.local",
        const._TYPE_AAAA,
        const._CLASS_IN | const._CLASS_UNIQUE,
        1,
        packed,
        scope_id=3,
    )
    assert record.scope_id == 3
    assert ipaddress.get_ip_address_object_from_record(record) == ipaddress.IPv6Address("fe80::1%3")
    record = DNSAddress(
        "domain.local",
        const._TYPE_AAAA,
        const._CLASS_IN | const._CLASS_UNIQUE,
        1,
        packed,
    )
    assert record.scope_id is None
    assert ipaddress.get_ip_address_object_from_record(record) == ipaddress.IPv6Address("fe80::1")
    record = DNSAddress(
        "domain.local",
        const._TYPE_A,
        const._CLASS_IN | const._CLASS_UNIQUE,
        1,
        packed,
        scope_id=0,
    )
    assert record.scope_id == 0
    # Ensure scope_id of 0 is not appended to the address
    assert ipaddress.get_ip_address_object_from_record(record) == ipaddress.IPv6Address("fe80::1")
07070100000086000081A400000000000000000000000167C7AD1600000CD4000000000000000000000000000000000000003100000000python-zeroconf-0.146.0/tests/utils/test_name.py"""Unit tests for zeroconf._utils.name."""

from __future__ import annotations

import socket

import pytest

from zeroconf import BadTypeInNameException
from zeroconf._services.info import ServiceInfo, instance_name_from_service_info
from zeroconf._utils import name as nameutils


def test_service_type_name_overlong_type():
    """Test overlong service_type_name type."""
    with pytest.raises(BadTypeInNameException):
        nameutils.service_type_name("Tivo1._tivo-videostream._tcp.local.")
    nameutils.service_type_name("Tivo1._tivo-videostream._tcp.local.", strict=False)


def test_service_type_name_overlong_full_name():
    """Test overlong service_type_name full name."""
    long_name = "Tivo1Tivo1Tivo1Tivo1Tivo1Tivo1Tivo1Tivo1" * 100
    with pytest.raises(BadTypeInNameException):
        nameutils.service_type_name(f"{long_name}._tivo-videostream._tcp.local.")
    with pytest.raises(BadTypeInNameException):
        nameutils.service_type_name(f"{long_name}._tivo-videostream._tcp.local.", strict=False)


@pytest.mark.parametrize(
    "instance_name, service_type",
    (
        ("CustomerInformationService-F4D4885E9EEB", "_ibisip_http._tcp.local."),
        ("DeviceManagementService_F4D4885E9EEB", "_ibisip_http._tcp.local."),
    ),
)
def test_service_type_name_non_strict_compliant_names(instance_name, service_type):
    """Test service_type_name for valid names, but not strict-compliant."""
    desc = {"path": "/~paulsm/"}
    service_name = f"{instance_name}.{service_type}"
    service_server = "ash-1.local."
    service_address = socket.inet_aton("10.0.1.2")
    info = ServiceInfo(
        service_type,
        service_name,
        22,
        0,
        0,
        desc,
        service_server,
        addresses=[service_address],
    )
    assert info.get_name() == instance_name

    with pytest.raises(BadTypeInNameException):
        nameutils.service_type_name(service_name)
    with pytest.raises(BadTypeInNameException):
        instance_name_from_service_info(info)

    nameutils.service_type_name(service_name, strict=False)
    assert instance_name_from_service_info(info, strict=False) == instance_name


def test_possible_types():
    """Test possible types from name."""
    assert nameutils.possible_types(".") == set()
    assert nameutils.possible_types("local.") == set()
    assert nameutils.possible_types("_tcp.local.") == set()
    assert nameutils.possible_types("_test-srvc-type._tcp.local.") == {"_test-srvc-type._tcp.local."}
    assert nameutils.possible_types("_any._tcp.local.") == {"_any._tcp.local."}
    assert nameutils.possible_types(".._x._tcp.local.") == {"_x._tcp.local."}
    assert nameutils.possible_types("x.y._http._tcp.local.") == {"_http._tcp.local."}
    assert nameutils.possible_types("1.2.3._mqtt._tcp.local.") == {"_mqtt._tcp.local."}
    assert nameutils.possible_types("x.sub._http._tcp.local.") == {"_http._tcp.local."}
    assert nameutils.possible_types("6d86f882b90facee9170ad3439d72a4d6ee9f511._zget._http._tcp.local.") == {
        "_http._tcp.local.",
        "_zget._http._tcp.local.",
    }
    assert nameutils.possible_types("my._printer._sub._http._tcp.local.") == {
        "_http._tcp.local.",
        "_sub._http._tcp.local.",
        "_printer._sub._http._tcp.local.",
    }
07070100000087000081A400000000000000000000000167C7AD1600002E94000000000000000000000000000000000000003000000000python-zeroconf-0.146.0/tests/utils/test_net.py"""Unit tests for zeroconf._utils.net."""

from __future__ import annotations

import errno
import socket
import sys
from unittest.mock import MagicMock, Mock, PropertyMock, patch

import ifaddr
import pytest

import zeroconf as r
from zeroconf._utils import net as netutils


def _generate_mock_adapters():
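    """Return mock ifaddr adapters: IPv6-only eth0, loopback lo0, IPv4 eth1 and link-local vtun0."""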
    mock_lo0 = Mock(spec=ifaddr.Adapter)
    mock_lo0.nice_name = "lo0"
    mock_lo0.ips = [ifaddr.IP("127.0.0.1", 8, "lo0")]
    mock_lo0.index = 0
    mock_eth0 = Mock(spec=ifaddr.Adapter)
    mock_eth0.nice_name = "eth0"
    mock_eth0.ips = [ifaddr.IP(("2001:db8::", 1, 1), 8, "eth0")]
    mock_eth0.index = 1
    mock_eth1 = Mock(spec=ifaddr.Adapter)
    mock_eth1.nice_name = "eth1"
    mock_eth1.ips = [ifaddr.IP("192.168.1.5", 23, "eth1")]
    mock_eth1.index = 2
    mock_vtun0 = Mock(spec=ifaddr.Adapter)
    mock_vtun0.nice_name = "vtun0"
    mock_vtun0.ips = [ifaddr.IP("169.254.3.2", 16, "vtun0")]
    mock_vtun0.index = 3
    return [mock_eth0, mock_lo0, mock_eth1, mock_vtun0]


def test_ip6_to_address_and_index():
    """Test we can extract from mocked adapters."""
    adapters = _generate_mock_adapters()
    assert netutils.ip6_to_address_and_index(adapters, "2001:db8::") == (
        ("2001:db8::", 1, 1),
        1,
    )
    assert netutils.ip6_to_address_and_index(adapters, "2001:db8::%1") == (
        ("2001:db8::", 1, 1),
        1,
    )
    with pytest.raises(RuntimeError):
        netutils.ip6_to_address_and_index(adapters, "2005:db8::")


def test_interface_index_to_ip6_address():
    """Test we can extract from mocked adapters."""
    adapters = _generate_mock_adapters()
    assert netutils.interface_index_to_ip6_address(adapters, 1) == ("2001:db8::", 1, 1)

    # call with an invalid adapter index
    with pytest.raises(RuntimeError):
        netutils.interface_index_to_ip6_address(adapters, 6)

    # call with an adapter that has an IPv4 address only
    with pytest.raises(RuntimeError):
        netutils.interface_index_to_ip6_address(adapters, 2)


def test_ip6_addresses_to_indexes():
    """Test we can extract from mocked adapters."""
    interfaces = [1]
    with patch(
        "zeroconf._utils.net.ifaddr.get_adapters",
        return_value=_generate_mock_adapters(),
    ):
        assert netutils.ip6_addresses_to_indexes(interfaces) == [(("2001:db8::", 1, 1), 1)]

    interfaces_2 = ["2001:db8::"]
    with patch(
        "zeroconf._utils.net.ifaddr.get_adapters",
        return_value=_generate_mock_adapters(),
    ):
        assert netutils.ip6_addresses_to_indexes(interfaces_2) == [(("2001:db8::", 1, 1), 1)]


def test_normalize_interface_choice_errors():
    """Test we generate exception on invalid input."""
    with (
        patch("zeroconf._utils.net.get_all_addresses", return_value=[]),
        patch("zeroconf._utils.net.get_all_addresses_v6", return_value=[]),
        pytest.raises(RuntimeError),
    ):
        netutils.normalize_interface_choice(r.InterfaceChoice.All)

    with pytest.raises(TypeError):
        netutils.normalize_interface_choice("1.2.3.4")


@pytest.mark.parametrize(
    "errno,expected_result",
    [
        (errno.EADDRINUSE, False),
        (errno.EADDRNOTAVAIL, False),
        (errno.EINVAL, False),
        (0, True),
    ],
)
def test_add_multicast_member_socket_errors(err, expected_result):
    """Test we handle socket errors when adding multicast members."""
    if err:
        setsockopt_mock = Mock(side_effect=OSError(err, f"Error: {err}"))
    else:
        setsockopt_mock = Mock()
    fileno_mock = PropertyMock(return_value=10)
    socket_mock = Mock(setsockopt=setsockopt_mock, fileno=fileno_mock)
    assert r.add_multicast_member(socket_mock, "0.0.0.0") == expected_result


def test_autodetect_ip_version():
    """Tests for auto detecting IPVersion based on interface ips."""
    assert r.autodetect_ip_version(["1.3.4.5"]) is r.IPVersion.V4Only
    assert r.autodetect_ip_version([]) is r.IPVersion.V4Only
    assert r.autodetect_ip_version(["::1", "1.2.3.4"]) is r.IPVersion.All
    assert r.autodetect_ip_version(["::1"]) is r.IPVersion.V6Only


def test_disable_ipv6_only_or_raise():
    """Test that IPV6_V6ONLY failing logs a nice error message and still raises."""
    errors_logged = []

    def _log_error(*args):
        errors_logged.append(args)

    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    with (
        pytest.raises(OSError),
        patch.object(netutils.log, "error", _log_error),
        patch("socket.socket.setsockopt", side_effect=OSError),
    ):
        netutils.disable_ipv6_only_or_raise(sock)

    assert (
        errors_logged[0][0]
        == "Support for dual V4-V6 sockets is not present, use IPVersion.V4 or IPVersion.V6"
    )


@pytest.mark.skipif(not hasattr(socket, "SO_REUSEPORT"), reason="System does not have SO_REUSEPORT")
def test_set_so_reuseport_if_available_is_present():
    """Test that setting socket.SO_REUSEPORT only OSError errno.ENOPROTOOPT is trapped."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    with pytest.raises(OSError), patch("socket.socket.setsockopt", side_effect=OSError):
        netutils.set_so_reuseport_if_available(sock)

    with patch("socket.socket.setsockopt", side_effect=OSError(errno.ENOPROTOOPT, None)):
        netutils.set_so_reuseport_if_available(sock)


@pytest.mark.skipif(hasattr(socket, "SO_REUSEPORT"), reason="System has SO_REUSEPORT")
def test_set_so_reuseport_if_available_not_present():
    """Test that we do not try to set SO_REUSEPORT if it is not present."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    with patch("socket.socket.setsockopt", side_effect=OSError):
        netutils.set_so_reuseport_if_available(sock)


def test_set_mdns_port_socket_options_for_ip_version():
    """Test OSError with errno with EINVAL and bind address ''.

    from setsockopt IP_MULTICAST_TTL does not raise."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    # Should raise on EPERM always
    with pytest.raises(OSError), patch("socket.socket.setsockopt", side_effect=OSError(errno.EPERM, None)):
        netutils.set_mdns_port_socket_options_for_ip_version(sock, ("",), r.IPVersion.V4Only)

    # Should raise on EINVAL always when bind address is not ''
    with pytest.raises(OSError), patch("socket.socket.setsockopt", side_effect=OSError(errno.EINVAL, None)):
        netutils.set_mdns_port_socket_options_for_ip_version(sock, ("127.0.0.1",), r.IPVersion.V4Only)

    # Should not raise on EINVAL when bind address is ''
    with patch("socket.socket.setsockopt", side_effect=OSError(errno.EINVAL, None)):
        netutils.set_mdns_port_socket_options_for_ip_version(sock, ("",), r.IPVersion.V4Only)


def test_add_multicast_member(caplog: pytest.LogCaptureFixture) -> None:
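    """Test add_multicast_member maps setsockopt errno values to True/False or re-raises."""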
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    interface = "127.0.0.1"

    # EPERM should always raise
    with pytest.raises(OSError), patch("socket.socket.setsockopt", side_effect=OSError(errno.EPERM, None)):
        netutils.add_multicast_member(sock, interface)

    # EADDRINUSE should return False
    with patch("socket.socket.setsockopt", side_effect=OSError(errno.EADDRINUSE, None)):
        assert netutils.add_multicast_member(sock, interface) is False

    # EADDRNOTAVAIL should return False
    with patch("socket.socket.setsockopt", side_effect=OSError(errno.EADDRNOTAVAIL, None)):
        assert netutils.add_multicast_member(sock, interface) is False

    # EINVAL should return False
    with patch("socket.socket.setsockopt", side_effect=OSError(errno.EINVAL, None)):
        assert netutils.add_multicast_member(sock, interface) is False

    # ENOPROTOOPT should return False
    with patch("socket.socket.setsockopt", side_effect=OSError(errno.ENOPROTOOPT, None)):
        assert netutils.add_multicast_member(sock, interface) is False

    # ENODEV should raise for ipv4
    with pytest.raises(OSError), patch("socket.socket.setsockopt", side_effect=OSError(errno.ENODEV, None)):
        netutils.add_multicast_member(sock, interface)

    # ENODEV should return False for ipv6
    with patch("socket.socket.setsockopt", side_effect=OSError(errno.ENODEV, None)):
        assert netutils.add_multicast_member(sock, ("2001:db8::", 1, 1)) is False  # type: ignore[arg-type]

    # No IPv6 support should return False for IPv6
    with patch("socket.inet_pton", side_effect=OSError()):
        assert netutils.add_multicast_member(sock, ("2001:db8::", 1, 1)) is False  # type: ignore[arg-type]

    # No error should return True
    with patch("socket.socket.setsockopt"):
        assert netutils.add_multicast_member(sock, interface) is True

    # Running out of IGMP memberships is forgiving and logs about igmp_max_memberships on Linux
    caplog.clear()
    with (
        patch.object(sys, "platform", "linux"),
        patch("socket.socket.setsockopt", side_effect=OSError(errno.ENOBUFS, "No buffer space available")),
    ):
        assert netutils.add_multicast_member(sock, interface) is False
        assert "No buffer space available" in caplog.text
        assert "net.ipv4.igmp_max_memberships" in caplog.text

    # Running out of IGMP memberships is forgiving and logs, without the Linux-specific hint
    caplog.clear()
    with (
        patch.object(sys, "platform", "darwin"),
        patch("socket.socket.setsockopt", side_effect=OSError(errno.ENOBUFS, "No buffer space available")),
    ):
        assert netutils.add_multicast_member(sock, interface) is False
        assert "No buffer space available" in caplog.text
        assert "net.ipv4.igmp_max_memberships" not in caplog.text


def test_bind_raises_skips_address():
    """Test bind failing in new_socket returns None on EADDRNOTAVAIL."""
    err = errno.EADDRNOTAVAIL

    def _mock_socket(*args, **kwargs):
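        # every socket created while socket.socket is patched fails to bind
        # with the errno currently held in `err`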
        sock = MagicMock()
        sock.bind = MagicMock(side_effect=OSError(err, f"Error: {err}"))
        return sock

    with patch("socket.socket", _mock_socket):
        assert netutils.new_socket(("0.0.0.0", 0)) is None  # type: ignore[arg-type]

    err = errno.EAGAIN
    with pytest.raises(OSError), patch("socket.socket", _mock_socket):
        netutils.new_socket(("0.0.0.0", 0))  # type: ignore[arg-type]


def test_bind_raises_address_in_use(caplog: pytest.LogCaptureFixture) -> None:
    """Test bind failing in new_socket returns None on EADDRINUSE."""

    def _mock_socket(*args, **kwargs):
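        # bind always fails with EADDRINUSE so new_socket hits the in-use path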
        sock = MagicMock()
        sock.bind = MagicMock(side_effect=OSError(errno.EADDRINUSE, f"Error: {errno.EADDRINUSE}"))
        return sock

    with (
        pytest.raises(OSError),
        patch.object(sys, "platform", "darwin"),
        patch("socket.socket", _mock_socket),
    ):
        netutils.new_socket(("0.0.0.0", 0))  # type: ignore[arg-type]
    assert (
        "On BSD based systems sharing the same port with "
        "another stack may require processes to run with the same UID"
    ) in caplog.text
    assert (
        "When using avahi, make sure disallow-other-stacks is set to no in avahi-daemon.conf" in caplog.text
    )

    caplog.clear()
    with pytest.raises(OSError), patch.object(sys, "platform", "linux"), patch("socket.socket", _mock_socket):
        netutils.new_socket(("0.0.0.0", 0))  # type: ignore[arg-type]
    assert (
        "On BSD based systems sharing the same port with "
        "another stack may require processes to run with the same UID"
    ) not in caplog.text
    assert (
        "When using avahi, make sure disallow-other-stacks is set to no in avahi-daemon.conf" in caplog.text
    )


def test_new_respond_socket_new_socket_returns_none():
    """Test new_respond_socket returns None if new_socket returns None."""
    with patch.object(netutils, "new_socket", return_value=None):
        assert netutils.new_respond_socket(("0.0.0.0", 0)) is None  # type: ignore[arg-type]
07070100000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000B00000000TRAILER!!!