File hupper-1.12.1.obscpio of Package python-hupper

07070100000000000081A400000000000000000000000165B376D0000000BC000000000000000000000000000000000000001A00000000hupper-1.12.1/.coveragerc[run]
parallel = true
source =
    hupper
    tests
omit =
    src/hupper/winapi.py

[paths]
source =
    src/hupper
    */site-packages/hupper

[report]
show_missing = true
precision = 2
07070100000001000081A400000000000000000000000165B376D000000175000000000000000000000000000000000000001600000000hupper-1.12.1/.flake8[flake8]
max-line-length = 79
ignore =
    # E203: whitespace before ':' (black fails to be PEP8 compliant)
    E203
    # E731: do not assign a lambda expression, use a def
    E731
    # W503: line break before binary operator (flake8 is not PEP8 compliant)
    W503
    # W504: line break after binary operator (flake8 is not PEP8 compliant)
    W504
show-source = True
07070100000002000041ED00000000000000000000000265B376D000000000000000000000000000000000000000000000001600000000hupper-1.12.1/.github07070100000003000081A400000000000000000000000165B376D0000000DA000000000000000000000000000000000000002500000000hupper-1.12.1/.github/dependabot.yml# Set update schedule for GitHub Actions

version: 2
updates:

  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      # Check for updates to GitHub Actions every weekday
      interval: "daily"
07070100000004000041ED00000000000000000000000265B376D000000000000000000000000000000000000000000000002000000000hupper-1.12.1/.github/workflows07070100000005000081A400000000000000000000000165B376D000000BB8000000000000000000000000000000000000002D00000000hupper-1.12.1/.github/workflows/ci-tests.ymlname: Build and test

on:
    # Only on pushes to main or one of the release branches we build on push
    push:
        branches:
            - main
            - "[0-9].[0-9]+-branch"
        tags:
            - "*"
    # Build pull requests
    pull_request:

jobs:
    test:
        strategy:
            matrix:
                py:
                    - "3.7"
                    - "3.8"
                    - "3.9"
                    - "3.10"
                    - "3.11"
                    - "3.12"
                    - "pypy-3.8"
                os:
                    - "ubuntu-latest"
                    - "windows-2022"
                    - "macos-12"
                architecture:
                    - x64
                    - x86

                include:
                    # Only run coverage on ubuntu-20.04, except on pypy3
                    - os: "ubuntu-latest"
                      pytest-args: "--cov"
                    - os: "ubuntu-latest"
                      py: "pypy-3.8"
                      pytest-args: ""

                exclude:
                    # Linux and macOS don't have x86 python
                    - os: "ubuntu-latest"
                      architecture: x86
                    - os: "macos-12"
                      architecture: x86

        name: "Python: ${{ matrix.py }}-${{ matrix.architecture }} on ${{ matrix.os }}"
        runs-on: ${{ matrix.os }}
        steps:
            - uses: actions/checkout@v4
            - name: Setup python
              uses: actions/setup-python@v5
              with:
                  python-version: ${{ matrix.py }}
                  architecture: ${{ matrix.architecture }}
            - run: pip install tox
            - run: ulimit -n 4096
              if: ${{ runner.os == 'macOS' }}
            - name: Running tox
              run: tox -e py -- ${{ matrix.pytest-args }}
    coverage:
        runs-on: ubuntu-latest
        name: Validate coverage
        steps:
            - uses: actions/checkout@v4
            - name: Setup python
              uses: actions/setup-python@v5
              with:
                  python-version: 3.9
                  architecture: x64
            - run: pip install tox
            - run: tox -e py39,coverage
    docs:
        runs-on: ubuntu-latest
        name: Build the documentation
        steps:
            - uses: actions/checkout@v4
            - name: Setup python
              uses: actions/setup-python@v5
              with:
                  python-version: 3.9
                  architecture: x64
            - run: pip install tox
            - run: tox -e docs
    lint:
        runs-on: ubuntu-latest
        name: Lint the package
        steps:
            - uses: actions/checkout@v4
            - name: Setup python
              uses: actions/setup-python@v5
              with:
                  python-version: 3.9
                  architecture: x64
            - run: pip install tox
            - run: tox -e lint
07070100000006000081A400000000000000000000000165B376D0000002E9000000000000000000000000000000000000001900000000hupper-1.12.1/.gitignore# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
env/
env*/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*,cover
.hypothesis/

# Translations
*.mo
*.pot

# Django stuff:
*.log

# Sphinx documentation
docs/_build/

# PyBuilder
target/

.idea
cover
07070100000007000081A400000000000000000000000165B376D0000000FF000000000000000000000000000000000000002000000000hupper-1.12.1/.readthedocs.yaml# https://docs.readthedocs.io/en/stable/config-file/v2.html
version: 2
build:
  os: ubuntu-22.04
  tools:
    python: '3.11'
sphinx:
  configuration: docs/conf.py
python:
  install:
    - method: pip
      path: .
      extra_requirements:
        - docs
07070100000008000081A400000000000000000000000165B376D000002E41000000000000000000000000000000000000001A00000000hupper-1.12.1/CHANGES.rst1.12.1 (2024-01-26)
===================

- Add support for Python 3.12.

- Fix a blocking issue when shutting down on Windows.

- Fix a race condition closing pipes when restarting the worker process.
  See https://github.com/Pylons/hupper/pull/83

- Fix issues with watchman when the server shuts down unexpectedly and when
  subscriptions are canceled.

- Add ``hupper.get_reloader().graceful_shutdown()`` which can be used within
  your own app to trigger a full shutdown of the worker as well as the
  monitoring.
  See https://github.com/Pylons/hupper/pull/88

1.12 (2023-04-02)
=================

- When the reloader is stopped, exit with the same code received from the
  subprocess.
  See https://github.com/Pylons/hupper/pull/81

1.11 (2022-01-02)
=================

- Drop support for Python 2.7, 3.4, 3.5, and 3.6.

- Add support/testing for Python 3.10, and 3.11.

- Explicitly require ``reload_interval`` set greater than ``0`` to avoid
  spinning the CPU needlessly.

1.10.3 (2021-05-13)
===================

- Support Python 3.8 and 3.9.

- Fix an issue with bare ``.pyc`` files in the source folder causing unhandled
  exceptions.
  See https://github.com/Pylons/hupper/pull/69

- Fix issues with using the Watchman file monitor on versions newer than
  Watchman 4.9.0. This fix modifies ``hupper`` to use Watchman's
  ``watch-project`` capabilities which also support reading the
  ``.watchmanconfig`` file to control certain properties of the monitoring.
  See https://github.com/Pylons/hupper/pull/70

1.10.2 (2020-03-02)
===================

- Fix a regression that caused SIGINT to not work properly in some situations.
  See https://github.com/Pylons/hupper/pull/67

1.10.1 (2020-02-18)
===================

- Performance improvements when using Watchman.

1.10 (2020-02-18)
=================

- Handle a ``SIGTERM`` signal by forwarding it to the child process and
  gracefully waiting for it to exit. This should enable using ``hupper``
  from within docker containers and other systems that want to control
  the reloader process.

  Previously the ``SIGTERM`` would shutdown ``hupper`` immediately, stranding
  the worker and relying on it to shutdown on its own.

  See https://github.com/Pylons/hupper/pull/65

- Avoid acquiring locks in the reloader process's signal handlers.
  See https://github.com/Pylons/hupper/pull/65

- Fix deprecation warnings caused by using the ``imp`` module on newer
  versions of Python.
  See https://github.com/Pylons/hupper/pull/65

1.9.1 (2019-11-12)
==================

- Support some scenarios in which user code is symlinked ``site-packages``.
  These were previously being ignored by the file monitor but should now
  be tracked.
  See https://github.com/Pylons/hupper/pull/61

1.9 (2019-10-14)
================

- Support ``--shutdown-interval`` on the ``hupper`` CLI.
  See https://github.com/Pylons/hupper/pull/56

- Support ``--reload-interval`` on the ``hupper`` CLI.
  See https://github.com/Pylons/hupper/pull/59

- Do not choke when stdin is not a TTY while waiting for changes after a
  crash. For example, when running in Docker Compose.
  See https://github.com/Pylons/hupper/pull/58

1.8.1 (2019-06-12)
==================

- Do not show the ``KeyboardInterrupt`` stacktrace when killing ``hupper``
  while waiting for a reload.

1.8 (2019-06-11)
================

- If the worker process crashes, ``hupper`` can be forced to reload the worker
  by pressing the ``ENTER`` key in the terminal instead of waiting to change a
  file.
  See https://github.com/Pylons/hupper/pull/53

1.7 (2019-06-04)
================

- On Python 3.5+ support recursive glob syntax in ``reloader.watch_files``.
  See https://github.com/Pylons/hupper/pull/52

1.6.1 (2019-03-11)
==================

- If the worker crashes immediately, sometimes ``hupper`` would go into a
  restart loop instead of waiting for a code change.
  See https://github.com/Pylons/hupper/pull/50

1.6 (2019-03-06)
================

- On systems that support ``SIGKILL`` and ``SIGTERM`` (not Windows), ``hupper``
  will now send a ``SIGKILL`` to the worker process as a last resort. Normally,
  a ``SIGINT`` (Ctrl-C) or ``SIGTERM`` (on reload) will kill the worker. If,
  within ``shutdown_interval`` seconds, the worker doesn't exit, it will
  receive a ``SIGKILL``.
  See https://github.com/Pylons/hupper/pull/48

- Support a ``logger`` argument to ``hupper.start_reloader`` to override
  the default logger that outputs messages to ``sys.stderr``.
  See https://github.com/Pylons/hupper/pull/49

1.5 (2019-02-16)
================

- Add support for ignoring custom patterns via the new ``ignore_files``
  option on ``hupper.start_reloader``. The ``hupper`` cli also supports
  ignoring files via the ``-x`` option.
  See https://github.com/Pylons/hupper/pull/46

1.4.2 (2018-11-26)
==================

- Fix a bug prompting the "ignoring corrupted payload from watchman" message
  and placing the file monitor in an unrecoverable state when a change
  triggered a watchman message > 4096 bytes.
  See https://github.com/Pylons/hupper/pull/44

1.4.1 (2018-11-11)
==================

- Stop ignoring a few paths that may not be system paths in cases where the
  virtualenv is the root of your project.
  See https://github.com/Pylons/hupper/pull/42

1.4 (2018-10-26)
================

- Ignore changes to any system / installed files. This includes mostly
  changes to any files in the stdlib and ``site-packages``. Anything that is
  installed in editable mode or not installed at all will still be monitored.
  This drastically reduces the number of files that ``hupper`` needs to
  monitor.
  See https://github.com/Pylons/hupper/pull/40

1.3.1 (2018-10-05)
==================

- Support Python 3.7.

- Avoid a restart-loop if the app is failing to restart on certain systems.
  There was a race where ``hupper`` failed to detect that the app was
  crashing and thus fell into its restart logic when the user manually
  triggers an immediate reload.
  See https://github.com/Pylons/hupper/pull/37

- Ignore corrupted packets coming from watchman that occur in semi-random
  scenarios. See https://github.com/Pylons/hupper/pull/38

1.3 (2018-05-21)
================

- Added watchman support via ``hupper.watchman.WatchmanFileMonitor``.
  This is the new preferred file monitor on systems supporting unix sockets.
  See https://github.com/Pylons/hupper/pull/32

- The ``hupper.watchdog.WatchdogFileMonitor`` will now output some info
  when it receives ulimit or other errors from ``watchdog``.
  See https://github.com/Pylons/hupper/pull/33

- Allow ``-q`` and ``-v`` cli options to control verbosity.
  See https://github.com/Pylons/hupper/pull/33

- Pass a ``logger`` value to the ``hupper.interfaces.IFileMonitorFactory``.
  This is an instance of ``hupper.interfaces.ILogger`` and can be used by
  file monitors to output errors and debug information.
  See https://github.com/Pylons/hupper/pull/33

1.2 (2018-05-01)
================

- Track only Python source files. Previously ``hupper`` would track all pyc
  and py files. Now, if a pyc file is found then the equivalent source file
  is searched and, if found, the pyc file is ignored.
  See https://github.com/Pylons/hupper/pull/31

- Allow overriding the default monitor lookup by specifying the
  ``HUPPER_DEFAULT_MONITOR`` environment variable as a Python dotted-path
  to a monitor factory. For example,
  ``HUPPER_DEFAULT_MONITOR=hupper.polling.PollingFileMonitor``.
  See https://github.com/Pylons/hupper/pull/29

- Backward-incompatible changes to the
  ``hupper.interfaces.IFileMonitorFactory`` API to pass arbitrary kwargs
  to the factory.
  See https://github.com/Pylons/hupper/pull/29

1.1 (2018-03-29)
================

- Support ``-w`` on the CLI to watch custom file paths.
  See https://github.com/Pylons/hupper/pull/28

1.0 (2017-05-18)
================

- Copy ``sys.path`` to the worker process and ensure ``hupper`` is on the
  ``PYTHONPATH`` so that the subprocess can import it to start the worker.
  This fixes an issue with how ``zc.buildout`` injects dependencies into a
  process which is done entirely by ``sys.path`` manipulation.
  See https://github.com/Pylons/hupper/pull/27

0.5 (2017-05-10)
================

- On non-windows systems ensure an exec occurs so that the worker does not
  share the same process space as the reloader causing certain code that
  is imported in both to not ever be reloaded. Under the hood this was a
  significant rewrite to use subprocess instead of multiprocessing.
  See https://github.com/Pylons/hupper/pull/23

0.4.4 (2017-03-10)
==================

- Fix some versions of Windows which were failing to duplicate stdin to
  the subprocess and crashing.
  https://github.com/Pylons/hupper/pull/16

0.4.3 (2017-03-07)
==================

- Fix pdb and other readline-based programs to operate properly.
  See https://github.com/Pylons/hupper/pull/15

0.4.2 (2017-01-24)
==================

- Pause briefly after receiving a SIGINT to allow the worker to kill itself.
  If it does not die then it is terminated.
  See https://github.com/Pylons/hupper/issues/11

- Python 3.6 compatibility.

0.4.1 (2017-01-03)
==================

- Handle errors that may occur when using watchdog to observe non-existent
  folders.

0.4.0 (2017-01-02)
==================

- Support running any Python module via ``hupper -m <module>``. This is
  equivalent to ``python -m`` except will fully reload the process when files
  change. See https://github.com/Pylons/hupper/pull/8

0.3.6 (2016-12-18)
==================

- Read the traceback for unknown files prior to crashing. If an import
  crashes due to a module-scope exception the file that caused the crash would
  not be tracked but this should help.

0.3.5 (2016-12-17)
==================

- Attempt to send imported paths to the monitor process before crashing to
  avoid cases where the master is waiting for changes in files that it never
  started monitoring.

0.3.4 (2016-11-21)
==================

- Add support for globbing using the stdlib ``glob`` module. On Python 3.5+
  this allows recursive globs using ``**``. Prior to this, the globbing is
  more limited.

0.3.3 (2016-11-19)
==================

- Fixed a runtime failure on Windows 32-bit systems.

0.3.2 (2016-11-15)
==================

- Support triggering reloads via SIGHUP when hupper detected a crash and is
  waiting for a file to change.

- Setup the reloader proxy prior to importing the worker's module. This
  should allow some work to be done at module-scope instead of in the
  callable.

0.3.1 (2016-11-06)
==================

- Fix package long description on PyPI.

- Ensure that the stdin file handle is inheritable incase the "spawn" variant
  of multiprocessing is enabled.

0.3 (2016-11-06)
================

- Disable bytecode compiling of files imported by the worker process. This
  should not be necessary when developing and it was causing the process to
  restart twice on Windows due to how it handles pyc timestamps.

- Fix hupper's support for forwarding stdin to the worker processes on
  Python < 3.5 on Windows.

- Fix some possible file descriptor leakage.

- Simplify the ``hupper.interfaces.IFileMonitor`` interface by internalizing
  some of the hupper-specific integrations. They can now focus on just
  looking for changes.

- Add the ``hupper.interfaces.IFileMonitorFactory`` interface to improve
  the documentation for the ``callback`` argument required by
  ``hupper.interfaces.IFileMonitor``.

0.2 (2016-10-26)
================

- Windows support!

- Added support for `watchdog <https://pypi.org/project/watchdog/>`_ if it's
  installed to do inotify-style file monitoring. This is an optional dependency
  and ``hupper`` will fallback to using polling if it's not available.

0.1 (2016-10-21)
================

- Initial release.
07070100000009000081A400000000000000000000000165B376D000000C16000000000000000000000000000000000000001F00000000hupper-1.12.1/CONTRIBUTING.rst.. highlight:: shell

============
Contributing
============

Contributions are welcome, and they are greatly appreciated! Every
little bit helps, and credit will always be given.

You can contribute in many ways:

Types of Contributions
----------------------

Report Bugs
~~~~~~~~~~~

Report bugs at https://github.com/Pylons/hupper/issues.

If you are reporting a bug, please include:

* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.

Fix Bugs
~~~~~~~~

Look through the GitHub issues for bugs. Anything tagged with "bug"
is open to whoever wants to implement it.

Implement Features
~~~~~~~~~~~~~~~~~~

Look through the GitHub issues for features. Anything tagged with "feature"
is open to whoever wants to implement it.

Write Documentation
~~~~~~~~~~~~~~~~~~~

hupper could always use more documentation, whether as part of the
official hupper docs, in docstrings, or even on the web in blog posts,
articles, and such.

Submit Feedback
~~~~~~~~~~~~~~~

The best way to send feedback is to file an issue at
https://github.com/Pylons/hupper/issues.

If you are proposing a feature:

* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
  are welcome :)

Get Started!
------------

Ready to contribute? Here's how to set up `hupper` for local development.

1. Fork the `hupper` repo on GitHub.
2. Clone your fork locally::

    $ git clone git@github.com:your_name_here/hupper.git

3. Install your local copy into a virtualenv::

    $ python3 -m venv env
    $ env/bin/pip install -e .[docs,testing]
    $ env/bin/pip install tox

4. Create a branch for local development::

    $ git checkout -b name-of-your-bugfix-or-feature

   Now you can make your changes locally.

5. When you're done making changes, check that your changes pass flake8 and
   the tests, including testing other Python versions with tox::

    $ env/bin/tox

6. Add your name to the ``CONTRIBUTORS.txt`` file in the root of the
   repository.

7. Commit your changes and push your branch to GitHub::

    $ git add .
    $ git commit -m "Your detailed description of your changes."
    $ git push origin name-of-your-bugfix-or-feature

8. Submit a pull request through the GitHub website.

Pull Request Guidelines
-----------------------

Before you submit a pull request, check that it meets these guidelines:

1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
   your new functionality into a function with a docstring, and add the
   feature to the list in README.rst.
3. The pull request should work for Python 3.7 and up and for PyPy 3.8.
4. When your pull request is posted, a maintainer will click the button to run
   Github Actions, afterwards validate that your PR is valid for all tested
   platforms/Python versions

Tips
----

To run a subset of tests::

$ env/bin/py.test tests.test_hupper
0707010000000A000081A400000000000000000000000165B376D0000013B8000000000000000000000000000000000000001F00000000hupper-1.12.1/CONTRIBUTORS.txtPylons Project Contributor Agreement
====================================

The submitter agrees by adding his or her name within the section below named
"Contributors" and submitting the resulting modified document to the
canonical shared repository location for this software project (whether
directly, as a user with "direct commit access", or via a "pull request"), he
or she is signing a contract electronically.  The submitter becomes a
Contributor after a) he or she signs this document by adding their name
beneath the "Contributors" section below, and b) the resulting document is
accepted into the canonical version control repository.

Treatment of Account
--------------------

Contributor will not allow anyone other than the Contributor to use his or
her username or source repository login to submit code to a Pylons Project
source repository. Should Contributor become aware of any such use,
Contributor will immediately notify Agendaless Consulting.
Notification must be performed by sending an email to
webmaster@agendaless.com.  Until such notice is received, Contributor will be
presumed to have taken all actions made through Contributor's account. If the
Contributor has direct commit access, Agendaless Consulting will have
complete control and discretion over capabilities assigned to Contributor's
account, and may disable Contributor's account for any reason at any time.

Legal Effect of Contribution
----------------------------

Upon submitting a change or new work to a Pylons Project source Repository (a
"Contribution"), you agree to assign, and hereby do assign, a one-half
interest of all right, title and interest in and to copyright and other
intellectual property rights with respect to your new and original portions
of the Contribution to Agendaless Consulting. You and Agendaless Consulting
each agree that the other shall be free to exercise any and all exclusive
rights in and to the Contribution, without accounting to one another,
including without limitation, the right to license the Contribution to others
under the MIT License. This agreement shall run with title to the
Contribution. Agendaless Consulting does not convey to you any right, title
or interest in or to the Program or such portions of the Contribution that
were taken from the Program. Your transmission of a submission to the Pylons
Project source Repository and marks of identification concerning the
Contribution itself constitute your intent to contribute and your assignment
of the work in accordance with the provisions of this Agreement.

License Terms
-------------

Code committed to the Pylons Project source repository (Committed Code) must
be governed by the MIT License or another license acceptable to
Agendaless Consulting.  Until Agendaless Consulting declares in writing an
acceptable license other than the MIT License, only the MIT License shall be
used.  A list of exceptions is detailed within
the "Licensing Exceptions" section of this document, if one exists.

Representations, Warranty, and Indemnification
----------------------------------------------

Contributor represents and warrants that the Committed Code does not violate
the rights of any person or entity, and that the Contributor has legal
authority to enter into this Agreement and legal authority over Contributed
Code. Further, Contributor indemnifies Agendaless Consulting against
violations.

Cryptography
------------

Contributor understands that cryptographic code may be subject to government
regulations with which Agendaless Consulting and/or entities using Committed
Code must comply. Any code which contains any of the items listed below must
not be checked-in until Agendaless Consulting staff has been notified and has
approved such contribution in writing.

- Cryptographic capabilities or features

- Calls to cryptographic features

- User interface elements which provide context relating to cryptography

- Code which may, under casual inspection, appear to be cryptographic.

Notices
-------

Contributor confirms that any notices required will be included in any
Committed Code.

Licensing Exceptions
====================

Code committed within the ``docs/`` subdirectory of the hupper source
control repository and "docstrings" which appear in the documentation
generated by running "make" within this directory are licensed under the
Creative Commons Attribution-Noncommercial-Share Alike 3.0 United States
License (http://creativecommons.org/licenses/by-nc-sa/3.0/us/).

List of Contributors
====================

The below-signed are contributors to a code repository that is part of the
project named "hupper".  Each below-signed contributor has read, understand
and agrees to the terms above in the section within this document entitled
"Pylons Project Contributor Agreement" as of the date beside his or her name.

Contributors
------------

- Michael Merickel (2016-10-21)
- Bert JW Regeer (2017-05-17)
- Jens Carl (2017-05-22)
- Eric Atkin (2019-02-15)
- Yeray Díaz Díaz (2019-10-03)
- Marcel Jackwerth (2023-03-23)
0707010000000B000081A400000000000000000000000165B376D000000424000000000000000000000000000000000000001A00000000hupper-1.12.1/LICENSE.txtCopyright (c) 2017 Michael Merickel

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
0707010000000C000081A400000000000000000000000165B376D00000016C000000000000000000000000000000000000001A00000000hupper-1.12.1/MANIFEST.ingraft src/hupper
graft tests
graft docs
graft .github
prune docs/_build

include README.rst
include CHANGES.rst
include LICENSE.txt
include CONTRIBUTING.rst
include CONTRIBUTORS.txt

include pyproject.toml
include setup.cfg
include .coveragerc
include .flake8
include tox.ini
include pytest.ini
include .readthedocs.yaml

recursive-exclude * __pycache__ *.py[cod]
0707010000000D000081A400000000000000000000000165B376D00000078F000000000000000000000000000000000000001900000000hupper-1.12.1/README.rst======
hupper
======

.. image:: https://img.shields.io/pypi/v/hupper.svg
    :target: https://pypi.python.org/pypi/hupper

.. image:: https://github.com/Pylons/hupper/actions/workflows/ci-tests.yml/badge.svg?branch=main
    :target: https://github.com/Pylons/hupper/actions/workflows/ci-tests.yml?query=branch%3Amain

.. image:: https://readthedocs.org/projects/hupper/badge/?version=latest
    :target: https://readthedocs.org/projects/hupper/?badge=latest
    :alt: Documentation Status

``hupper`` is an integrated process monitor that will track changes to
any imported Python files in ``sys.modules`` as well as custom paths. When
files are changed the process is restarted.

Command-line Usage
==================

Hupper can load any Python code similar to ``python -m <module>`` by using the
``hupper -m <module>`` program.

.. code-block:: console

   $ hupper -m myapp
   Starting monitor for PID 23982.

API Usage
=========

Start by defining an entry point for your process. This must be an importable
path in string format. For example, ``myapp.scripts.serve.main``.

.. code-block:: python

    # myapp/scripts/serve.py

    import sys
    import hupper
    import waitress


    def wsgi_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        yield b'hello'


    def main(args=sys.argv[1:]):
        if '--reload' in args:
            # start_reloader will only return in a monitored subprocess
            reloader = hupper.start_reloader('myapp.scripts.serve.main')

            # monitor an extra file
            reloader.watch_files(['foo.ini'])

        waitress.serve(wsgi_app)

Acknowledgments
===============

``hupper`` is inspired by initial work done by Carl J Meyer and David Glick
during a Pycon sprint and is built to be a more robust and generic version of
Ian Bicking's excellent PasteScript ``paste serve --reload`` and Pyramid's
``pserve --reload``.
0707010000000E000041ED00000000000000000000000265B376D000000000000000000000000000000000000000000000001300000000hupper-1.12.1/docs0707010000000F000081A400000000000000000000000165B376D0000002F3000000000000000000000000000000000000001D00000000hupper-1.12.1/pyproject.toml[build-system]
requires = ["setuptools>=41.0.1", "wheel"]
build-backend = "setuptools.build_meta"

[tool.black]
line-length = 79
skip-string-normalization = true
target_version = ["py37", "py38", "py39", "py310", "py311"]
exclude = '''
/(
    \.git
  | \.mypy_cache
  | \.tox
  | \.venv
  | \.pytest_cache
  | dist
  | build
  | docs
)/
'''

 # This next section only exists for people that have their editors
# automatically call isort, black already sorts entries on its own when run.
[tool.isort]
profile = "black"
py_version = 3
combine_as_imports = true
line_length = 79
force_sort_within_sections = true
no_lines_before = "THIRDPARTY"
sections = "FUTURE,THIRDPARTY,FIRSTPARTY,LOCALFOLDER"
default_section = "THIRDPARTY"
known_first_party = "hupper"
07070100000010000081A400000000000000000000000165B376D00000005E000000000000000000000000000000000000001900000000hupper-1.12.1/pytest.ini[pytest]
python_files = test_*.py
testpaths =
    src/hupper
    tests
filterwarnings = error
07070100000011000081A400000000000000000000000165B376D000000735000000000000000000000000000000000000001800000000hupper-1.12.1/setup.cfg[metadata]
name = hupper
version = 1.12.1
author = Michael Merickel
author_email = pylons-discuss@googlegroups.com
license = MIT
license_files = LICENSE.txt
description = Integrated process monitor for developing and reloading daemons.
long_description = file:README.rst
long_description_content_type = text/x-rst
keywords =
    server
    daemon
    autoreload
    reloader
    hup
    file
    watch
    process
url = https://github.com/Pylons/hupper
project_urls =
    Documentation = https://docs.pylonsproject.org/projects/hupper/en/latest/
    Changelog = https://docs.pylonsproject.org/projects/hupper/en/latest/changes.html
    Issue Tracker = https://github.com/Pylons/hupper/issues
classifiers =
    Development Status :: 5 - Production/Stable
    Intended Audience :: Developers
    License :: OSI Approved :: MIT License
    Natural Language :: English
    Programming Language :: Python :: 3
    Programming Language :: Python :: 3.7
    Programming Language :: Python :: 3.8
    Programming Language :: Python :: 3.9
    Programming Language :: Python :: 3.10
    Programming Language :: Python :: 3.11
    Programming Language :: Python :: 3.12
    Programming Language :: Python :: Implementation :: CPython
    Programming Language :: Python :: Implementation :: PyPy

[options]
package_dir =
     = src
packages = find:
zip_safe = False
include_package_data = True
python_requires = >=3.7

[options.packages.find]
where = src

[options.entry_points]
console_scripts =
    hupper = hupper.cli:main

[options.extras_require]
docs =
    watchdog
    # need pkg_resources in docs/conf.py until we drop py37
    setuptools
    Sphinx
    pylons-sphinx-themes
testing =
    watchdog
    pytest
    pytest-cov
    mock

[check-manifest]
ignore-default-rules = true
ignore =
    .gitignore
    PKG-INFO
    *.egg-info
    *.egg-info/*
07070100000012000081A400000000000000000000000165B376D000000026000000000000000000000000000000000000001700000000hupper-1.12.1/setup.pyfrom setuptools import setup

setup()
07070100000013000041ED00000000000000000000000265B376D000000000000000000000000000000000000000000000001200000000hupper-1.12.1/src07070100000014000041ED00000000000000000000000265B376D000000000000000000000000000000000000000000000001900000000hupper-1.12.1/src/hupper07070100000015000081A400000000000000000000000165B376D0000000AE000000000000000000000000000000000000002500000000hupper-1.12.1/src/hupper/__init__.py# public api
# flake8: noqa

from .reloader import start_reloader
from .utils import is_watchdog_supported, is_watchman_supported
from .worker import get_reloader, is_active
07070100000016000081A400000000000000000000000165B376D00000073E000000000000000000000000000000000000002000000000hupper-1.12.1/src/hupper/cli.pyimport argparse
import runpy
import sys

from .logger import LogLevel
from .reloader import start_reloader


def interval_parser(string):
    """Parse a ``--*-interval`` option into an int greater than 0.

    Raises ``argparse.ArgumentTypeError`` for non-integer strings and for
    values <= 0 so argparse can report a clean usage error.
    """
    msg = "Interval must be an int greater than 0"
    try:
        value = int(string)
    except ValueError:
        # suppress the chained ValueError so argparse shows a clean message
        raise argparse.ArgumentTypeError(msg) from None
    if value <= 0:
        raise argparse.ArgumentTypeError(msg)
    return value


def main():
    """CLI entry point: run ``-m <module>`` under the hupper reloader."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", dest="module", required=True)
    parser.add_argument("-w", dest="watch", action="append")
    parser.add_argument("-x", dest="ignore", action="append")
    parser.add_argument("-v", dest="verbose", action='store_true')
    parser.add_argument("-q", dest="quiet", action='store_true')
    parser.add_argument("--shutdown-interval", type=interval_parser)
    parser.add_argument("--reload-interval", type=interval_parser)

    args, extra_argv = parser.parse_known_args()

    # -q takes precedence over -v; default verbosity is INFO
    if args.quiet:
        log_level = LogLevel.ERROR
    elif args.verbose:
        log_level = LogLevel.DEBUG
    else:
        log_level = LogLevel.INFO

    # only forward interval options that were actually supplied so the
    # defaults built into start_reloader still apply otherwise
    overrides = {
        name: value
        for name, value in (
            ('reload_interval', args.reload_interval),
            ('shutdown_interval', args.shutdown_interval),
        )
        if value is not None
    }

    reloader = start_reloader(
        "hupper.cli.main",
        verbose=log_level,
        ignore_files=args.ignore,
        **overrides,
    )

    # hand the leftover argv to the target module and make the current
    # directory importable, mimicking ``python -m``
    sys.argv[1:] = extra_argv
    sys.path.insert(0, "")

    if args.watch:
        reloader.watch_files(args.watch)

    return runpy.run_module(args.module, alter_sys=True, run_name="__main__")
07070100000017000081A400000000000000000000000165B376D0000006FA000000000000000000000000000000000000002700000000hupper-1.12.1/src/hupper/interfaces.pyfrom abc import ABC, abstractmethod


class IReloaderProxy(ABC):
    """Interface given to worker processes for talking to the monitor."""

    @abstractmethod
    def watch_files(self, files):
        """Signal to the monitor to track some custom paths."""

    @abstractmethod
    def trigger_reload(self):
        """Signal the monitor to execute a reload."""

    @abstractmethod
    def graceful_shutdown(self):
        """Signal the monitor to gracefully shutdown."""


class IFileMonitorFactory(ABC):
    """Factory interface used by the reloader to build a file monitor."""

    @abstractmethod
    def __call__(self, callback, **kw):
        """Return an :class:`.IFileMonitor` instance.

        ``callback`` is a callable to be invoked by the ``IFileMonitor``
        when file changes are detected. It should accept the path of
        the changed file as its only parameter.

        Extra keyword-only arguments:

        ``interval`` is the value of ``reload_interval`` passed to the
        reloader and may be used to control behavior in the file monitor.

        ``logger`` is an :class:`.ILogger` instance used to record runtime
        output.

        """


class IFileMonitor(ABC):
    """Interface for backends that watch paths and report changes."""

    @abstractmethod
    def add_path(self, path):
        """Start monitoring a new path."""

    @abstractmethod
    def start(self):
        """Start the monitor. This method should not block."""

    @abstractmethod
    def stop(self):
        """Trigger the monitor to stop.

        This should be called before invoking ``join``.

        """

    @abstractmethod
    def join(self):
        """Block until the monitor has stopped."""


class ILogger(ABC):
    """Interface for objects that record reloader runtime output."""

    @abstractmethod
    def error(self, msg):
        """Record an error message."""

    @abstractmethod
    def info(self, msg):
        """Record an informational message."""

    @abstractmethod
    def debug(self, msg):
        """Record a debug-only message."""
07070100000018000081A400000000000000000000000165B376D0000024C2000000000000000000000000000000000000002000000000hupper-1.12.1/src/hupper/ipc.pyimport errno
import io
import os
import pickle
import struct
import subprocess
import sys
import threading

from .utils import WIN, is_stream_interactive, resolve_spec

if WIN:  # pragma: no cover
    import msvcrt

    from . import winapi

    class ProcessGroup(object):
        # Windows: use a kill-on-close job object so that child processes
        # are terminated when the monitor process goes away.
        def __init__(self):
            self.h_job = winapi.CreateJobObject(None, None)

            info = winapi.JOBOBJECT_BASIC_LIMIT_INFORMATION()
            info.LimitFlags = winapi.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE

            extended_info = winapi.JOBOBJECT_EXTENDED_LIMIT_INFORMATION()
            extended_info.BasicLimitInformation = info

            winapi.SetInformationJobObject(
                self.h_job,
                winapi.JobObjectExtendedLimitInformation,
                extended_info,
            )

        def add_child(self, pid):
            # attach the child process to our job object
            hp = winapi.OpenProcess(winapi.PROCESS_ALL_ACCESS, False, pid)
            try:
                return winapi.AssignProcessToJobObject(self.h_job, hp)
            except OSError as ex:
                if getattr(ex, 'winerror', None) == 5:
                    # skip ACCESS_DENIED_ERROR on windows < 8 which occurs when
                    # the process is already attached to another job
                    pass
                else:
                    raise

    def snapshot_termios(stream):
        # no termios on Windows; no-op kept for API symmetry with POSIX
        pass

    def restore_termios(stream, state):
        # no termios on Windows; no-op kept for API symmetry with POSIX
        pass

    def get_handle(fd):
        # convert a C-runtime fd into a native Windows HANDLE
        return msvcrt.get_osfhandle(fd)

    def open_handle(handle, mode):
        # convert a native Windows HANDLE back into a C-runtime fd, with
        # open flags derived from the requested file mode string
        flags = 0
        if 'w' not in mode and '+' not in mode:
            flags |= os.O_RDONLY
        if 'b' not in mode:
            flags |= os.O_TEXT
        if 'a' in mode:
            flags |= os.O_APPEND
        return msvcrt.open_osfhandle(handle, flags)

else:
    import fcntl
    import termios

    class ProcessGroup(object):
        # POSIX: children share our process group already
        def add_child(self, pid):
            # nothing to do on *nix
            pass

    def snapshot_termios(stream):
        # capture terminal settings so they can be restored after a worker
        # exits; returns None for non-interactive streams
        if is_stream_interactive(stream):
            state = termios.tcgetattr(stream.fileno())
            return state

    def restore_termios(stream, state):
        # restore terminal settings previously captured by snapshot_termios
        if state and is_stream_interactive(stream):
            fd = stream.fileno()
            termios.tcflush(fd, termios.TCIOFLUSH)
            termios.tcsetattr(fd, termios.TCSANOW, state)

    def get_handle(fd):
        # fds double as inheritable handles on POSIX
        return fd

    def open_handle(handle, mode):
        # the handle is already a usable fd on POSIX
        return handle


def _pipe():
    """Create an OS-level pipe with both ends marked non-inheritable."""
    read_end, write_end = os.pipe()
    for fd in (read_end, write_end):
        set_inheritable(fd, False)
    return read_end, write_end


def Pipe():
    """Return two connected, bi-directional ``Connection`` endpoints."""
    child_r, child_w = _pipe()
    parent_r, parent_w = _pipe()

    # each endpoint reads from one pipe and writes to the other
    left = Connection(child_r, parent_w)
    right = Connection(parent_r, child_w)
    return left, right


class Connection(object):
    """
    A connection to a bi-directional pipe.

    """

    # fixed-size header: native unsigned long long holding the payload size
    _packet_len = struct.Struct('Q')

    # serializes writes so concurrent sends stay atomic; set in activate()
    send_lock = None
    # daemon thread draining the read end; set in activate()
    reader_thread = None
    # callback invoked with each unpickled packet (None signals EOF)
    on_recv = lambda _: None

    def __init__(self, r_fd, w_fd):
        self.r_fd = r_fd
        self.w_fd = w_fd

    def __getstate__(self):
        # pickle OS-level handles so the connection can be transferred to
        # the spawned subprocess (see spawn()/spawn_main())
        return {
            'r_handle': get_handle(self.r_fd),
            'w_handle': get_handle(self.w_fd),
        }

    def __setstate__(self, state):
        # open_handle returns an fd (POSIX passes the value straight
        # through), so these attributes remain fds after unpickling
        self.r_fd = open_handle(state['r_handle'], 'rb')
        self.w_fd = open_handle(state['w_handle'], 'wb')

    def activate(self, on_recv):
        """Start the reader thread; ``on_recv`` gets each packet (None on EOF)."""
        self.on_recv = on_recv

        self.send_lock = threading.Lock()

        self.reader_thread = threading.Thread(target=self._read_loop)
        self.reader_thread.daemon = True
        self.reader_thread.start()

    def close(self):
        """Close both pipe ends and wait for the reader thread to finish."""
        # swap the fds to -1 first so concurrent users see a dead pipe
        self.on_recv = lambda _: None
        self.r_fd, r_fd = -1, self.r_fd
        self.w_fd, w_fd = -1, self.w_fd

        close_fd(w_fd)
        close_fd(r_fd)
        if self.reader_thread:
            self.reader_thread.join()

    def _recv_packet(self):
        # read one length-prefixed pickle from the pipe; returns None on a
        # clean EOF between packets
        buf = io.BytesIO()
        chunk = os.read(self.r_fd, self._packet_len.size)
        if not chunk:
            return
        size = remaining = self._packet_len.unpack(chunk)[0]
        while remaining > 0:
            chunk = os.read(self.r_fd, remaining)
            n = len(chunk)
            if n == 0:
                if remaining == size:
                    raise EOFError
                else:
                    raise IOError('got end of file during message')
            buf.write(chunk)
            remaining -= n
        return pickle.loads(buf.getvalue())

    def _read_loop(self):
        # body of the daemon reader thread started by activate()
        try:
            while True:
                packet = self._recv_packet()
                if packet is None:
                    break
                self.on_recv(packet)
        except EOFError:
            pass
        except OSError as e:
            # EBADF is expected when close() races the reader thread
            if e.errno != errno.EBADF:
                raise
        # always deliver a final None so the receiver knows we are done
        self.on_recv(None)

    def _write_packet(self, data):
        # os.write may perform a partial write; loop until fully flushed
        while data:
            n = os.write(self.w_fd, data)
            data = data[n:]

    def send(self, value):
        """Pickle ``value`` and write it as one packet; return bytes written."""
        data = pickle.dumps(value)
        with self.send_lock:
            self._write_packet(self._packet_len.pack(len(data)))
            self._write_packet(data)
        return len(data) + self._packet_len.size


def set_inheritable(fd, inheritable):
    """Mark ``fd`` as inheritable (or not) by child processes."""
    if hasattr(os, 'get_inheritable'):
        # modern python (3.4+) exposes this directly
        if os.get_inheritable(fd) != inheritable:
            os.set_inheritable(fd, inheritable)

    elif WIN:
        # legacy windows fallback via SetHandleInformation
        flags = winapi.HANDLE_FLAG_INHERIT if inheritable else 0
        winapi.SetHandleInformation(
            get_handle(fd), winapi.HANDLE_FLAG_INHERIT, flags
        )

    else:
        # legacy POSIX fallback: FD_CLOEXEC is the inverse of inheritable
        old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        if inheritable:
            new_flags = old_flags & ~fcntl.FD_CLOEXEC
        else:
            new_flags = old_flags | fcntl.FD_CLOEXEC
        if new_flags != old_flags:
            fcntl.fcntl(fd, fcntl.F_SETFD, new_flags)


def close_fd(fd, raises=True):
    """Close ``fd`` unless it is None; optionally swallow close errors."""
    if fd is None:
        return
    try:
        os.close(fd)
    except Exception:  # pragma: no cover
        if raises:
            raise


def args_from_interpreter_flags():
    """
    Return a list of command-line arguments reproducing the current
    settings in sys.flags and sys.warnoptions.

    """
    flag_opt_map = {
        'debug': 'd',
        'dont_write_bytecode': 'B',
        'no_user_site': 's',
        'no_site': 'S',
        'ignore_environment': 'E',
        'verbose': 'v',
        'bytes_warning': 'b',
        'quiet': 'q',
        'optimize': 'O',
    }
    # repeated flags (e.g. -vv) are encoded by repeating the option letter
    args = [
        '-' + opt * count
        for flag, opt in flag_opt_map.items()
        for count in [getattr(sys.flags, flag, 0)]
        if count > 0
    ]
    args.extend('-W' + opt for opt in sys.warnoptions)
    return args


def get_command_line(**kwds):
    """
    Build the ``(argv, env)`` pair used to spawn a worker process that
    runs :func:`spawn_main`.

    ``kwds`` are embedded literally into a ``-c`` program, so every value
    must round-trip through ``repr()``.
    """
    prog = 'from hupper.ipc import spawn_main; spawn_main(%s)'
    prog %= ', '.join('%s=%r' % item for item in kwds.items())
    opts = args_from_interpreter_flags()
    args = [sys.executable] + opts + ['-c', prog]

    # ensure hupper is on the PYTHONPATH in the worker process
    #
    # there are some cases where hupper may only be importable because of
    # direct manipulation of sys.path (zc.buildout) which is not reflected
    # into the subprocess without us doing it manually
    # see https://github.com/Pylons/hupper/issues25
    # (the redundant single-argument os.path.join was removed here)
    hupper_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    extra_py_paths = [hupper_root]

    env = os.environ.copy()
    env['PYTHONPATH'] = (
        os.pathsep.join(extra_py_paths)
        + os.pathsep
        + env.get('PYTHONPATH', '')
    )
    return args, env


def get_preparation_data():
    """Snapshot interpreter state to replay in the child (see prepare())."""
    # multiprocessing does some work here to replace '' in sys.path with
    # os.getcwd() but it is not valid to assume that os.getcwd() at the time
    # hupper is imported is the starting folder of the process so for now
    # we'll just assume that the user has not changed the CWD
    return {
        'sys.argv': sys.argv,
        'sys.path': list(sys.path),
    }


def prepare(data):
    """Apply preparation data captured by ``get_preparation_data``."""
    for key, attr in (('sys.argv', 'argv'), ('sys.path', 'path')):
        if key in data:
            setattr(sys, attr, data[key])


def spawn(spec, kwargs, pass_fds=()):
    """
    Invoke a python function in a subprocess.

    ``spec`` is a dotted path to the function and ``kwargs`` its keyword
    arguments; both are pickled to the child over a one-shot pipe.
    ``pass_fds`` are extra fds left inheritable for the child.
    Returns the ``subprocess.Popen`` handle for the new process.
    """
    # the read end (and any pass_fds) must be inheritable so the child
    # can read the pickled startup payload
    r, w = os.pipe()
    for fd in [r] + list(pass_fds):
        set_inheritable(fd, True)

    preparation_data = get_preparation_data()

    # the pipe is passed to the child as a native handle on the -c line
    r_handle = get_handle(r)
    args, env = get_command_line(pipe_handle=r_handle)
    process = subprocess.Popen(args, env=env, close_fds=False)

    # write the payload and close our end; the child reads until EOF
    to_child = os.fdopen(w, 'wb')
    to_child.write(pickle.dumps([preparation_data, spec, kwargs]))
    to_child.close()

    return process


def spawn_main(pipe_handle):
    """
    Entry point of the child process created by ``spawn``.

    Reads the pickled ``[preparation_data, spec, kwargs]`` payload from
    ``pipe_handle``, restores interpreter state, runs the target function
    and exits with code 0 when it returns.
    """
    fd = open_handle(pipe_handle, 'rb')
    from_parent = os.fdopen(fd, 'rb')
    preparation_data, spec, kwargs = pickle.load(from_parent)
    from_parent.close()

    prepare(preparation_data)

    func = resolve_spec(spec)
    func(**kwargs)
    sys.exit(0)


def wait(process, timeout=None):
    """Wait on ``process``; return its exit code, or None on timeout.

    ``timeout=None`` blocks indefinitely; ``timeout=0`` polls without
    blocking.
    """
    if timeout is None:
        return process.wait()
    if timeout == 0:
        return process.poll()
    try:
        return process.wait(timeout)
    except subprocess.TimeoutExpired:
        return None


def kill(process, soft=False):
    """Stop ``process``: terminate (graceful) when ``soft``, else hard kill."""
    action = process.terminate if soft else process.kill
    return action()
07070100000019000081A400000000000000000000000165B376D0000002C3000000000000000000000000000000000000002300000000hupper-1.12.1/src/hupper/logger.pyfrom __future__ import print_function
import sys

from .interfaces import ILogger


class LogLevel:
    """Numeric verbosity levels used by ``DefaultLogger``."""

    # higher configured values include all lower-valued messages
    ERROR = 0
    INFO = 1
    DEBUG = 2


class DefaultLogger(ILogger):
    """An :class:`.ILogger` that writes to stderr, filtered by level."""

    def __init__(self, level):
        # the maximum LogLevel that will be emitted
        self.level = level

    def _out(self, level, msg):
        # drop anything more verbose than the configured threshold
        if level > self.level:
            return
        print(msg, file=sys.stderr)

    def error(self, msg):
        self._out(LogLevel.ERROR, '[ERROR] ' + msg)

    def info(self, msg):
        self._out(LogLevel.INFO, msg)

    def debug(self, msg):
        self._out(LogLevel.DEBUG, '[DEBUG] ' + msg)


class SilentLogger(ILogger):
    """An :class:`.ILogger` that discards every message."""

    def error(self, msg):
        pass

    def info(self, msg):
        pass

    def debug(self, msg):
        pass
0707010000001A000081A400000000000000000000000165B376D000000683000000000000000000000000000000000000002400000000hupper-1.12.1/src/hupper/polling.pyimport os
import threading
import time

from .interfaces import IFileMonitor


class PollingFileMonitor(threading.Thread, IFileMonitor):
    """
    An :class:`hupper.interfaces.IFileMonitor` that stats the files
    at periodic intervals.

    ``callback`` is a callable that accepts a path to a changed file.

    ``interval`` is a value in seconds between scans of the files on disk.
    Do not set this too low or it will eat your CPU and kill your drive.

    """

    def __init__(self, callback, interval=1, **kw):
        super(PollingFileMonitor, self).__init__()
        self.callback = callback
        self.poll_interval = interval
        # shared with add_path() callers; guarded by self.lock
        self.paths = set()
        # last seen mtime per path; only touched from the polling thread
        self.mtimes = {}
        self.lock = threading.Lock()
        # loop flag flipped by stop(); a plain bool read/write needs no lock
        self.enabled = True

    def add_path(self, path):
        """Start monitoring a new path (thread-safe)."""
        with self.lock:
            self.paths.add(path)

    def run(self):
        # thread body: scan a snapshot of the paths, then sleep
        while self.enabled:
            with self.lock:
                paths = list(self.paths)
            self.check_reload(paths)
            time.sleep(self.poll_interval)

    def stop(self):
        """Ask the polling thread to exit after its current iteration."""
        self.enabled = False

    def check_reload(self, paths):
        """Invoke the callback for every path whose mtime increased."""
        changes = set()
        for path in paths:
            mtime = get_mtime(path)
            if path not in self.mtimes:
                # first sighting only establishes the baseline mtime
                self.mtimes[path] = mtime
            elif self.mtimes[path] < mtime:
                self.mtimes[path] = mtime
                changes.add(path)
        for path in sorted(changes):
            self.callback(path)


def get_mtime(path):
    """Return the mtime of ``path``, or 0 if it cannot be stat'd."""
    try:
        # os.stat raises on failure and never returns a falsey value, so
        # the result can be used directly (the old ``if stat:`` was dead)
        return os.stat(path).st_mtime
    except OSError:  # pragma: no cover
        return 0
0707010000001B000081A400000000000000000000000165B376D000003DAE000000000000000000000000000000000000002500000000hupper-1.12.1/src/hupper/reloader.pyfrom collections import deque
from contextlib import contextmanager
import fnmatch
from glob import glob
import os
import re
import signal
import sys
import threading
import time

from .ipc import ProcessGroup, close_fd
from .logger import DefaultLogger, SilentLogger
from .utils import (
    WIN,
    default,
    is_stream_interactive,
    is_watchdog_supported,
    is_watchman_supported,
    resolve_spec,
)
from .worker import Worker, get_reloader, is_active

if WIN:
    from . import winapi


class FileMonitorProxy(object):
    """
    Wrap an :class:`hupper.interfaces.IFileMonitor` into an object that
    exposes a thread-safe interface back to the reloader to detect
    when it should reload.

    """

    # the wrapped IFileMonitor; assigned by the reloader after construction
    monitor = None

    def __init__(self, callback, logger, ignore_files=None):
        # callback fires once per change-batch (see file_changed)
        self.callback = callback
        self.logger = logger
        self.changed_paths = set()
        # shell-style ignore patterns compiled to regexes
        self.ignore_files = [
            re.compile(fnmatch.translate(x)) for x in set(ignore_files or [])
        ]
        # guards changed_paths / is_changed, which are written from the
        # monitor's thread and read from the reloader's thread
        self.lock = threading.Lock()
        self.is_changed = False

    def add_path(self, path):
        # if the glob does not match any files then go ahead and pass
        # the pattern to the monitor anyway incase it is just a file that
        # is currently missing
        for p in glob(path, recursive=True) or [path]:
            if not any(x.match(p) for x in self.ignore_files):
                self.monitor.add_path(p)

    def start(self):
        self.monitor.start()

    def stop(self):
        self.monitor.stop()
        self.monitor.join()

    def file_changed(self, path):
        # invoked by the monitor backend when a watched path changes; the
        # callback fires only for the first change since clear_changes()
        with self.lock:
            if path not in self.changed_paths:
                self.logger.info('{} changed; reloading ...'.format(path))
                self.changed_paths.add(path)

                if not self.is_changed:
                    self.is_changed = True
                    self.callback(self.changed_paths)

    def clear_changes(self):
        # reset accumulated change state before starting a new worker
        with self.lock:
            self.changed_paths = set()
            self.is_changed = False


class ControlSignal:
    """Single-byte tokens written onto the reloader's control pipe."""

    # tokens forwarded from OS signal handlers
    SIGINT = b'\x01'
    SIGHUP = b'\x02'
    SIGTERM = b'\x03'
    SIGCHLD = b'\x04'

    # internal events
    FILE_CHANGED = b'\x0a'
    WORKER_COMMAND = b'\x0b'


class WorkerResult:
    """Outcome values produced by ``_run_worker``."""

    # exit - do not reload
    EXIT = 'exit'

    # reload immediately
    RELOAD = 'reload'

    # wait for changes before reloading
    WAIT = 'wait'


class Reloader(object):
    """
    A wrapper class around a file monitor which will handle changes by
    restarting a new worker process.

    """

    def __init__(
        self,
        worker_path,
        monitor_factory,
        logger,
        reload_interval=1,
        shutdown_interval=1,
        worker_args=None,
        worker_kwargs=None,
        ignore_files=None,
    ):
        self.worker_path = worker_path
        self.worker_args = worker_args
        self.worker_kwargs = worker_kwargs
        self.ignore_files = ignore_files
        self.monitor_factory = monitor_factory
        self.reload_interval = reload_interval
        self.shutdown_interval = shutdown_interval
        self.logger = logger
        # the FileMonitorProxy; only set while _start_monitor is active
        self.monitor = None
        self.process_group = ProcessGroup()

    def run(self):
        """
        Execute the reloader forever, blocking the current thread.

        This will invoke ``sys.exit`` with the return code from the
        subprocess. If interrupted before the process starts then
        it'll exit with ``-1``.

        """
        exitcode = -1
        with self._setup_runtime():
            while True:
                result, exitcode = self._run_worker()
                if result == WorkerResult.EXIT:
                    break
                start = time.time()
                if result == WorkerResult.WAIT:
                    result, _ = self._wait_for_changes()
                    if result == WorkerResult.EXIT:
                        break
                # throttle restarts so rapidly-crashing workers can't spin
                dt = self.reload_interval - (time.time() - start)
                if dt > 0:
                    time.sleep(dt)
        sys.exit(exitcode)

    def run_once(self):
        """
        Execute the worker once.

        This method will return after the worker exits.

        Returns the exit code from the worker process.

        """
        with self._setup_runtime():
            _, exitcode = self._run_worker()
            return exitcode

    def _run_worker(self):
        # launch the real application worker
        worker = Worker(
            self.worker_path, args=self.worker_args, kwargs=self.worker_kwargs
        )
        return _run_worker(self, worker)

    def _wait_for_changes(self):
        # run the wait_main placeholder worker which blocks until a file
        # changes or the user presses ENTER
        worker = Worker(__name__ + '.wait_main')
        return _run_worker(
            self,
            worker,
            logger=SilentLogger(),
            shutdown_interval=0,
        )

    @contextmanager
    def _setup_runtime(self):
        # layered setup: control pipe, then file monitor, then signals
        with self._start_control():
            with self._start_monitor():
                with self._capture_signals():
                    yield

    @contextmanager
    def _start_control(self):
        # the control pipe serializes all events (signals, file changes,
        # worker commands) into one readable stream for _run_worker
        self.control_r, self.control_w = os.pipe()
        try:
            yield
        finally:
            close_fd(self.control_w)
            close_fd(self.control_r)
            self.control_r = self.control_w = None

    def _control_proxy(self, signal):
        # return a callback that writes the given control byte to the pipe
        # (note: the ``signal`` parameter shadows the stdlib module here)
        return lambda *args: os.write(self.control_w, signal)

    @contextmanager
    def _start_monitor(self):
        proxy = FileMonitorProxy(
            self._control_proxy(ControlSignal.FILE_CHANGED),
            self.logger,
            self.ignore_files,
        )
        proxy.monitor = self.monitor_factory(
            proxy.file_changed,
            interval=self.reload_interval,
            logger=self.logger,
        )
        self.monitor = proxy
        self.monitor.start()
        try:
            yield
        finally:
            self.monitor = None
            proxy.stop()

    # mapping of OS signal name -> control byte forwarded to the pipe
    _signals = {
        'SIGINT': ControlSignal.SIGINT,
        'SIGHUP': ControlSignal.SIGHUP,
        'SIGTERM': ControlSignal.SIGTERM,
        'SIGCHLD': ControlSignal.SIGCHLD,
    }

    @contextmanager
    def _capture_signals(self):
        # install handlers that forward OS signals into the control pipe,
        # restoring the previous handlers on exit
        undo_handlers = []
        try:
            for signame, control in self._signals.items():
                signum = getattr(signal, signame, None)
                if signum is None:
                    # not every platform defines every signal (e.g. SIGHUP
                    # and SIGCHLD do not exist on windows)
                    self.logger.debug(
                        'Skipping unsupported signal={}'.format(signame)
                    )
                    continue
                handler = self._control_proxy(control)
                if WIN and signame == 'SIGINT':
                    # windows delivers ctrl-c via a console ctrl handler;
                    # the python-level handler is then explicitly ignored
                    undo = winapi.AddConsoleCtrlHandler(handler)
                    undo_handlers.append(undo)
                    handler = signal.SIG_IGN
                psig = signal.signal(signum, handler)
                undo_handlers.append(
                    lambda s=signum, p=psig: signal.signal(s, p)
                )
            yield
        finally:
            for undo in reversed(undo_handlers):
                undo()


def _run_worker(self, worker, logger=None, shutdown_interval=None):
    """Run one worker to completion, returning ``(WorkerResult, exitcode)``.

    ``self`` is the owning ``Reloader``; ``logger`` and
    ``shutdown_interval`` default to the reloader's own settings.
    """
    if logger is None:
        logger = self.logger

    if shutdown_interval is None:
        shutdown_interval = self.shutdown_interval

    # commands received from the worker over its pipe, drained in order
    packets = deque()

    def handle_packet(packet):
        # runs on the worker connection's reader thread; wake the main
        # loop below by writing a token to the control pipe
        packets.append(packet)
        os.write(self.control_w, ControlSignal.WORKER_COMMAND)

    self.monitor.clear_changes()

    worker.start(handle_packet)
    result = WorkerResult.WAIT
    # whether to attempt a graceful terminate before force-killing
    soft_kill = True

    logger.info('Starting monitor for PID %s.' % worker.pid)
    try:
        # register the worker with the process group
        self.process_group.add_child(worker.pid)

        while True:
            # process all packets before moving on to signals to avoid
            # missing any files that need to be watched
            if packets:
                cmd = packets.popleft()

                if cmd is None:
                    if worker.is_alive:
                        # the worker socket has died but the process is still
                        # alive (somehow) so wait a brief period to see if it
                        # dies on its own - if it does die then we want to
                        # treat it as a crash and wait for changes before
                        # reloading, if it doesn't die then we want to force
                        # reload the app immediately because it probably
                        # didn't die due to some file changes
                        time.sleep(1)

                    if worker.is_alive:
                        logger.info(
                            'Worker pipe died unexpectedly, triggering a '
                            'reload.'
                        )
                        result = WorkerResult.RELOAD
                        break

                    # worker is dead; surface it as a SIGCHLD-style event
                    os.write(self.control_w, ControlSignal.SIGCHLD)
                    continue

                logger.debug('Received worker command "{}".'.format(cmd[0]))
                if cmd[0] == 'reload':
                    result = WorkerResult.RELOAD
                    break

                elif cmd[0] == 'watch_files':
                    for path in cmd[1]:
                        self.monitor.add_path(path)

                elif cmd[0] == 'graceful_shutdown':
                    os.write(self.control_w, ControlSignal.SIGTERM)

                else:  # pragma: no cover
                    raise RuntimeError('received unknown control signal', cmd)

                # done handling the packet, continue to the next one
                # do not fall through here because it will block
                continue

            # blocking read of the next single-byte control token
            # NOTE(review): this local shadows the stdlib ``signal`` module,
            # which is not otherwise used inside this function
            signal = os.read(self.control_r, 1)

            if not signal:
                logger.error('Control pipe died unexpectedly.')
                result = WorkerResult.EXIT
                break

            elif signal == ControlSignal.SIGINT:
                logger.info('Received SIGINT, waiting for server to exit ...')
                result = WorkerResult.EXIT

                # normally a SIGINT is sent automatically to the process
                # group and we want to avoid forwarding both a SIGINT and a
                # SIGTERM at the same time
                #
                # in the off chance that the SIGINT is not sent, we'll
                # just terminate after waiting shutdown_interval
                soft_kill = False
                break

            elif signal == ControlSignal.SIGHUP:
                logger.info('Received SIGHUP, triggering a reload.')
                result = WorkerResult.RELOAD
                break

            elif signal == ControlSignal.SIGTERM:
                logger.info('Received SIGTERM, triggering a shutdown.')
                result = WorkerResult.EXIT
                break

            elif signal == ControlSignal.FILE_CHANGED:
                if self.monitor.is_changed:
                    result = WorkerResult.RELOAD
                    break

            elif signal == ControlSignal.SIGCHLD:
                if not worker.is_alive:
                    break

        # give the worker a chance to exit gracefully before force-killing
        if worker.is_alive and shutdown_interval:
            if soft_kill:
                logger.info('Gracefully killing the server.')
                worker.kill(soft=True)
            worker.wait(shutdown_interval)

    finally:
        if worker.is_alive:
            logger.info('Server did not exit, forcefully killing.')
            worker.kill()
            worker.join()

        else:
            worker.join()
        logger.debug('Server exited with code %d.' % worker.exitcode)

    return result, worker.exitcode


def wait_main():
    """Placeholder worker run between reloads.

    Blocks until the user requests a reload (ENTER on an interactive
    stdin) or the monitor kills this process because a file changed.
    """
    try:
        reloader = get_reloader()
        if is_stream_interactive(sys.stdin):
            input('Press ENTER or change a file to reload.\n')
            reloader.trigger_reload()
        else:
            # just block while we wait for a file to change
            print('Waiting for a file to change before reload.')
            while True:
                time.sleep(10)
    except KeyboardInterrupt:
        pass


def find_default_monitor_factory(logger):
    """Pick the best available file monitor backend.

    Honors a ``HUPPER_DEFAULT_MONITOR`` dotted-path override, then prefers
    watchman, then watchdog, and finally falls back to stat polling.
    """
    spec = os.getenv('HUPPER_DEFAULT_MONITOR')
    if spec:
        monitor_factory = resolve_spec(spec)

        logger.debug('File monitor backend: ' + spec)
        return monitor_factory

    if is_watchman_supported():
        from .watchman import WatchmanFileMonitor as monitor_factory

        logger.debug('File monitor backend: watchman')
    elif is_watchdog_supported():
        from .watchdog import WatchdogFileMonitor as monitor_factory

        logger.debug('File monitor backend: watchdog')
    else:
        from .polling import PollingFileMonitor as monitor_factory

        logger.debug('File monitor backend: polling')
    return monitor_factory


def start_reloader(
    worker_path,
    reload_interval=1,
    shutdown_interval=default,
    verbose=1,
    logger=None,
    monitor_factory=None,
    worker_args=None,
    worker_kwargs=None,
    ignore_files=None,
):
    """
    Start a monitor and then fork a worker process which starts by executing
    the importable function at ``worker_path``.

    When called from a worker that is already being monitored this simply
    returns the active :class:`hupper.interfaces.IReloaderProxy`, which can
    be used to communicate with the monitor.

    ``worker_path`` is a dotted string pointing to a globally importable
    function used to start the worker (e.g. ``myapp.cli.main``) — usually
    the very function that invoked ``start_reloader``.

    ``reload_interval`` (seconds) throttles restarts. Default ``1``.

    ``shutdown_interval`` (seconds) controls the graceful shutdown window
    for the server; ``None`` disables graceful shutdown. Defaults to
    ``reload_interval``.

    ``verbose`` controls the output. Set to ``0`` to turn off any logging
    of activity and turn up to ``2`` for extra output. Default is ``1``.

    ``logger``, if supplied, supersedes ``verbose`` and must implement
    :class:`hupper.interfaces.ILogger`.

    ``monitor_factory`` is an instance of
    :class:`hupper.interfaces.IFileMonitorFactory`. When ``None``, the
    ``HUPPER_DEFAULT_MONITOR`` environment variable may name one as a
    dotted python path; otherwise watchman, then
    :class:`hupper.watchdog.WatchdogFileMonitor` (if `watchdog
    <https://pypi.org/project/watchdog/>`_ is installed), then the
    less efficient :class:`hupper.polling.PollingFileMonitor` are used.

    ``ignore_files``, if provided, must be an iterable of shell-style
    patterns to ignore.
    """
    if is_active():
        # already running under a monitor; hand back the proxy
        return get_reloader()

    active_logger = logger if logger is not None else DefaultLogger(verbose)
    factory = (
        monitor_factory
        if monitor_factory is not None
        else find_default_monitor_factory(active_logger)
    )
    effective_shutdown = (
        reload_interval if shutdown_interval is default else shutdown_interval
    )

    if reload_interval <= 0:
        raise ValueError(
            'reload_interval must be greater than 0 to avoid spinning'
        )

    return Reloader(
        worker_path=worker_path,
        worker_args=worker_args,
        worker_kwargs=worker_kwargs,
        reload_interval=reload_interval,
        shutdown_interval=effective_shutdown,
        monitor_factory=factory,
        logger=active_logger,
        ignore_files=ignore_files,
    ).run()
0707010000001C000081A400000000000000000000000165B376D00000054E000000000000000000000000000000000000002200000000hupper-1.12.1/src/hupper/utils.pyimport importlib
import json
import os
import subprocess
import sys

WIN = sys.platform == 'win32'


class Sentinel(object):
    """A named placeholder object with a readable repr (e.g. ``<default>``)."""

    def __init__(self, name):
        # the display name used by __repr__
        self.name = name

    def __repr__(self):
        return '<{}>'.format(self.name)


default = Sentinel('default')


def resolve_spec(spec):
    """Resolve a dotted string like ``'pkg.module.attr'`` to the attribute.

    Everything before the final dot is imported as a module and the last
    segment is looked up on it with ``getattr``.
    """
    modname, funcname = spec.rsplit('.', 1)
    return getattr(importlib.import_module(modname), funcname)


def is_watchdog_supported():
    """Return ``True`` if the optional ``watchdog`` package is importable."""
    try:
        importlib.import_module('watchdog')
    except ImportError:
        return False
    return True


def is_watchman_supported():
    """Return ``True`` if a watchman daemon socket can be located."""
    if WIN:
        # for now we aren't bothering with windows sockets
        return False

    try:
        return bool(get_watchman_sockpath())
    except Exception:
        # any failure (missing binary, daemon down, ...) means unsupported
        return False


def get_watchman_sockpath(binpath='watchman'):
    """Return the watchman socket path or raise.

    Prefers the ``WATCHMAN_SOCK`` environment variable and falls back to
    asking the ``watchman`` binary via ``get-sockname``.
    """
    env_path = os.environ.get('WATCHMAN_SOCK')
    if env_path:
        return env_path

    output = subprocess.check_output(
        [binpath, '--output-encoding=json', 'get-sockname']
    )
    return json.loads(output)['sockname']


def is_stream_interactive(stream):
    """Return ``True`` if ``stream`` is a tty (and not ``None``)."""
    if stream is None:
        return False
    return stream.isatty()
0707010000001D000081A400000000000000000000000165B376D0000007A9000000000000000000000000000000000000002500000000hupper-1.12.1/src/hupper/watchdog.py# check ``hupper.utils.is_watchdog_supported`` before using this module
from __future__ import absolute_import
import os.path
import threading
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer

from .interfaces import IFileMonitor


class WatchdogFileMonitor(FileSystemEventHandler, Observer, IFileMonitor):
    """
    An :class:`hupper.interfaces.IFileMonitor` that uses ``watchdog``
    to watch for file changes using the platform-native facility
    (e.g. inotify on Linux).

    ``callback`` is a callable that accepts a path to a changed file.

    ``logger`` is an :class:`hupper.interfaces.ILogger` instance.

    """

    def __init__(self, callback, logger, **kw):
        super(WatchdogFileMonitor, self).__init__()
        self.callback = callback
        self.logger = logger
        # individual file paths we report changes for
        self.paths = set()
        # parent directories already scheduled on the observer
        self.dirpaths = set()
        # guards paths/dirpaths; watchdog delivers events on its own threads
        self.lock = threading.Lock()

    def add_path(self, path):
        # watchdog watches directories, so schedule the parent directory
        # once and filter events down to explicitly-registered files
        with self.lock:
            dirpath = os.path.dirname(path)
            if dirpath not in self.dirpaths:
                try:
                    self.schedule(self, dirpath)
                except OSError as ex:  # pragma: no cover
                    # watchdog raises exceptions if folders are missing
                    # or if the ulimit is passed
                    self.logger.error('watchdog error: ' + str(ex))
                else:
                    self.dirpaths.add(dirpath)

            if path not in self.paths:
                self.paths.add(path)

    def _check(self, path):
        # invoke the callback only for files explicitly registered
        with self.lock:
            if path in self.paths:
                self.callback(path)

    def on_created(self, event):
        self._check(event.src_path)

    def on_modified(self, event):
        self._check(event.src_path)

    def on_moved(self, event):
        self._check(event.src_path)
        self._check(event.dest_path)
        # start tracking the destination in case it changes again later
        self.add_path(event.dest_path)

    def on_deleted(self, event):
        self._check(event.src_path)
0707010000001E000081A400000000000000000000000165B376D000001B24000000000000000000000000000000000000002500000000hupper-1.12.1/src/hupper/watchman.py# check ``hupper.utils.is_watchman_supported`` before using this module
import errno
import json
import os
import queue
import select
import socket
import threading
import time

from .interfaces import IFileMonitor
from .utils import get_watchman_sockpath


class WatchmanFileMonitor(threading.Thread, IFileMonitor):
    """
    An :class:`hupper.interfaces.IFileMonitor` that uses Facebook's
    ``watchman`` daemon to detect changes.

    ``callback`` is a callable that accepts a path to a changed file.

    """

    def __init__(
        self,
        callback,
        logger,
        sockpath=None,
        binpath='watchman',
        timeout=10.0,
        **kw,
    ):
        super(WatchmanFileMonitor, self).__init__()
        self.callback = callback
        self.logger = logger
        # roots registered with the daemon via "watch-project"
        self.watches = set()
        # individual file paths we report changes for
        self.paths = set()
        # guards watches/paths across this thread and callers
        self.lock = threading.Lock()
        self.enabled = True
        self.sockpath = sockpath
        self.binpath = binpath
        # seconds to wait on socket reads and query responses
        self.timeout = timeout
        # queue of request/response (non-unilateral) payloads from the daemon
        self.responses = queue.Queue()
        # NOTE(review): ``self._sock`` is first assigned in start(); calling
        # stop() before start() would raise AttributeError - confirm callers
        # always start the monitor before stopping it.

    def add_path(self, path):
        # Register ``path`` and, if its directory is not yet covered by an
        # existing watch root, subscribe to a new root for it.
        is_new_root = False
        with self.lock:
            root = os.path.dirname(path)
            for watch in self.watches:
                if watch == root or root.startswith(watch + os.sep):
                    break
            else:
                is_new_root = True

            if path not in self.paths:
                self.paths.add(path)

        # it's important to release the above lock before invoking _watch
        # on a new root to prevent deadlocks
        if is_new_root:
            self._watch(root)

    def start(self):
        # connect to the daemon and verify it responds before launching
        # the reader thread
        sockpath = self._resolve_sockpath()
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.connect(sockpath)
        self._sock = sock
        self._recvbufs = []

        self._send(['version'])
        result = self._recv()
        self.logger.debug('watchman v' + result['version'] + '.')

        super(WatchmanFileMonitor, self).start()

    def join(self):
        # always release the socket once the reader thread has exited
        try:
            return super(WatchmanFileMonitor, self).join()
        finally:
            self._close_sock()

    def stop(self):
        # closing the socket unblocks the reader thread (see run/_readline)
        self.enabled = False
        self._close_sock()

    def run(self):
        # reader loop: receive payloads until stopped or the daemon goes away
        while self.enabled:
            try:
                result = self._recv()
            except socket.timeout:
                continue
            except OSError as ex:
                if ex.errno == errno.EBADF:
                    # this means the socket is closed which should only happen
                    # when stop is invoked, leaving enabled false
                    if self.enabled:
                        self.logger.error(
                            'Lost connection to watchman. No longer watching'
                            ' for changes.'
                        )
                    break
                raise

            self._handle_result(result)

    def _handle_result(self, result):
        # Dispatch a decoded payload: log warnings/errors, fire the callback
        # for subscription events, and queue request/response payloads.
        if 'warning' in result:
            self.logger.error('watchman warning: ' + result['warning'])

        if 'error' in result:
            self.logger.error('watchman error: ' + result['error'])

        if 'subscription' in result:
            root = result['root']

            if result.get('canceled'):
                self.logger.info(
                    'watchman has stopped following root: ' + root
                )
                with self.lock:
                    self.watches.remove(root)

            else:
                files = result['files']
                with self.lock:
                    for f in files:
                        if isinstance(f, dict):
                            f = f['name']
                        path = os.path.join(root, f)
                        if path in self.paths:
                            self.callback(path)

        if not self._is_unilateral(result):
            self.responses.put(result)

    def _is_unilateral(self, result):
        # unilateral payloads are daemon-initiated and must not be treated
        # as responses to our queries
        if 'unilateral' in result and result['unilateral']:
            return True
        # fallback to checking for known unilateral responses
        for k in ['log', 'subscription']:
            if k in result:
                return True
        return False

    def _close_sock(self):
        # idempotent best-effort close; errors are ignored
        if self._sock:
            try:
                self._sock.close()
            except Exception:
                pass
            finally:
                self._sock = None

    def _resolve_sockpath(self):
        # explicit sockpath wins; otherwise ask the watchman binary
        if self.sockpath:
            return self.sockpath
        return get_watchman_sockpath(self.binpath)

    def _watch(self, root):
        # Register ``root`` with the daemon and subscribe to file events.
        # "watch-project" may return a parent directory as the actual watch.
        result = self._query(['watch-project', root])
        if result['watch'] != root:
            root = result['watch']
        self._query(
            [
                'subscribe',
                root,
                '{}.{}.{}'.format(os.getpid(), id(self), root),
                {
                    # +1 second because we don't want any buffered changes
                    # if the daemon is already watching the folder
                    'since': int(time.time() + 1),
                    'expression': ['type', 'f'],
                    'fields': ['name'],
                },
            ]
        )
        self.logger.debug('watchman is now tracking root: ' + root)
        with self.lock:
            self.watches.add(root)

    def _readline(self):
        # Return the next newline-terminated payload as bytes, buffering
        # partial reads across calls in self._recvbufs.
        # buffer may already have a line
        if len(self._recvbufs) == 1 and b'\n' in self._recvbufs[0]:
            line, b = self._recvbufs[0].split(b'\n', 1)
            self._recvbufs = [b]
            return line

        while True:
            # use select because it unblocks immediately when the socket is
            # closed unlike sock.settimeout which does not
            ready_r, _, _ = select.select([self._sock], [], [], self.timeout)
            if self._sock not in ready_r:
                continue
            b = self._sock.recv(4096)
            if not b:
                self.logger.error(
                    'Lost connection to watchman. No longer watching for'
                    ' changes.'
                )
                self.stop()
                raise socket.timeout
            if b'\n' in b:
                result = b''.join(self._recvbufs)
                line, b = b.split(b'\n', 1)
                self._recvbufs = [b]
                return result + line
            self._recvbufs.append(b)

    def _recv(self):
        # decode one JSON payload; corrupted payloads yield an empty dict
        line = self._readline().decode('utf8')
        try:
            return json.loads(line)
        except Exception:  # pragma: no cover
            self.logger.info(
                'Ignoring corrupted payload from watchman: ' + line
            )
            return {}

    def _send(self, msg):
        # watchman's JSON protocol is newline-delimited
        cmd = json.dumps(msg).encode('ascii')
        self._sock.sendall(cmd + b'\n')

    def _query(self, msg, timeout=None):
        # send a request and block for its (non-unilateral) response,
        # which the reader thread places on self.responses
        self._send(msg)
        return self.responses.get(timeout=timeout)
0707010000001F000081A400000000000000000000000165B376D0000015CD000000000000000000000000000000000000002300000000hupper-1.12.1/src/hupper/winapi.pyimport ctypes
from ctypes import WINFUNCTYPE, wintypes

# kernel32 with per-call error capture (read via ctypes.get_last_error)
kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)

# pointer-sized integer type, depending on 32- vs 64-bit interpreter
if ctypes.sizeof(ctypes.c_void_p) == 8:
    ULONG_PTR = ctypes.c_int64
else:
    ULONG_PTR = ctypes.c_ulong
BOOL = wintypes.BOOL
DWORD = wintypes.DWORD
HANDLE = wintypes.HANDLE
LARGE_INTEGER = wintypes.LARGE_INTEGER
SIZE_T = ULONG_PTR
ULONGLONG = ctypes.c_uint64
# signature of console control handlers: BOOL handler(DWORD ctrl_type)
PHANDLER_ROUTINE = WINFUNCTYPE(BOOL, DWORD)

# JOBOBJECTINFOCLASS values used with SetInformationJobObject
JobObjectAssociateCompletionPortInformation = 7
JobObjectBasicLimitInformation = 2
JobObjectBasicUIRestrictions = 4
JobObjectEndOfJobTimeInformation = 6
JobObjectExtendedLimitInformation = 9
JobObjectSecurityLimitInformation = 5
JobObjectGroupInformation = 11

# limit flag: kill all job processes when the last job handle closes
JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE = 0x2000

# generic (standard) access rights
DELETE = 0x00010000
READ_CONTROL = 0x00020000
SYNCHRONIZE = 0x00100000
WRITE_DAC = 0x00040000
WRITE_OWNER = 0x00080000
STANDARD_RIGHTS_REQUIRED = DELETE | READ_CONTROL | WRITE_DAC | WRITE_OWNER

# process-specific access rights (OpenProcess / DuplicateHandle)
PROCESS_CREATE_PROCESS = 0x0080
PROCESS_CREATE_THREAD = 0x0002
PROCESS_DUP_HANDLE = 0x0040
PROCESS_QUERY_INFORMATION = 0x0400
PROCESS_QUERY_LIMITED_INFORMATION = 0x1000
PROCESS_SET_INFORMATION = 0x0200
PROCESS_SET_QUOTA = 0x0100
PROCESS_SUSPEND_RESUME = 0x0800
PROCESS_TERMINATE = 0x0001
PROCESS_VM_OPERATION = 0x0008
PROCESS_VM_READ = 0x0010
PROCESS_VM_WRITE = 0x0020
PROCESS_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0xFFF

# DuplicateHandle option: copy with the same access as the source handle
DUPLICATE_SAME_ACCESS = 0x0002

# SetHandleInformation flag masks
HANDLE_FLAG_INHERIT = 0x0001
HANDLE_FLAG_PROTECT_FROM_CLOSE = 0x0002


class IO_COUNTERS(ctypes.Structure):
    # mirrors the Win32 IO_COUNTERS struct (per-job I/O accounting)
    _fields_ = [
        ('ReadOperationCount', ULONGLONG),
        ('WriteOperationCount', ULONGLONG),
        ('OtherOperationCount', ULONGLONG),
        ('ReadTransferCount', ULONGLONG),
        ('WriteTransferCount', ULONGLONG),
        ('OtherTransferCount', ULONGLONG),
    ]


class JOBOBJECT_BASIC_LIMIT_INFORMATION(ctypes.Structure):
    # mirrors the Win32 JOBOBJECT_BASIC_LIMIT_INFORMATION struct;
    # LimitFlags selects which of the other fields are honored
    _fields_ = [
        ('PerProcessUserTimeLimit', LARGE_INTEGER),
        ('PerJobUserTimeLimit', LARGE_INTEGER),
        ('LimitFlags', DWORD),
        ('MinimumWorkingSetSize', SIZE_T),
        ('MaximumWorkingSetSize', SIZE_T),
        ('ActiveProcessLimit', DWORD),
        ('Affinity', ULONG_PTR),
        ('PriorityClass', DWORD),
        ('SchedulingClass', DWORD),
    ]


class JOBOBJECT_EXTENDED_LIMIT_INFORMATION(ctypes.Structure):
    # mirrors the Win32 JOBOBJECT_EXTENDED_LIMIT_INFORMATION struct,
    # embedding the basic limits above
    _fields_ = [
        ('BasicLimitInformation', JOBOBJECT_BASIC_LIMIT_INFORMATION),
        ('IoInfo', IO_COUNTERS),
        ('ProcessMemoryLimit', SIZE_T),
        ('JobMemoryLimit', SIZE_T),
        ('PeakProcessMemoryUsed', SIZE_T),
        ('PeakJobMemoryUsed', SIZE_T),
    ]


class Handle(HANDLE):
    # Thin wrapper over a raw HANDLE adding close-once semantics.
    closed = False

    def Close(self):
        # idempotent: only the first call releases the OS handle
        if not self.closed:
            self.closed = True
            CloseHandle(self)

    def Detach(self):
        # give up ownership without closing; returns the raw handle value
        if not self.closed:
            self.closed = True
            return self.value
        raise ValueError("already closed")

    def __repr__(self):
        # NOTE(review): assumes ``value`` is an int; a NULL handle renders
        # with value 0 - confirm no Handle is constructed from None
        return "%s(%d)" % (self.__class__.__name__, self.value)

    # release the OS handle when the wrapper is garbage collected
    __del__ = Close
    __str__ = __repr__


def CloseHandle(h):
    """Close a raw handle; failure is deliberately ignored."""
    kernel32.CloseHandle(h)


def CheckError(result, msg):
    """Raise ``OSError`` built from ``GetLastError`` when ``result`` is falsy."""
    if result:
        return
    raise ctypes.WinError(ctypes.get_last_error(), msg)


def DuplicateHandle(
    hSourceProcess,
    hSourceHandle,
    hTargetProcess,
    desiredAccess,
    inheritHandle,
    options,
):
    """Duplicate ``hSourceHandle`` into ``hTargetProcess``.

    Returns an owned :class:`Handle`; raises ``OSError`` on failure.
    """
    targetHandle = wintypes.HANDLE()
    ret = kernel32.DuplicateHandle(
        hSourceProcess,
        hSourceHandle,
        hTargetProcess,
        ctypes.byref(targetHandle),
        desiredAccess,
        inheritHandle,
        options,
    )
    CheckError(ret, 'failed to duplicate handle')
    return Handle(targetHandle.value)


def GetCurrentProcess():
    """Return a :class:`Handle` wrapping ``kernel32.GetCurrentProcess()``."""
    hp = kernel32.GetCurrentProcess()
    return Handle(hp)


def OpenProcess(desiredAccess, inherit, pid):
    """Open process ``pid`` and return an owned :class:`Handle`.

    Raises ``OSError`` if the process cannot be opened.
    """
    hp = kernel32.OpenProcess(desiredAccess, inherit, pid)
    CheckError(hp, 'failed to open process')
    return Handle(hp)


def CreateJobObject(jobAttributes, name):
    """Create (or open) a job object and return an owned :class:`Handle`.

    NOTE(review): this calls the ANSI ``CreateJobObjectA``, so ``name``
    should be ``bytes`` or ``None`` - confirm callers.
    """
    hp = kernel32.CreateJobObjectA(jobAttributes, name)
    CheckError(hp, 'failed to create job object')
    return Handle(hp)


def SetInformationJobObject(hJob, infoType, jobObjectInfo):
    """Apply a limit/info struct to job ``hJob``.

    ``infoType`` is one of the ``JobObject*`` constants above and
    ``jobObjectInfo`` the matching ctypes structure.
    """
    ret = kernel32.SetInformationJobObject(
        hJob,
        infoType,
        ctypes.byref(jobObjectInfo),
        ctypes.sizeof(jobObjectInfo),
    )
    CheckError(ret, 'failed to set information job object')


def AssignProcessToJobObject(hJob, hProcess):
    """Attach ``hProcess`` to job ``hJob``; raises ``OSError`` on failure."""
    ret = kernel32.AssignProcessToJobObject(hJob, hProcess)
    CheckError(ret, 'failed to assign process to job object')


def SetHandleInformation(h, dwMask, dwFlags):
    """Set HANDLE_FLAG_* bits selected by ``dwMask`` on handle ``h``."""
    ret = kernel32.SetHandleInformation(h, dwMask, dwFlags)
    CheckError(ret, 'failed to set handle information')


# dwCtrlType values delivered to console control handlers
CTRL_C_EVENT = 0
CTRL_BREAK_EVENT = 1
CTRL_CLOSE_EVENT = 2
CTRL_LOGOFF_EVENT = 5
CTRL_SHUTDOWN_EVENT = 6


def SetConsoleCtrlHandler(handler, add):
    """Register (``add`` truthy) or remove (falsy) a console ctrl handler.

    ``handler`` must be a ``PHANDLER_ROUTINE`` callback.  Raises
    ``OSError`` on failure.
    """
    SetConsoleCtrlHandler = kernel32.SetConsoleCtrlHandler
    SetConsoleCtrlHandler.argtypes = (PHANDLER_ROUTINE, BOOL)
    SetConsoleCtrlHandler.restype = BOOL

    ret = SetConsoleCtrlHandler(handler, add)
    # fixed typo in the error message ("failed in to set ...")
    CheckError(ret, 'failed to set console ctrl handler')


def AddConsoleCtrlHandler(handler):
    """Invoke ``handler`` on console ctrl events; return an unregister callable.

    NOTE(review): the ctypes callback is kept alive only by the returned
    unregister callable's closure - confirm callers retain it while the
    handler is registered.
    """
    @PHANDLER_ROUTINE
    def console_handler(ctrl_type):
        # claim the event (return True) only for the ctrl types we handle
        if ctrl_type in (
            CTRL_C_EVENT,
            CTRL_BREAK_EVENT,
            CTRL_CLOSE_EVENT,
            CTRL_LOGOFF_EVENT,
            CTRL_SHUTDOWN_EVENT,
        ):
            handler()
            return True
        return False

    SetConsoleCtrlHandler(console_handler, True)
    return lambda: SetConsoleCtrlHandler(console_handler, False)
07070100000020000081A400000000000000000000000165B376D0000021C2000000000000000000000000000000000000002300000000hupper-1.12.1/src/hupper/worker.pyfrom _thread import interrupt_main
from importlib.util import source_from_cache
import os
import signal
import site
import sys
import sysconfig
import threading
import time
import traceback

from . import ipc
from .interfaces import IReloaderProxy
from .utils import resolve_spec


class WatchSysModules(threading.Thread):
    """Poll ``sys.modules`` for imported modules."""

    # seconds between scans of sys.modules
    poll_interval = 1
    # skip files living under site-packages / the stdlib
    ignore_system_paths = True

    def __init__(self, callback):
        super(WatchSysModules, self).__init__()
        # paths already reported to ``callback``
        self.paths = set()
        # callable invoked with a list of newly discovered paths
        self.callback = callback
        self.lock = threading.Lock()
        self.stopped = False
        self.system_paths = get_system_paths()

    def run(self):
        while not self.stopped:
            self.update_paths()
            time.sleep(self.poll_interval)

    def stop(self):
        # checked by run() once per poll interval
        self.stopped = True

    def update_paths(self):
        """Check sys.modules for paths to add to our path set."""
        new_paths = []
        with self.lock:
            for path in expand_source_paths(iter_module_paths()):
                if path not in self.paths:
                    self.paths.add(path)
                    new_paths.append(path)
        if new_paths:
            self.watch_paths(new_paths)

    def search_traceback(self, tb):
        """Inspect a traceback for new paths to add to our path set."""
        new_paths = []
        with self.lock:
            for filename, *_ in traceback.extract_tb(tb):
                path = os.path.abspath(filename)
                if path not in self.paths:
                    self.paths.add(path)
                    new_paths.append(path)
        if new_paths:
            self.watch_paths(new_paths)

    def watch_paths(self, paths):
        # forward new paths to the callback, optionally dropping system ones
        if self.ignore_system_paths:
            paths = [path for path in paths if not self.in_system_paths(path)]
        if paths:
            self.callback(paths)

    def in_system_paths(self, path):
        # use realpath to only ignore files that live in a system path
        # versus a symlink which lives elsewhere
        path = os.path.realpath(path)
        for prefix in self.system_paths:
            if path.startswith(prefix):
                return True
        return False


def get_py_path(path):
    """Map a ``.pyc``/``.pyo`` path to its ``.py`` source path.

    Uses :func:`importlib.util.source_from_cache` for ``__pycache__``
    layouts and falls back to stripping the trailing character for
    legacy solitary ``*.pyc`` files.
    """
    try:
        src = source_from_cache(path)
    except ValueError:
        # e.g. 'pkg/mod.pyc' -> 'pkg/mod.py'
        src = path[:-1]
    return src


def get_site_packages():  # pragma: no cover
    """Return the list of site-packages directories, best-effort."""
    try:
        paths = site.getsitepackages()
        if site.ENABLE_USER_SITE:
            paths.append(site.getusersitepackages())
        return paths

    # virtualenv does not ship with a getsitepackages impl so we fallback
    # to using distutils if we can
    # https://github.com/pypa/virtualenv/issues/355
    except Exception:
        try:
            # NOTE(review): distutils was removed in Python 3.12; the outer
            # except keeps this safe by returning [] there
            from distutils.sysconfig import get_python_lib

            return [get_python_lib()]

        # just incase, don't fail here, it's not worth it
        except Exception:
            return []


def get_system_paths():
    """Return site-packages plus the interpreter's stdlib/purelib dirs."""
    paths = get_site_packages()
    for name in {'stdlib', 'platstdlib', 'platlib', 'purelib'}:
        candidate = sysconfig.get_path(name)
        if candidate is None:
            continue
        paths.append(candidate)
    return paths


def expand_source_paths(paths):
    """Yield each path, mapping compiled files to their source file.

    A ``.pyc``/``.pyo`` path is replaced by its ``.py`` source only when
    that source exists, avoiding double-reloads on platforms where the
    source and compiled files change at different times.
    """
    for path in paths:
        if path.endswith(('.pyc', '.pyo')):
            candidate = get_py_path(path)
            if os.path.exists(candidate):
                path = candidate
        yield path


def iter_module_paths(modules=None):
    """Yield the absolute path of every file-backed module.

    ``modules`` defaults to a snapshot of ``sys.modules``.
    """
    modules = modules or list(sys.modules.values())
    for module in modules:
        try:
            filename = module.__file__
        except (AttributeError, ImportError):  # pragma: no cover
            # builtin / namespace / lazy modules may not expose __file__
            continue
        if filename is None:
            continue
        abs_filename = os.path.abspath(filename)
        if os.path.isfile(abs_filename):
            yield abs_filename


class Worker(object):
    """A helper object for managing a worker process lifecycle."""

    def __init__(self, spec, args=None, kwargs=None):
        super(Worker, self).__init__()
        # dotted path of the worker entry point, resolved in the child
        self.worker_spec = spec
        self.worker_args = args
        self.worker_kwargs = kwargs
        # parent keeps ``pipe``; the child inherits ``_child_pipe``
        self.pipe, self._child_pipe = ipc.Pipe()
        self.pid = None
        self.process = None
        self.exitcode = None
        # termios state captured before spawning, restored in join()
        self.stdin_termios = None

    def start(self, on_packet=None):
        """Spawn the child process and activate the control pipe.

        ``on_packet`` is invoked for each packet received from the child.
        """
        self.stdin_termios = ipc.snapshot_termios(sys.stdin)

        kw = dict(
            spec=self.worker_spec,
            spec_args=self.worker_args,
            spec_kwargs=self.worker_kwargs,
            pipe=self._child_pipe,
        )
        self.process = ipc.spawn(
            __name__ + '.worker_main',
            kwargs=kw,
            pass_fds=[self._child_pipe.r_fd, self._child_pipe.w_fd],
        )
        self.pid = self.process.pid

        # activate the pipe after forking
        self.pipe.activate(on_packet)

        # kill the child side of the pipe after forking as the child is now
        # responsible for it
        self._child_pipe.close()

    @property
    def is_alive(self):
        # True while the child process has not yet exited
        if self.exitcode is not None:
            return False
        if self.process:
            # a zero-timeout wait returns None while still running
            return ipc.wait(self.process, timeout=0) is None
        return False

    def kill(self, soft=False):
        # ``soft`` is forwarded to ipc.kill (presumably a gentler signal
        # than a hard kill - see hupper.ipc)
        return ipc.kill(self.process, soft=soft)

    def wait(self, timeout=None):
        # block up to ``timeout`` for the child's exit code
        return ipc.wait(self.process, timeout=timeout)

    def join(self):
        """Wait for the child to exit and release parent-side resources."""
        self.exitcode = self.wait()

        if self.stdin_termios:
            ipc.restore_termios(sys.stdin, self.stdin_termios)

        if self.pipe:
            try:
                self.pipe.close()
            except Exception:  # pragma: no cover
                pass
            finally:
                self.pipe = None


# set when the current process is being monitored; holds the worker-side
# ReloaderProxy connected to the parent reloader, else None
_reloader_proxy = None


def get_reloader():
    """
    Get a reference to the current :class:`hupper.interfaces.IReloaderProxy`.

    Raises a ``RuntimeError`` if the current process is not actively being
    monitored by a parent process.

    """
    proxy = _reloader_proxy
    if proxy is None:
        raise RuntimeError('process is not controlled by hupper')
    return proxy


def is_active():
    """
    Return ``True`` if the current process is being monitored by a parent
    process.

    """
    if _reloader_proxy is None:
        return False
    return True


class ReloaderProxy(IReloaderProxy):
    # Worker-side handle that talks to the parent reloader over the
    # control pipe using (command, args...) tuples.

    def __init__(self, pipe):
        self.pipe = pipe

    def watch_files(self, files):
        # ask the parent to track extra files (paths normalized first)
        files = [os.path.abspath(f) for f in files]
        self.pipe.send(('watch_files', files))

    def trigger_reload(self):
        # ask the parent to restart this worker
        self.pipe.send(('reload',))

    def graceful_shutdown(self):
        # request a graceful shutdown, handled by the parent reloader
        self.pipe.send(('graceful_shutdown',))


def watch_control_pipe(pipe):
    """Begin reading control packets from the parent process.

    A ``None`` packet means the pipe was closed, in which case
    ``interrupt_main`` raises ``KeyboardInterrupt`` in the main thread.
    """
    def on_packet(packet):
        if packet is None:
            # the parent went away; interrupt the worker's main thread
            interrupt_main()

    pipe.activate(on_packet)


def worker_main(spec, pipe, spec_args=None, spec_kwargs=None):
    """Entry point of the monitored child process.

    Resolves ``spec`` to a callable and invokes it with ``spec_args`` /
    ``spec_kwargs`` while reporting imported module paths back to the
    parent reloader over ``pipe``.
    """
    if spec_args is None:
        spec_args = []
    if spec_kwargs is None:
        spec_kwargs = {}

    # activate the pipe after forking
    watch_control_pipe(pipe)

    # SIGHUP is not supported on windows
    if hasattr(signal, 'SIGHUP'):
        signal.signal(signal.SIGHUP, signal.SIG_IGN)

    # disable pyc files for project code because it can cause timestamp
    # issues in which files are reloaded twice
    sys.dont_write_bytecode = True

    global _reloader_proxy
    _reloader_proxy = ReloaderProxy(pipe)

    poller = WatchSysModules(_reloader_proxy.watch_files)
    poller.daemon = True
    poller.start()

    # import the worker path before polling sys.modules
    func = resolve_spec(spec)

    # start the worker
    try:
        func(*spec_args, **spec_kwargs)
    except BaseException:  # catch any error
        try:
            # add files from the traceback before crashing
            poller.search_traceback(sys.exc_info()[2])
        except Exception:  # pragma: no cover
            pass
        raise
    finally:
        try:
            # attempt to send imported paths to the reloader process prior to
            # closing
            poller.update_paths()
            poller.stop()
            poller.join()
        except Exception:  # pragma: no cover
            pass
07070100000021000041ED00000000000000000000000265B376D000000000000000000000000000000000000000000000001400000000hupper-1.12.1/tests07070100000022000081A400000000000000000000000165B376D000000000000000000000000000000000000000000000002000000000hupper-1.12.1/tests/__init__.py07070100000023000081A400000000000000000000000165B376D0000005F8000000000000000000000000000000000000002000000000hupper-1.12.1/tests/conftest.pyfrom __future__ import print_function
import pytest
import sys

from . import util


def err(msg):  # pragma: no cover
    """Write ``msg`` followed by a newline to stderr."""
    sys.stderr.write('%s\n' % msg)


@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    """Stash each phase's report on the test item (used by ``testapp``)."""
    # execute all other hooks to obtain the report object
    outcome = yield
    rep = outcome.get_result()

    # set an report attribute for each phase of a call, which can
    # be "setup", "call", "teardown"
    setattr(item, "rep_" + rep.when, rep)


@pytest.fixture
def testapp(request):
    # launch a util.TestApp and, if the test failed and the app exited,
    # dump its identity and captured output for debugging
    app = util.TestApp()
    try:
        yield app
    finally:
        app.stop()
    if (
        request.node.rep_call.failed and app.exitcode is not None
    ):  # pragma: no cover
        err(
            '-- test app failed --\nname=%s\nargs=%s\ncode=%s'
            % (app.name, app.args, app.exitcode)
        )
        err('-- stdout --\n%s' % app.stdout)
        err('-- stderr --\n%s' % app.stderr)


class DummyLogger:
    """Test double for ``hupper.interfaces.ILogger`` that records messages."""

    def __init__(self):
        self.messages = []

    def reset(self):
        """Discard all recorded messages."""
        self.messages = []

    def get_output(self, *levels):
        """Join recorded messages for ``levels`` (default: all) with newlines."""
        selected = levels if levels else {'info', 'error', 'debug'}
        lines = [msg for lvl, msg in self.messages if lvl in selected]
        return '\n'.join(lines)

    def _record(self, level, msg):
        # internal helper shared by the level-specific methods
        self.messages.append((level, msg))

    def info(self, msg):
        self._record('info', msg)

    def error(self, msg):
        self._record('error', msg)

    def debug(self, msg):
        self._record('debug', msg)


@pytest.fixture
def logger():
    # a fresh DummyLogger per test
    return DummyLogger()
07070100000024000041ED00000000000000000000000265B376D000000000000000000000000000000000000000000000001A00000000hupper-1.12.1/tests/myapp07070100000025000081A400000000000000000000000165B376D0000000FB000000000000000000000000000000000000002600000000hupper-1.12.1/tests/myapp/__init__.pyimport pytest_cov.embed
import signal
import sys


def cleanup(*args, **kwargs):  # pragma: no cover
    """SIGTERM handler that flushes coverage data before exiting non-zero."""
    # see https://github.com/pytest-dev/pytest-cov/issues/139
    pytest_cov.embed.cleanup()
    sys.exit(1)


# the test app is killed via SIGTERM; still record coverage for the child
signal.signal(signal.SIGTERM, cleanup)
07070100000026000081A400000000000000000000000165B376D000000045000000000000000000000000000000000000002600000000hupper-1.12.1/tests/myapp/__main__.pyimport sys

from .cli import main

sys.exit(main(sys.argv[1:]) or 0)
07070100000027000081A400000000000000000000000165B376D000000847000000000000000000000000000000000000002100000000hupper-1.12.1/tests/myapp/cli.pyimport argparse
import os
import sys
import time

import hupper

here = os.path.dirname(__file__)


def parse_options(args):
    """Parse the test application's command line into a Namespace.

    Boolean flags select the reloader backend; the ``*-interval`` options
    tune its timing; ``--watch-file`` may be given multiple times.
    """
    parser = argparse.ArgumentParser()
    for flag in ('--reload', '--watchman', '--watchdog', '--poll'):
        parser.add_argument(flag, action='store_true')
    parser.add_argument('--callback-file')
    parser.add_argument(
        '--watch-file', action='append', dest='watch_files', default=[]
    )
    for flag in ('--poll-interval', '--reload-interval', '--shutdown-interval'):
        parser.add_argument(flag, type=int)
    return parser.parse_args(args)


def main(args=None):
    """Run the test app, optionally restarting itself under the reloader.

    Flags select which file-monitor backend the reloader uses and which
    extra files to watch; the app then idles until interrupted.
    """
    if args is None:
        args = sys.argv[1:]
    opts = parse_options(args)
    if opts.reload:
        kw = {}
        if opts.poll:
            from hupper.polling import PollingFileMonitor

            pkw = {}
            if opts.poll_interval:
                pkw['poll_interval'] = opts.poll_interval
            kw['monitor_factory'] = lambda cb: PollingFileMonitor(cb, **pkw)

        if opts.watchdog:
            from hupper.watchdog import WatchdogFileMonitor

            kw['monitor_factory'] = WatchdogFileMonitor

        if opts.watchman:
            from hupper.watchman import WatchmanFileMonitor

            kw['monitor_factory'] = WatchmanFileMonitor

        if opts.reload_interval is not None:
            kw['reload_interval'] = opts.reload_interval

        if opts.shutdown_interval is not None:
            kw['shutdown_interval'] = opts.shutdown_interval

        hupper.start_reloader(__name__ + '.main', **kw)

    if hupper.is_active():
        # watch a non-python file plus any --watch-file arguments
        hupper.get_reloader().watch_files([os.path.join(here, 'foo.ini')])
        hupper.get_reloader().watch_files(opts.watch_files)

    if opts.callback_file:
        # append a timestamp so tests can count (re)starts
        with open(opts.callback_file, 'ab') as fp:
            fp.write('{:d}\n'.format(int(time.time())).encode('utf8'))
    try:
        # idle until killed; Ctrl-C exits cleanly
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        pass
07070100000028000081A400000000000000000000000165B376D000000000000000000000000000000000000000000000002200000000hupper-1.12.1/tests/myapp/foo.ini07070100000029000081A400000000000000000000000165B376D000000135000000000000000000000000000000000000002000000000hupper-1.12.1/tests/test_cli.pyimport argparse
import pytest

from hupper.cli import interval_parser


@pytest.mark.parametrize('value', ['0', "-1"])
def test_interval_parser_errors(value):
    # non-positive intervals must be rejected
    with pytest.raises(argparse.ArgumentTypeError):
        interval_parser(value)


def test_interval_parser():
    # a positive interval parses to its integer value
    assert interval_parser("5") == 5
0707010000002A000081A400000000000000000000000165B376D000000277000000000000000000000000000000000000002000000000hupper-1.12.1/tests/test_ipc.pyimport queue

from hupper.ipc import Pipe, spawn


def echo(pipe):
    """Echo every packet received on ``pipe`` back to the sender.

    Stops (and closes the pipe) on a ``None`` packet, which signals that
    the peer closed its end.
    """
    inbox = queue.Queue()
    pipe.activate(inbox.put)
    while True:
        msg = inbox.get()
        if msg is None:
            break
        pipe.send(msg)
    pipe.close()


def test_ipc_close():
    # a pipe should keep delivering packets after the parent closes its
    # copy of the child's end (the child runs ``echo`` above)
    c1, c2 = Pipe()
    c1_q = queue.Queue()
    c1.activate(c1_q.put)

    with spawn(
        __name__ + '.echo',
        kwargs={"pipe": c2},
        pass_fds=[c2.r_fd, c2.w_fd],
    ) as proc:
        try:
            # the child owns c2 now; release the parent's copy
            c2.close()

            c1.send("hello world")
            assert c1_q.get() == "hello world"

            c1.close()
        finally:
            proc.terminate()
0707010000002B000081A400000000000000000000000165B376D0000002E0000000000000000000000000000000000000001F00000000hupper-1.12.1/tests/test_it.pyimport os.path
import time

from . import util

here = os.path.abspath(os.path.dirname(__file__))


def test_myapp_reloads_when_touching_ini(testapp):
    # touching a watched non-python file must trigger a second app start
    testapp.start('myapp', ['--reload'])
    testapp.wait_for_response()
    # give the monitor time to register the watched files
    time.sleep(2)
    util.touch(os.path.join(here, 'myapp/foo.ini'))
    testapp.wait_for_response()
    testapp.stop()

    # two responses == initial start + one reload
    assert len(testapp.response) == 2
    assert testapp.stderr != ''


def test_myapp_reloads_when_touching_pyfile(testapp):
    testapp.start('myapp', ['--reload'])
    testapp.wait_for_response()
    time.sleep(2)
    util.touch(os.path.join(here, 'myapp/cli.py'))
    testapp.wait_for_response()
    testapp.stop()

    assert len(testapp.response) == 2
    assert testapp.stderr != ''
0707010000002C000081A400000000000000000000000165B376D000000C24000000000000000000000000000000000000002500000000hupper-1.12.1/tests/test_reloader.pyimport os

here = os.path.abspath(os.path.dirname(__file__))


class DummyCallback:
    """Callable spy recording the argument of its most recent invocation."""

    # ``False`` until called; afterwards holds the ``paths`` argument.
    called = False

    def __call__(self, changed):
        self.called = changed


def make_proxy(monitor_factory, callback, logger):
    """Build a FileMonitorProxy whose monitor comes from ``monitor_factory``."""
    from hupper.reloader import FileMonitorProxy

    result = FileMonitorProxy(callback, logger)
    result.monitor = monitor_factory(result.file_changed)
    return result


def test_proxy_proxies(logger):
    """start/stop on the proxy are forwarded to the underlying monitor."""

    class FakeMonitor(object):
        started = stopped = joined = False

        def __call__(self, cb, **kw):
            self.cb = cb
            return self

        def start(self):
            self.started = True

        def stop(self):
            self.stopped = True

        def join(self):
            self.joined = True

    callback = DummyCallback()
    fake = FakeMonitor()
    proxy = make_proxy(fake, callback, logger)
    assert fake.cb
    assert not (fake.started or fake.stopped or fake.joined)
    proxy.start()
    assert fake.started and not fake.stopped and not fake.joined
    proxy.stop()
    assert fake.stopped and fake.joined


def test_proxy_expands_paths(tmpdir, logger):
    """add_path passes plain paths through and expands glob patterns."""

    class FakeMonitor(object):
        def __call__(self, cb, **kw):
            self.cb = cb
            self.paths = []
            return self

        def add_path(self, path):
            self.paths.append(path)

    callback = DummyCallback()
    fake = FakeMonitor()
    proxy = make_proxy(fake, callback, logger)
    proxy.add_path('foo')
    assert fake.paths == ['foo']

    for name in ('foo.txt', 'bar.txt'):
        tmpdir.join(name).ensure()
    root = tmpdir.strpath
    fake.paths = []
    proxy.add_path(os.path.join(root, '*.txt'))
    expected = [os.path.join(root, n) for n in ('bar.txt', 'foo.txt')]
    assert sorted(fake.paths) == expected


def test_proxy_tracks_changes(logger):
    """A changed path is reported once until clear_changes() resets it."""

    class FakeMonitor(object):
        def __call__(self, cb, **kw):
            self.cb = cb
            return self

    callback = DummyCallback()
    fake = FakeMonitor()
    proxy = make_proxy(fake, callback, logger)

    # First change: callback fires and the change is logged.
    fake.cb('foo.txt')
    assert callback.called == {'foo.txt'}
    assert logger.get_output('info') == 'foo.txt changed; reloading ...'
    logger.reset()

    # Same path again: already tracked, so nothing new is logged.
    fake.cb('foo.txt')
    assert logger.get_output('info') == ''
    logger.reset()

    # After clearing, the same path is reported again.
    callback.called = False
    proxy.clear_changes()
    fake.cb('foo.txt')
    assert logger.get_output('info') == 'foo.txt changed; reloading ...'
    logger.reset()


def test_ignore_files():
    """Paths matching an ignore pattern are never added to the monitor."""

    class FakeMonitor(object):
        paths = set()

        def add_path(self, path):
            self.paths.add(path)

    from hupper.reloader import FileMonitorProxy

    callback = DummyCallback()
    proxy = FileMonitorProxy(callback, None, {'/a/*'})
    fake = proxy.monitor = FakeMonitor()

    # A path outside the ignore patterns is added.
    accepted = 'foo.txt'
    assert accepted not in fake.paths
    proxy.add_path(accepted)
    assert accepted in fake.paths

    # A path matching '/a/*' is filtered out.
    ignored = '/a/foo.txt'
    assert ignored not in fake.paths
    proxy.add_path(ignored)
    assert ignored not in fake.paths
0707010000002D000081A400000000000000000000000165B376D000000AC7000000000000000000000000000000000000001C00000000hupper-1.12.1/tests/util.pyimport os
import subprocess
import sys
import tempfile
import threading
import time

here = os.path.abspath(os.path.dirname(__file__))


class TestApp(threading.Thread):
    """Drive a ``tests.<name>`` module as a subprocess on a background thread.

    The child process writes its responses to a temporary "callback file";
    tests watch that file grow (via ``wait_for_response``) to detect when
    the app has started or reloaded.
    """

    # Module name under ``tests`` to run (set by start()).
    name = None
    # Extra command-line arguments for the app (set by start()).
    args = None
    # Optional text piped to the child's stdin by run().
    stdin = None
    # Daemon thread so a hung child cannot block interpreter exit.
    daemon = True

    def __init__(self):
        super(TestApp, self).__init__()
        self.exitcode = None
        self.process = None
        self.tmpfile = None
        self.tmpsize = 0
        self.response = None
        # NOTE(review): initialized as bytes, but run() uses
        # universal_newlines=True so these become str after communicate().
        self.stdout, self.stderr = b'', b''

    def start(self, name, args):
        """Create the callback file and launch the app thread.

        :param name: module under ``tests`` to execute via ``python -m``.
        :param args: list of extra CLI arguments (may be None).
        """
        self.name = name
        self.args = args or []

        # mkstemp returns an open fd we do not need; close it immediately.
        fd, self.tmpfile = tempfile.mkstemp()
        os.close(fd)
        touch(self.tmpfile)
        self.tmpsize = os.path.getsize(self.tmpfile)
        self.response = readfile(self.tmpfile)
        super(TestApp, self).start()

    def run(self):
        """Thread body: run the subprocess to completion, capturing output."""
        cmd = [sys.executable, '-m', 'tests.' + self.name]
        if self.tmpfile:
            cmd += ['--callback-file', self.tmpfile]
        cmd += self.args

        # Unbuffered child output so the callback file is written promptly.
        env = os.environ.copy()
        env['PYTHONUNBUFFERED'] = '1'

        self.process = subprocess.Popen(
            cmd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            env=env,
            universal_newlines=True,
        )
        try:
            self.stdout, self.stderr = self.process.communicate(self.stdin)
        finally:
            # Record the exit code even if communicate() raised.
            self.exitcode = self.process.wait()

    def is_alive(self):
        """True once the subprocess has started and has not yet exited."""
        return self.process is not None and self.exitcode is None

    def stop(self):
        """Kill the subprocess if running, join the thread, and clean up."""
        if self.is_alive():
            self.process.kill()
        self.join()

        if self.tmpfile:
            os.unlink(self.tmpfile)
            self.tmpfile = None

    def wait_for_response(self, timeout=5, interval=0.1):
        """Block until the callback file grows, then reload its contents."""
        self.tmpsize = wait_for_change(
            self.tmpfile,
            last_size=self.tmpsize,
            timeout=timeout,
            interval=interval,
        )
        self.response = readfile(self.tmpfile)


def touch(fname, times=None):
    """Ensure ``fname`` exists, then update its timestamps (like touch(1))."""
    open(fname, 'a').close()
    os.utime(fname, times)


def readfile(path):
    """Return the file's contents as a list of byte-string lines."""
    with open(path, 'rb') as stream:
        return list(stream)


def wait_for_change(path, last_size=0, timeout=5, interval=0.1):
    """Poll ``path`` until its size differs from ``last_size``.

    :returns: the new file size.
    :raises RuntimeError: if ``timeout`` seconds elapse without a change;
        ``timeout=None`` waits forever, polling every ``interval`` seconds.
    """
    started = time.time()
    while True:
        size = os.path.getsize(path)
        if size != last_size:
            return size
        delay = interval
        if timeout is not None:  # pragma: no cover
            elapsed = time.time() - started
            if elapsed >= timeout:
                raise RuntimeError(
                    'timeout waiting for change to file=%s' % (path,)
                )
            delay = min(timeout - elapsed, delay)
        time.sleep(delay)
0707010000002E000081A400000000000000000000000165B376D0000005CD000000000000000000000000000000000000001600000000hupper-1.12.1/tox.ini[tox]
envlist =
    lint,
    py37,py38,py39,py310,py311,py312,pypy3,
    docs,coverage

isolated_build = true

requires =
    pip >= 19

[testenv]
commands =
    py.test --cov --cov-report= {posargs:}

setenv =
    COVERAGE_FILE=.coverage.{envname}

extras =
    testing

[testenv:coverage]
skip_install = true
commands =
    coverage combine
    coverage report
deps =
    coverage
setenv =
    COVERAGE_FILE=.coverage

[testenv:docs]
allowlist_externals =
    make
commands =
    make -C docs html BUILDDIR={envdir} SPHINXOPTS="-W -E"
extras =
    docs

[testenv:lint]
skip_install = True
commands =
    isort --check-only --df src/hupper tests setup.py
    black --check --diff src/hupper tests setup.py
    flake8 src/hupper tests setup.py
    check-manifest
    # build sdist/wheel
    python -m build .
    twine check dist/*
deps =
    black
    build
    check-manifest
    flake8
    flake8-bugbear
    isort
    readme_renderer
    twine

[testenv:format]
skip_install = true
commands =
    isort src/hupper tests setup.py
    black src/hupper tests setup.py
deps =
    black
    isort

[testenv:build]
skip_install = true
commands =
    # clean up build/ and dist/ folders
    python -c 'import shutil; shutil.rmtree("build", ignore_errors=True)'
    # Make sure we aren't forgetting anything
    check-manifest
    # build sdist/wheel
    python -m build .
    # Verify all is well
    twine check dist/*

deps =
    build
    check-manifest
    readme_renderer
    twine
07070100000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000B00000000TRAILER!!!209 blocks
openSUSE Build Service is sponsored by