File sambacc-v0.6+git.60.2f89a38.obscpio of Package python-sambacc

07070100000000000041ED000000000000000000000002684BE19C00000000000000000000000000000000000000000000002200000000sambacc-v0.6+git.60.2f89a38/.copr07070100000001000081A4000000000000000000000001684BE19C00000258000000000000000000000000000000000000002B00000000sambacc-v0.6+git.60.2f89a38/.copr/Makefile

SELF=$(lastword $(MAKEFILE_LIST))
ROOT_DIR=$(abspath $(dir $(SELF))/..)
SKIP_DEPS=

outdir:=/var/tmp/copr-tmp-outdir
spec:=extras/python-sambacc.spec

.PHONY: srpm
srpm: sys_deps
	mkdir -p $(outdir)
	git fetch --tags
	SAMBACC_SRPM_ONLY=yes \
		SAMBACC_BUILD_DIR=$(ROOT_DIR) \
		SAMBACC_DIST_PREFIX=$(outdir)/.dist \
		SAMBACC_DISTNAME=copr \
		SAMBACC_BUILD_TASKS="task_py_build task_rpm_build" \
		 ./tests/container/build.sh
	cp $(outdir)/.dist/copr/SRPMS/*.rpm  $(outdir)


.PHONY: sys_deps
sys_deps:
ifeq ($(SKIP_DEPS),yes)
	@echo "Skipping sys deps"
else
	dnf install -y python3-pip git
endif
07070100000002000041ED000000000000000000000002684BE19C00000000000000000000000000000000000000000000002400000000sambacc-v0.6+git.60.2f89a38/.github07070100000003000081A4000000000000000000000001684BE19C00000C53000000000000000000000000000000000000003000000000sambacc-v0.6+git.60.2f89a38/.github/mergify.yml---
# each test should be listed separately, do not use regular expressions:
# https://docs.mergify.io/conditions.html#validating-all-status-check
# TODO: Use mergify's recently added 'shared configuration support'
# to dedup some of the check-x=y repetition in the future.
queue_rules:
  - name: default
    conditions:
      - check-success=check-commits
      - check-success=test (fedora-latest)
      - check-success=test (fedora-previous)
      - check-success=test (centos-stream9)
      - check-success=dpulls
    merge_method: rebase
    update_method: rebase


pull_request_rules:
  # Clearing approvals after content changes
  - name: Remove outdated approvals
    conditions:
      - base=master
    actions:
      dismiss_reviews:
        approved: true
        changes_requested: false
  # Perform automatic merge on conditions
  - name: Automatic merge on approval
    conditions:
      - check-success=check-commits
      - check-success=test (fedora-latest)
      - check-success=test (fedora-previous)
      - check-success=test (centos-stream9)
      - check-success=dpulls
      - "-draft"
      # Contributors should set the 'do-not-merge' label if they don't want
      # the PR to be (auto)merged for some reason.
      - "label!=do-not-merge"
      # A reviewer should set a label starting with 'review-in-progress' (and
      # suffixed by their username) in order to indicate a detailed review has
      # been started and not completed. This will hold the PR until the
      # label has been removed.
      - "-label~=^review-in-progress"
      - "base=master"
      # Even if there are 2 or more approvals we won't automerge if there are
      # any changes requested.
      - "#changes-requested-reviews-by=0"
      - or:
        # Any contributor's PR can be automerged with 2 (or more) reviews.
        - "#approved-reviews-by>=2"
        # A maintainer's contribution that has already aged long enough to
        # earn the "priority-review" label can be merged immediately.
        # The label can also be applied manually in case of an important
        # bugfix, etc.
        - and:
          - "label=priority-review"
          - "author=@maintainers"
          - "#approved-reviews-by>=1"
    actions:
      queue: {}
      dismiss_reviews: {}
  # Conflict resolution prompt
  - name: Ask contributor to resolve a conflict
    conditions:
      - conflict
    actions:
      comment:
        message: "This pull request now has conflicts with the target branch.
        Please resolve these conflicts and force push the updated branch."
  # Label PRs that have been sitting there unchanged, aging like a fine wine
  #
  # NOTE: the updated-at "counter" resets every time the PR is changed so
  # reacting to a reviewer's feedback and fixing a typo (for example) will
  # reset the counter. Thus we now apply a label once we hit the 15 day window
  # so that we know that PR had, at some time, sat unchanged for that long.
  - name: Label aged PRs
    conditions:
      - "updated-at<15 days ago"
      - "-draft"
      - "-closed"
      - "-merged"
    actions:
      label:
        add:
          - "priority-review"
07070100000004000041ED000000000000000000000002684BE19C00000000000000000000000000000000000000000000002E00000000sambacc-v0.6+git.60.2f89a38/.github/workflows07070100000005000081A4000000000000000000000001684BE19C00000AB4000000000000000000000000000000000000003500000000sambacc-v0.6+git.60.2f89a38/.github/workflows/ci.yml---
name: CI

on:
  push:
    branches: [master]
  pull_request:
    branches: [master]
  schedule:
    - cron: 1 1 * * *

jobs:
  fedora-versions:
    runs-on: ubuntu-latest
    steps:
      - id: fedora-versions
        run: |
          curl -s -L https://fedoraproject.org/releases.json -o fedora-releases.json
          LATEST=$(jq -r '[.[]|select(.variant == "Container" and .subvariant == "Container_Base" and .arch == "x86_64")][0]|.version' fedora-releases.json)
          PREVIOUS=$((LATEST - 1))

          echo "latest=$LATEST" >> $GITHUB_OUTPUT
          echo "previous=$PREVIOUS" >> $GITHUB_OUTPUT
    outputs:
      latest: ${{ steps.fedora-versions.outputs.latest }}
      previous: ${{ steps.fedora-versions.outputs.previous }}
  check-commits:
    runs-on: ubuntu-latest
    if: github.event_name == 'pull_request'
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
          ref: ${{ github.event.pull_request.head.sha }}
      - uses: actions/setup-python@v4
      - name: Install tox
        run: python -m pip install tox
      - name: Run gitlint
        run: tox -e gitlint
  test:
    needs: fedora-versions
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        test_distro: ["fedora-previous", "fedora-latest", "centos-stream9"]
        include:
          - test_distro: "fedora-previous"
            base_image: "registry.fedoraproject.org/fedora:${{ needs.fedora-versions.outputs.previous }}"
          - test_distro: "fedora-latest"
            base_image: "registry.fedoraproject.org/fedora:${{ needs.fedora-versions.outputs.latest }}"
          - test_distro: "centos-stream9"
            base_image: "quay.io/centos/centos:stream9"
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Build test container
        run: docker build -t sambacc:ci-${{ matrix.test_distro }} --build-arg=SAMBACC_BASE_IMAGE=${{ matrix.base_image }}  tests/container/ -f tests/container/Containerfile
      - name: Run test container
        run: docker run -v $PWD:/var/tmp/build/sambacc sambacc:ci-${{ matrix.test_distro }}

  push:
    needs: [test]
    runs-on: ubuntu-latest
    if: (github.event_name == 'push' || github.event_name == 'schedule') && github.repository == 'samba-in-kubernetes/sambacc'
    steps:
      - uses: actions/checkout@v4
      - name: log in to quay.io
        run: docker login -u "${{ secrets.QUAY_USER }}" -p "${{ secrets.QUAY_PASS }}" quay.io
      - name: build container image
        run: docker build -t quay.io/samba.org/sambacc:latest tests/container -f tests/container/Containerfile
      - name: publish container image
        run: docker push quay.io/samba.org/sambacc:latest
07070100000006000081A4000000000000000000000001684BE19C0000005C000000000000000000000000000000000000002700000000sambacc-v0.6+git.60.2f89a38/.gitignore.venv
.tox
.egg-info
__pycache__
htmlcov
*.swp
dist/
build/
.mypy_cache
sambacc/_version.py
07070100000007000081A4000000000000000000000001684BE19C00001207000000000000000000000000000000000000002500000000sambacc-v0.6+git.60.2f89a38/.gitlint# Edit this file as you like.
#
# All these sections are optional. Each section with the exception of [general] represents
# one rule and each key in it is an option for that specific rule.
#
# Rules and sections can be referenced by their full name or by id. For example
# section "[body-max-line-length]" could also be written as "[B1]". Full section names are
# used in here for clarity.
#
[general]
# Ignore certain rules, this example uses both full name and id
# ignore=title-trailing-punctuation, T3

# verbosity should be a value between 1 and 3, the commandline -v flags take precedence over this
verbosity=3

# By default gitlint will ignore merge, revert, fixup and squash commits.
ignore-merge-commits=true
# ignore-revert-commits=true
# ignore-fixup-commits=true
# ignore-squash-commits=true

# Ignore any data send to gitlint via stdin
# ignore-stdin=true

# Fetch additional meta-data from the local repository when manually passing a
# commit message to gitlint via stdin or --commit-msg. Disabled by default.
# staged=true

# Enable debug mode (prints more output). Disabled by default.
# debug=true

# Enable search regex and remove warning message.
regex-style-search=true

# Enable community contributed rules
# See http://jorisroovers.github.io/gitlint/contrib_rules for details
contrib=contrib-body-requires-signed-off-by

# Set the extra-path where gitlint will search for user defined rules
# See http://jorisroovers.github.io/gitlint/user_defined_rules for details
# extra-path=examples/

# This is an example of how to configure the "title-max-length" rule and
# set the line-length it enforces to 80
[title-max-length]
line-length=72

# Conversely, you can also enforce minimal length of a title with the
# "title-min-length" rule:
# [title-min-length]
# min-length=5

[title-must-not-contain-word]
# Comma-separated list of words that should not occur in the title. Matching is case
# insensitive. It's fine if the keyword occurs as part of a larger word (so "WIPING"
# will not cause a violation, but "WIP: my title" will.
words=wip,WIP

[title-match-regex]
# python-style regex that the commit-msg title must match
# Note that the regex can contradict with other rules if not used correctly
# (e.g. title-must-not-contain-word).
regex=^.{2,32}: .*

# [body-max-line-length]
# line-length=72

# [body-min-length]
# min-length=5

# [body-is-missing]
# Whether to ignore this rule on merge commits (which typically only have a title)
# default = True
# ignore-merge-commits=false

# [body-changed-file-mention]
# List of files that need to be explicitly mentioned in the body when they are changed
# This is useful for when developers often erroneously edit certain files or git submodules.
# By specifying this rule, developers can only change the file when they explicitly reference
# it in the commit message.
# files=gitlint/rules.py,README.md

# [body-match-regex]
# python-style regex that the commit-msg body must match.
# E.g. body must end in My-Commit-Tag: foo
# regex=My-Commit-Tag: foo$

# [author-valid-email]
# python-style regex that the commit author email address must match.
# For example, use the following regex if you only want to allow email addresses from foo.com
# regex=[^@]+@foo.com

# [ignore-by-title]
# Ignore certain rules for commits of which the title matches a regex
# E.g. Match commit titles that start with "Release"
# regex=^Release(.*)

# Ignore certain rules, you can reference them by their id or by their full name
# Use 'all' to ignore all rules
# ignore=T1,body-min-length

# [ignore-by-body]
# Ignore certain rules for commits of which the body has a line that matches a regex
# E.g. Match bodies that have a line that that contain "release"
# regex=(.*)release(.*)
#
# Ignore certain rules, you can reference them by their id or by their full name
# Use 'all' to ignore all rules
# ignore=T1,body-min-length

[ignore-body-lines]
# Ignore certain lines in a commit body that match a regex.
# E.g. Ignore all lines that start with 'Co-Authored-By'
# regex=^Co-Authored-By

# ignore lines that are "footnotes", that start like `[1]: ` or `[2]: ` and so on
# this will make it easy to put long urls in commit messages without
# triggering gitlint body rules
regex=^\[[0-9]+\]:? +

# This is a contrib rule - a community contributed rule. These are disabled by default.
# You need to explicitly enable them one-by-one by adding them to the "contrib" option
# under [general] section above.
# [contrib-title-conventional-commits]
# Specify allowed commit types. For details see: https://www.conventionalcommits.org/
# types = bugfix,user-story,epic
07070100000008000081A4000000000000000000000001684BE19C0000007A000000000000000000000000000000000000002600000000sambacc-v0.6+git.60.2f89a38/.hgignore.venv
.tox
.egg-info
__pycache__
htmlcov
\.coverage$
\.pytest_cache
\.swp$
^dist/
^build/
.mypy_cache
sambacc/_version.py
07070100000009000081A4000000000000000000000001684BE19C0000894D000000000000000000000000000000000000002400000000sambacc-v0.6+git.60.2f89a38/COPYING                    GNU GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007

 Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

                            Preamble

  The GNU General Public License is a free, copyleft license for
software and other kinds of works.

  The licenses for most software and other practical works are designed
to take away your freedom to share and change the works.  By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.  We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors.  You can apply it to
your programs, too.

  When we speak of free software, we are referring to freedom, not
price.  Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.

  To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights.  Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.

  For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received.  You must make sure that they, too, receive
or can get the source code.  And you must show them these terms so they
know their rights.

  Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.

  For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software.  For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.

  Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so.  This is fundamentally incompatible with the aim of
protecting users' freedom to change the software.  The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable.  Therefore, we
have designed this version of the GPL to prohibit the practice for those
products.  If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.

  Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary.  To prevent this, the GPL assures that
patents cannot be used to render the program non-free.

  The precise terms and conditions for copying, distribution and
modification follow.

                       TERMS AND CONDITIONS

  0. Definitions.

  "This License" refers to version 3 of the GNU General Public License.

  "Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.

  "The Program" refers to any copyrightable work licensed under this
License.  Each licensee is addressed as "you".  "Licensees" and
"recipients" may be individuals or organizations.

  To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy.  The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.

  A "covered work" means either the unmodified Program or a work based
on the Program.

  To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy.  Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.

  To "convey" a work means any kind of propagation that enables other
parties to make or receive copies.  Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.

  An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License.  If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.

  1. Source Code.

  The "source code" for a work means the preferred form of the work
for making modifications to it.  "Object code" means any non-source
form of a work.

  A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.

  The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form.  A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.

  The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities.  However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work.  For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.

  The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.

  The Corresponding Source for a work in source code form is that
same work.

  2. Basic Permissions.

  All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met.  This License explicitly affirms your unlimited
permission to run the unmodified Program.  The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work.  This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.

  You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force.  You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright.  Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.

  Conveying under any other circumstances is permitted solely under
the conditions stated below.  Sublicensing is not allowed; section 10
makes it unnecessary.

  3. Protecting Users' Legal Rights From Anti-Circumvention Law.

  No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.

  When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.

  4. Conveying Verbatim Copies.

  You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.

  You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.

  5. Conveying Modified Source Versions.

  You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:

    a) The work must carry prominent notices stating that you modified
    it, and giving a relevant date.

    b) The work must carry prominent notices stating that it is
    released under this License and any conditions added under section
    7.  This requirement modifies the requirement in section 4 to
    "keep intact all notices".

    c) You must license the entire work, as a whole, under this
    License to anyone who comes into possession of a copy.  This
    License will therefore apply, along with any applicable section 7
    additional terms, to the whole of the work, and all its parts,
    regardless of how they are packaged.  This License gives no
    permission to license the work in any other way, but it does not
    invalidate such permission if you have separately received it.

    d) If the work has interactive user interfaces, each must display
    Appropriate Legal Notices; however, if the Program has interactive
    interfaces that do not display Appropriate Legal Notices, your
    work need not make them do so.

  A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit.  Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.

  6. Conveying Non-Source Forms.

  You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:

    a) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by the
    Corresponding Source fixed on a durable physical medium
    customarily used for software interchange.

    b) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by a
    written offer, valid for at least three years and valid for as
    long as you offer spare parts or customer support for that product
    model, to give anyone who possesses the object code either (1) a
    copy of the Corresponding Source for all the software in the
    product that is covered by this License, on a durable physical
    medium customarily used for software interchange, for a price no
    more than your reasonable cost of physically performing this
    conveying of source, or (2) access to copy the
    Corresponding Source from a network server at no charge.

    c) Convey individual copies of the object code with a copy of the
    written offer to provide the Corresponding Source.  This
    alternative is allowed only occasionally and noncommercially, and
    only if you received the object code with such an offer, in accord
    with subsection 6b.

    d) Convey the object code by offering access from a designated
    place (gratis or for a charge), and offer equivalent access to the
    Corresponding Source in the same way through the same place at no
    further charge.  You need not require recipients to copy the
    Corresponding Source along with the object code.  If the place to
    copy the object code is a network server, the Corresponding Source
    may be on a different server (operated by you or a third party)
    that supports equivalent copying facilities, provided you maintain
    clear directions next to the object code saying where to find the
    Corresponding Source.  Regardless of what server hosts the
    Corresponding Source, you remain obligated to ensure that it is
    available for as long as needed to satisfy these requirements.

    e) Convey the object code using peer-to-peer transmission, provided
    you inform other peers where the object code and Corresponding
    Source of the work are being offered to the general public at no
    charge under subsection 6d.

  A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.

  A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling.  In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage.  For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product.  A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.

  "Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source.  The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.

  If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information.  But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).

  The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed.  Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.

  Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.

  7. Additional Terms.

  "Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law.  If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.

  When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it.  (Additional permissions may be written to require their own
removal in certain cases when you modify the work.)  You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.

  Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:

    a) Disclaiming warranty or limiting liability differently from the
    terms of sections 15 and 16 of this License; or

    b) Requiring preservation of specified reasonable legal notices or
    author attributions in that material or in the Appropriate Legal
    Notices displayed by works containing it; or

    c) Prohibiting misrepresentation of the origin of that material, or
    requiring that modified versions of such material be marked in
    reasonable ways as different from the original version; or

    d) Limiting the use for publicity purposes of names of licensors or
    authors of the material; or

    e) Declining to grant rights under trademark law for use of some
    trade names, trademarks, or service marks; or

    f) Requiring indemnification of licensors and authors of that
    material by anyone who conveys the material (or modified versions of
    it) with contractual assumptions of liability to the recipient, for
    any liability that these contractual assumptions directly impose on
    those licensors and authors.

  All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10.  If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term.  If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.

  If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.

  Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.

  8. Termination.

  You may not propagate or modify a covered work except as expressly
provided under this License.  Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).

  However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.

  Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.

  Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License.  If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.

  9. Acceptance Not Required for Having Copies.

  You are not required to accept this License in order to receive or
run a copy of the Program.  Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance.  However,
nothing other than this License grants you permission to propagate or
modify any covered work.  These actions infringe copyright if you do
not accept this License.  Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.

  10. Automatic Licensing of Downstream Recipients.

  Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License.  You are not responsible
for enforcing compliance by third parties with this License.

  An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations.  If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.

  You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License.  For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.

  11. Patents.

  A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based.  The
work thus licensed is called the contributor's "contributor version".

  A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version.  For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.

  Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.

  In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement).  To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.

  If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients.  "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.

  If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.

  A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License.  You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.

  Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.

  12. No Surrender of Others' Freedom.

  If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License.  If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all.  For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.

  13. Use with the GNU Affero General Public License.

  Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work.  The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.

  14. Revised Versions of this License.

  The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time.  Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.

  Each version is given a distinguishing version number.  If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation.  If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.

  If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.

  Later license versions may give you additional or different
permissions.  However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.

  15. Disclaimer of Warranty.

  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

  16. Limitation of Liability.

  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.

  17. Interpretation of Sections 15 and 16.

  If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.

                     END OF TERMS AND CONDITIONS

            How to Apply These Terms to Your New Programs

  If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.

  To do so, attach the following notices to the program.  It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year>  <name of author>

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <https://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

  If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:

    <program>  Copyright (C) <year>  <name of author>
    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    This is free software, and you are welcome to redistribute it
    under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License.  Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".

  You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.

  The GNU General Public License does not permit incorporating your program
into proprietary programs.  If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library.  If this is what you want to do, use the GNU Lesser General
Public License instead of this License.  But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.
0707010000000A000081A4000000000000000000000001684BE19C00000064000000000000000000000000000000000000002800000000sambacc-v0.6+git.60.2f89a38/MANIFEST.in# Include example files
recursive-include examples *
recursive-include sambacc/schema *.json *.yaml
0707010000000B000081A4000000000000000000000001684BE19C00001A35000000000000000000000000000000000000002600000000sambacc-v0.6+git.60.2f89a38/README.md# sambacc - A Samba Container Configuration Tool

## About

The sambacc project aims to consolidate and coordinate configuration of
[Samba](http://samba.org), and related components, when running in a
container. The configuration of one or many server instances can be
managed by the tool with the use of configuration files.  These
configuration files act as a superset of the well-known smb.conf, to
configure Samba, as well as other low level details of the container
environment.


## Rationale

Samba is a powerful and unique tool for implementing the SMB protocol and
a software stack to support it on unix-like systems. However, it's
potentially challenging to set up and manage many instances of Samba
by-hand, especially when running under a container orchestration system.

The idea behind sambacc is to automate much of the low level steps needed
to set up samba daemons, users, groups, and other supporting
components. The tool is also designed to consume configuration files that
can be used across many container instances. sambacc is written in Python
as samba provides Python bindings for some of the aspects of samba we need
to control.

The sambacc library and samba-container CLI command are used by the
[samba-container project](https://github.com/samba-in-kubernetes/samba-container/)
as part of the server container images.


## Usage

### File Server

The `samba-container` command is used to manage features related to
the Samba file server and closely related components.

Without any additional arguments `samba-container` prints the synthesised
samba (smb.conf) configuration based on the environment variables:
* `SAMBACC_CONFIG` - configuration file(s)
* `SAMBA_CONTAINER_ID` - Identity of this instance

Additionally, there are many other subcommands the tool supports. These include:
* `samba-container import` - Import smb.conf-style settings into registry
* `samba-container import-users` - Import users into /etc files and smb passdb
* `samba-container init` - Initialize the container environment for use
  by samba services
* `samba-container run <service>` - Initialize and run a named samba service

For a complete description of the subcommands supported, run:

```sh
samba-container --help
```

### Active Directory Domain Controller

The `samba-dc-container` command is used to manage features related to
the Samba AD DC server and related components.

Currently, `samba-dc-container` supports one subcommand. The `run` subcommand
is used to start an AD DC server. This command supports various setup steps
including steps to join an existing domain, provision a new domain,
populate a domain with stock users/groups, etc.

For a complete description of the subcommands supported, run:

```sh
samba-dc-container --help
```


## Features

* Abstracts away some of the nitty-gritty details about what Samba expects
  in its environment
* Imports specific smb.conf settings from "site wide" configuration files.
* Imports users and groups
* Starts smbd with container friendly settings
* Starts winbindd with container friendly settings
* Support for joining AD
* Support for managing CTDB clustering
* Support for creating/joining Samba Active Directory servers

### Major TODOs

A lot. Various things that are missing include:

* Features to perform more secure (password-less) domain joins
* Better integration (as opposed to unit) testing
* Better use of APIs versus executing CLI commands

Contributions and feedback would be very much appreciated.


## Install

The sambacc library, samba-container command, and samba-dc-container are
written assuming the software is being run within an OCI container environment.
While there's nothing stopping you from trying to install it on something else
the value of doing that will be rather limited.

The [samba-container
project](https://github.com/samba-in-kubernetes/samba-container) includes
sambacc and samba packages. If you are looking to use sambacc and not
contribute to it, that's probably what you want.

Builds of sambacc are continuously produced within our [COPR repository](https://copr.fedorainfracloud.org/coprs/phlogistonjohn/sambacc/).
These builds are then consumed by the container image builds.

Otherwise, the only method of install is from source control.

* Clone the repo: `git clone https://github.com/samba-in-kubernetes/sambacc`
* `cd sambacc`
* Install locally: `python -m pip install --user .`

The test & build container may also be used to build source tarballs and
wheels. Then you can distribute and install from the wheel if you need to.

### Testing

#### Local testing

To run the entire unit test suite locally install `tox` and run `tox` in
the repo root.

Because the library and tooling that interact with samba have some
system level dependencies, not all tests can be run locally in
isolated (virtualenv) environments.


#### Containerized testing

A more robust and isolated testing environment is provided in
the form of the sambacc container image.

The container file and other sources are available at ./tests/container in
the sambacc repo. This is the canonical way to run the test suite and is
what is used by the CI tests. When run this way certain system packages
can be installed, etc. to support running a wider range of test cases.

By default the container image is configured to check out sambacc master
branch and execute the tests and build python source distributions,
wheels, and RPM packages. You can test your local git checkout using the
image by mounting it at /var/tmp/build/sambacc (example: `podman run -v
$PWD:/var/tmp/build/sambacc sambacc:ci`).

To access the packages that are built using the container, mount a
directory into the container at "/srv/dist" and set the environment
variable `SAMBACC_DISTNAME` to a term of your choice (example: "latest").
This will then save the builds in a directory of that name in your output
directory.
Example:
```
$ mkdir -p $HOME/tmp/sambacc
$ podman run --rm \
  -v $HOME/tmp/sambacc:/srv/dist -e SAMBACC_DISTNAME=latest \
  quay.io/samba.org/sambacc:latest
$ ls $HOME/tmp/sambacc
latest
$ ls $HOME/tmp/sambacc/latest
sambacc-0.1.dev225+g10059ff-py3-none-any.whl  sha512sums
sambacc-0.1.dev225+g10059ff.tar.gz
```

You can combine the source directory mount and distribution directory
mount in one command to produce builds for your own local development work
if needed.

## License

GPLv3 as per the COPYING file.

This is the same license as used by Samba.


## Contributing/Contact

Patches, issues, comments, and questions are welcome.

Resources:
* [Issue tracker](https://github.com/samba-in-kubernetes/sambacc/issues)
* [Discussions board](https://github.com/samba-in-kubernetes/sambacc/discussions)
0707010000000C000041ED000000000000000000000002684BE19C00000000000000000000000000000000000000000000002100000000sambacc-v0.6+git.60.2f89a38/docs0707010000000D000081A4000000000000000000000001684BE19C0000348B000000000000000000000000000000000000003200000000sambacc-v0.6+git.60.2f89a38/docs/configuration.md
# JSON Configuration Format

Much of the behavior of sambacc is driven by the
configuration files. The following is a high level example of the JSON
structure and a description of these sections.

If sambacc is installed with the `yaml` extra it can support [YAML](#yaml)
based configuration files. If sambacc is installed with the `toml` extra it can
support [TOML](#toml) based configuration files. The JSON support is the
default and is always present.

```json
{
    "samba-container-config": "v0",
    "configs": {
        "config1": {
            "instance_name": "SAMBA",
            "instance_features": [],
            "shares": [
                "testshare"
            ],
            "globals": [
                "default"
            ]
        },
        "config2": {
            "instance_name": "MYDC1",
            "instance_features": [
                "addc"
            ],
            "domain_settings": "testdom"
        }
    },
    "shares": {
        "share": {
            "options": {
                "path": "/share",
                "valid users": "sambauser, otheruser"
            }
        },
        "share2": {
            "options": {
                "path": "/srv/data",
                "valid users": "sambauser, otheruser"
            },
            "permissions": {
                "method": "initialize-share-perms",
                "status_xattr": "user.share-perms-status",
                "mode": "0755"
            }
        }
    },
    "globals": {
        "default": {
            "options": {
                "security": "user",
                "server min protocol": "SMB2",
                "load printers": "no",
                "printing": "bsd",
                "printcap name": "/dev/null",
                "disable spoolss": "yes",
                "guest ok": "no"
            }
        }
    },
    "users": {
        "all_entries": [
            {
                "name": "sambauser",
                "password": "samba"
            },
            {
                "name": "bob",
                "uid": 2000,
                "gid": 2000,
                "password": "notSoSafe"
            },
            {
                "name": "alice",
                "uid": 2001,
                "gid": 2001,
                "nt_hash": "B784E584D34839235F6D88A5382C3821"
            }
        ]
    },
    "groups": {
        "all_entries": [
            {
                "name": "bob",
                "gid": 2000
            },
            {
                "name": "alice",
                "gid": 2001
            }
        ]
    },
    "domain_settings": {
        "testdom": {
            "realm": "DIMENSIONX.FOO.TEST",
            "short_domain": "DIMENSIONX",
            "admin_password": "Passw0rd"
        }
    },
    "domain_groups": {
        "testdom": [
            {
                "name": "friends"
            },
            {
                "name": "developers"
            }
        ]
    },
    "domain_users": {
        "testdom": [
            {
                "name": "jfoo",
                "password": "testing0nly.",
                "given_name": "Joe",
                "surname": "Foo",
                "member_of": [
                    "friends",
                    "developers"
                ]
            },
            {
                "name": "qbert",
                "password": "404knot-found",
                "given_name": "Quentin",
                "surname": "Bert",
                "member_of": [
                    "friends"
                ]
            }
        ]
    }
}
```
<!-- fellow vimmers:
    pipe above section `'<,'>!python -m json.tool` to keep neat -->

## The samba-container-config key

Every valid sambacc JSON configuration file contains the key
`samba-container-config` with a value in the form of a string vN where
N is the numeric version number. Currently, only "v0" exists.
This key-value combination allows us to support backwards-incompatible
configuration file format changes in the future.

## Configs Section

The `configs` section is a mapping of configuration names to top-level
configurations. A usable configuration file must have at least one
configuration, but more than one is supported.

Each configuration section is as follows:
* `instance_name` - String. A name for the configuration instance. Used for
  Samba's server (netbios) name. Valid for all configurations.
* `instance_features` - List of strings. Feature flags that alter the
  high level behavior of sambacc. Valid feature flags are: `CTDB`, `ADDC`.
* `shares` - List of strings. The names of one or more share config sections to
  include as part of the sambacc configuration. Valid only for file-server
  configurations (not supported for AD DC).
* `globals` - List of strings. The names of one or more global config sections
  to include as part of the sambacc configuration. Valid for all
  configurations.
* `domain_settings` - String. Name of the AD DC domain configuration. Required
  for AD DC configurations, invalid for all others.

The subsections under `configs` can be used to uniquely identify one server
"instance". Because those server instances may repeat the same shares and
samba globals, the shares and globals are defined in their own sections and
then included in an instance by referring to them in the `shares` and
`globals` keys of these subsections.


## Shares Section

The `shares` section is a mapping of share names to a share-configuration block.
It is assumed that a configuration will have at least one share.

Each share configuration section is as follows:
* `options` - Mapping. The keys and values contained within are processed by
  sambacc and become part of the smb.conf (or functional equivalent)
  when running a Samba server.
* `permissions` - Permissions configuration section:
  * `method` - Permissions method. Known methods are:
    * `none` - Perform no permissions management
    * `initialize-share-perms` - Set share permissions only once. Track status in xattr.
    * `always-share-perms` - Always set share permissions.
  * `status_xattr` - Name of xattr to store status.
  * Remaining key-value pairs are method specific. Unknown keys are ignored.
  * `mode` - String that converts to octal. Unix permissions to set (`initialize-share-perms`, `always-share-perms`).


## Globals Section

The `globals` section is a mapping of named global configs to a
globals-configuration block. It is assumed that a configuration will have
at least one globals section.

Each globals configuration section is as follows:
* `options` - Mapping. The keys and values contained within are processed by
  sambacc and become part of the global values in smb.conf (or functional
  equivalent) when running a Samba server.

If a configuration section names more than one globals section, all of the
options within will be merged together to produce a single list of Samba
configuration globals.


## Users Section

The `users` section defines local users for a non-domain-member server
instance.

The `users` section supports one key, `all_entries`, which is a list of
user entries. Each user entry is as follows:
* `name` - The user's name.
* `password` - Optional. A plain-text password.
* `nt_hash` - Optional. An NT-Hashed password.
* `uid` - Optional integer. Specify the exact Unix UID the user should have.
* `gid` - Optional integer. Specify the exact Unix GID the user should have.

One of either `password` or `nt_hash` must be specified.

> **Warning**
> Do not consider `nt_hash`ed passwords as secure as the algorithm used to
> generate these hashes is weak (unsalted MD4). Use it only as a method to
> obscure the original password from casual viewers.

The NT-Hashed password can be generated by the following python snippet:
> hashlib.new('md4', password.encode('utf-16-le')).hexdigest().upper()

This may fail on some systems if the md4 hash has been disabled. Enabling
the hash is left as an exercise for the reader.


## Groups Section

The `groups` section defines local groups for a non-domain-member server
instance.

The `groups` section supports one key, `all_entries`, which is a list of
group entries. Each group entry is as follows:
* `name` - The group's name.
* `gid` - Optional integer. Specify the exact Unix GID the group should have.


## Domain Settings Section

The `domain_settings` section defines configuration for AD DC
instances. The `domain_settings` section contains a mapping of domain
settings names to a domain-settings configuration block.

Each domain configuration section is as follows:
* `realm` - Name of the domain in kerberos realm form.
* `short_domain` - Optional. The short (nt-style) name of the domain.
* `admin_password` - The default password for the administrator user.
* `interfaces` - An optional subsection for dynamically configuring the network
  interfaces the domain controller will use. See below.

#### Interfaces Section

The interfaces section enables the sambacc tool to dynamically configure what
network interfaces will be enabled when the domain is provisioned.  On some
systems and in some environments there may be "bogus" network interfaces that
one does not want to enable the domain controller for. Examples include
interfaces related to virtualization or container engines that would cause the
DC to include a private or otherwise inaccessible IP in the DNS
record(s) for the domain & domain controller.

The loopback device ("lo") is always enabled.

* `include_pattern` - Optional string. A regular expression that must match
  the name of an interface for that interface to be included.
  Example: `^eno[0-9]+$`
* `exclude_pattern` - Optional string. A regular expression that must not
  match the name of an interface for that interface to be included.
  The `exclude_pattern` option takes precedence over the `include_pattern`
  option.
  Example: `^(docker|virbr)[0-9]+$`

These options are intended to automate the act of examining a host's interfaces
prior to deployment and creating a list of suitable interfaces prior to setting
the "interfaces" and "bind interfaces only" parameters.  See the [Samba
Wiki page](https://wiki.samba.org/index.php/Setting_up_Samba_as_an_Active_Directory_Domain_Controller#Parameter_Reference)
for more details on this operation.


## Domain Groups Section

The `domain_groups` section defines initial groups that will be
automatically added to a newly provisioned domain. This section
is a mapping of the domain settings name to a list of domain group
entries.

A domain group entry is as follows:
* `name` - The name of the domain group.


## Domain Users Section
The `domain_users` section defines initial users that will be
automatically added to a newly provisioned domain. This section
is a mapping of the domain settings name to a list of domain user
entries.

A domain user entry is as follows:
* `name` - The name of the user.
* `surname` - A surname for the user.
* `given_name` - A given name for the user.
* `password` - A plain-text password.
* `member_of` - Optional. List of group names. The user will be added to the listed
  groups.


# YAML

The [YAML](https://yaml.org/) format may be used to configure sambacc when
PyYAML library is available. The YAML configuration is effectively converted to
JSON internally when processed. All of the documentation applying to the JSON
based configuration applies but in a somewhat easier to write format. The
filename must end with `.yaml` or `.yml` for sambacc to parse the file as YAML.

An example of a YAML based configuration file:
```yaml
samba-container-config: v0
# Define top-level configurations
configs:
  try2:
    globals: ["default"]
    shares:
      - "example"
      - "Other Name"
# Define Global Options
globals:
  default:
    options:
      load printers: "no"
      printing: "bsd"
      printcap name: "/dev/null"
      disable spoolss: "yes"
      guest ok: "no"
      security: "user"
      server min protocol: "SMB2"
# Define Shares
shares:
  example:
    options:
      path: /srv/a
      read only: "no"
  Other Name:
    options:
      path: /srv/b
      read only: "no"
# Define users
users:
  all_entries:
    - {"name": "sambauser", "password": "samba"}
    - {"name": "otheruser", "password": "insecure321"}
```

# TOML

The [TOML](https://toml.io/en/) format may be used to configure sambacc when
used on Python 3.11 or later or when the tomli library is available. The TOML
format may seem similar to the INI-style format used by Samba.  The TOML
configuration is effectively converted to JSON internally when processed. All
of the documentation applying to the JSON based configuration applies but in a
somewhat easier to read and write format. The filename must end with `.toml` for
sambacc to parse the file as TOML.

An example of a TOML based configuration file:
```toml
samba-container-config = "v0"

# Define top level configurations
[configs.try1]
globals = ["default"]
shares = ["example", "Other Name"]

# Define shares
[shares.example.options]
path = "/srv/a"
"read only" = "no"

[shares."Other Name".options]
path = "/srv/b"
"read only" = "no"

# Define global options
[globals.default.options]
"load printers" = "no"
printing = "bsd"
"printcap name" = "/dev/null"
"disable spoolss" = "yes"
"guest ok" = "no"
security = "user"
"server min protocol" = "SMB2"

# Define users
[[users.all_entries]]
name = "sambauser"
password = "samba"

[[users.all_entries]]
name = "otheruser"
password = "insecure321"
```
0707010000000E000081A4000000000000000000000001684BE19C00000FB2000000000000000000000000000000000000003400000000sambacc-v0.6+git.60.2f89a38/docs/release-process.md# sambacc Release Process

## Preparation

Currently there is no dedicated branch for releases. sambacc is simple enough,
has few dependencies, and we're not planning on doing backports. Therefore
we apply release tags to the master branch.

```
git checkout master
git pull --ff-only
git tag -a -m 'Release v0.3' v0.3
```

This creates an annotated tag. Release tags must be annotated tags.

Perform a final check that all supported OSes build. You can
follow the commands below, which are based on the github workflows at the
time this document was written:

```
podman build --build-arg=SAMBACC_BASE_IMAGE=quay.io/centos/centos:stream9 -t sambacc:temp-centos9 tests/container/ -f tests/container/Containerfile
podman build --build-arg=SAMBACC_BASE_IMAGE=registry.fedoraproject.org/fedora:37 -t sambacc:temp-fc37 tests/container/ -f tests/container/Containerfile
podman build --build-arg=SAMBACC_BASE_IMAGE=registry.fedoraproject.org/fedora:38 -t sambacc:temp-fc38 tests/container/ -f tests/container/Containerfile

# name the last part after the release version
mybuild=$PWD/_builds/v03
mkdir -p $mybuild
# perform a combined test & build, that stores build artifacts under $mybuild/$SAMBACC_DISTNAME
podman run -v $PWD:/var/tmp/build/sambacc -v $mybuild:/srv/dist -e SAMBACC_DISTNAME=centos9 sambacc:temp-centos9
podman run -v $PWD:/var/tmp/build/sambacc -v $mybuild:/srv/dist -e SAMBACC_DISTNAME=fc37 sambacc:temp-fc37
podman run -v $PWD:/var/tmp/build/sambacc -v $mybuild:/srv/dist -e SAMBACC_DISTNAME=fc38 sambacc:temp-fc38

# view build results
ls -lR $mybuild
```

Modify the set of base OSes to match what is supported by the release. Check
that the logs show that tag version was correctly picked up by the build.
The python and rpm packages should indicate the new release version and not
include an "unreleased git version".

For at least one build, select a set of files that includes the source tarball,
the Python Wheel (.whl file), and a source RPM. Create or alter an existing
sha512sums file containing the sha512 hashes of these files.


## GitHub Release

When you are satisfied that the tagged version is suitable for release, you
can push the tag to the public repo:
```
git push --follow-tags
```

Manually trigger a COPR build. Confirm that new COPR build contains the correct
version number and doesn't include an "unreleased git version".
You will need to have a fedora account and the ability to trigger builds
for `phlogistonjohn/sambacc`.

Draft a new set of release notes. Select the recently pushed tag. Start with
the auto-generated release notes from github (activate the `Generate release
notes` button/link). Add an introductory section (see previous notes for an
example). Add a "Highlights" section if there are any notable features or fixes
in the release. The Highlights section can be skipped if the content of the
release is unremarkable (e.g. few changes occurred since the previous release).

Attach the source tarball, the Python Wheel, and one SRPM from the earlier
build(s), along with the sha512sums file to the release.

Perform a final round of reviews, as needed, for the release notes and then
publish the release.


## PyPI

There is a [sambacc repository on PyPI](https://pypi.org/project/sambacc/).
This exists mainly to reserve the sambacc name, however we desire to keep it up
to date too.  You will need to have a PyPI account and access to the sambacc
repo.

Log into PyPI web UI. (Re)Generate a pypi login token for sambacc.
Ensure `twine` is installed:
```
python3 -m pip install --upgrade twine
```

Create a directory to store the python build artifacts:
```
rm -rf _build/pypi
mkdir -p _build/pypi
cp sambacc-0.3.tar.gz sambacc-0.3-py3-none-any.whl _build/pypi
```
Upload the files to PyPI creating a new release:
```
python3 -m twine upload _build/pypi/*
# Supply a username of `__token__` and the password will be the value
# of the token you acquired above.
```

A new release like `https://pypi.org/project/sambacc/0.3/` should have become
available.
0707010000000F000041ED000000000000000000000002684BE19C00000000000000000000000000000000000000000000002500000000sambacc-v0.6+git.60.2f89a38/examples07070100000010000081A4000000000000000000000001684BE19C00000B9C000000000000000000000000000000000000002F00000000sambacc-v0.6+git.60.2f89a38/examples/addc.json{
  "samba-container-config": "v0",
  "configs": {
    "demo": {
      "instance_features": ["addc"],
      "domain_settings": "sink",
      "instance_name": "dc1"
    }
  },
  "domain_settings": {
    "sink": {
      "realm": "DOMAIN1.SINK.TEST",
      "short_domain": "DOMAIN1",
      "admin_password": "Passw0rd"
    }
  },
  "domain_groups": {
    "sink": [
        {"name": "supervisors"},
        {"name": "employees"},
        {"name": "characters"},
        {"name": "bulk"}
    ]
  },
  "domain_users": {
    "sink": [
      {
        "name": "bwayne",
        "password": "1115Rose.",
        "given_name": "Bruce",
        "surname": "Wayne",
        "member_of": ["supervisors", "characters", "employees"]
      },
      {
        "name": "ckent",
        "password": "1115Rose.",
        "given_name": "Clark",
        "surname": "Kent",
        "member_of": ["characters", "employees"]
      },
      {
        "name": "bbanner",
        "password": "1115Rose.",
        "given_name": "Bruce",
        "surname": "Banner",
        "member_of": ["characters", "employees"]
      },
      {
        "name": "pparker",
        "password": "1115Rose.",
        "given_name": "Peter",
        "surname": "Parker",
        "member_of": ["characters", "employees"]
      },
      {
        "name": "user0",
        "password": "1115Rose.",
        "given_name": "George0",
        "surname": "Hue-Sir",
        "member_of": ["bulk"]
      },
      {
        "name": "user1",
        "password": "1115Rose.",
        "given_name": "George1",
        "surname": "Hue-Sir",
        "member_of": ["bulk"]
      },
      {
        "name": "user2",
        "password": "1115Rose.",
        "given_name": "George2",
        "surname": "Hue-Sir",
        "member_of": ["bulk"]
      },
      {
        "name": "user3",
        "password": "1115Rose.",
        "given_name": "George3",
        "surname": "Hue-Sir",
        "member_of": ["bulk"]
      },
      {
        "name": "user4",
        "password": "1115Rose.",
        "given_name": "George4",
        "surname": "Hue-Sir",
        "member_of": ["bulk"]
      },
      {
        "name": "user5",
        "password": "1115Rose.",
        "given_name": "George5",
        "surname": "Hue-Sir",
        "member_of": ["bulk"]
      },
      {
        "name": "user6",
        "password": "1115Rose.",
        "given_name": "George6",
        "surname": "Hue-Sir",
        "member_of": ["bulk"]
      },
      {
        "name": "user7",
        "password": "1115Rose.",
        "given_name": "George7",
        "surname": "Hue-Sir",
        "member_of": ["bulk"]
      },
      {
        "name": "user8",
        "password": "1115Rose.",
        "given_name": "George8",
        "surname": "Hue-Sir",
        "member_of": ["bulk"]
      },
      {
        "name": "user9",
        "password": "1115Rose.",
        "given_name": "George9",
        "surname": "Hue-Sir",
        "member_of": ["bulk"]
      }
    ]
  }
}
07070100000011000081A4000000000000000000000001684BE19C00000C8A000000000000000000000000000000000000003200000000sambacc-v0.6+git.60.2f89a38/examples/addc_ou.json{
  "samba-container-config": "v0",
  "configs": {
    "demo": {
      "instance_features": ["addc"],
      "domain_settings": "sink",
      "instance_name": "dc1"
    }
  },
  "domain_settings": {
    "sink": {
      "realm": "DOMAIN1.SINK.TEST",
      "short_domain": "DOMAIN1",
      "admin_password": "Passw0rd"
    }
  },
  "organizational_units": {
    "sink": [
      {"name": "employees"}
    ]
  },
  "domain_groups": {
    "sink": [
        {"name": "supervisors"},
        {
          "name": "employees",
          "ou": "employees"
        },
        {"name": "characters"},
        {"name": "bulk"}
    ]
  },
  "domain_users": {
    "sink": [
      {
        "name": "bwayne",
        "password": "1115Rose.",
        "given_name": "Bruce",
        "surname": "Wayne",
        "member_of": ["supervisors", "characters", "employees"],
        "ou": "employees"
      },
      {
        "name": "ckent",
        "password": "1115Rose.",
        "given_name": "Clark",
        "surname": "Kent",
        "member_of": ["characters", "employees"],
        "ou": "employees"
      },
      {
        "name": "bbanner",
        "password": "1115Rose.",
        "given_name": "Bruce",
        "surname": "Banner",
        "member_of": ["characters", "employees"],
        "ou": "employees"
      },
      {
        "name": "pparker",
        "password": "1115Rose.",
        "given_name": "Peter",
        "surname": "Parker",
        "member_of": ["characters", "employees"],
        "ou": "employees"
      },
      {
        "name": "user0",
        "password": "1115Rose.",
        "given_name": "George0",
        "surname": "Hue-Sir",
        "member_of": ["bulk"]
      },
      {
        "name": "user1",
        "password": "1115Rose.",
        "given_name": "George1",
        "surname": "Hue-Sir",
        "member_of": ["bulk"]
      },
      {
        "name": "user2",
        "password": "1115Rose.",
        "given_name": "George2",
        "surname": "Hue-Sir",
        "member_of": ["bulk"]
      },
      {
        "name": "user3",
        "password": "1115Rose.",
        "given_name": "George3",
        "surname": "Hue-Sir",
        "member_of": ["bulk"]
      },
      {
        "name": "user4",
        "password": "1115Rose.",
        "given_name": "George4",
        "surname": "Hue-Sir",
        "member_of": ["bulk"]
      },
      {
        "name": "user5",
        "password": "1115Rose.",
        "given_name": "George5",
        "surname": "Hue-Sir",
        "member_of": ["bulk"]
      },
      {
        "name": "user6",
        "password": "1115Rose.",
        "given_name": "George6",
        "surname": "Hue-Sir",
        "member_of": ["bulk"]
      },
      {
        "name": "user7",
        "password": "1115Rose.",
        "given_name": "George7",
        "surname": "Hue-Sir",
        "member_of": ["bulk"]
      },
      {
        "name": "user8",
        "password": "1115Rose.",
        "given_name": "George8",
        "surname": "Hue-Sir",
        "member_of": ["bulk"]
      },
      {
        "name": "user9",
        "password": "1115Rose.",
        "given_name": "George9",
        "surname": "Hue-Sir",
        "member_of": ["bulk"]
      }
    ]
  }
}
07070100000012000081A4000000000000000000000001684BE19C0000038A000000000000000000000000000000000000002F00000000sambacc-v0.6+git.60.2f89a38/examples/ctdb.json{
  "samba-container-config": "v0",
  "configs": {
    "demo": {
      "shares": [
        "share"
      ],
      "globals": [
        "default"
      ],
      "instance_features": ["ctdb"],
      "instance_name": "SAMBA"
    }
  },
  "shares": {
    "share": {
      "options": {
        "path": "/share",
        "read only": "no",
        "valid users": "sambauser, otheruser"
      }
    }
  },
  "globals": {
    "default": {
      "options": {
        "security": "user",
        "server min protocol": "SMB2",
        "load printers": "no",
        "printing": "bsd",
        "printcap name": "/dev/null",
        "disable spoolss": "yes",
        "guest ok": "no"
      }
    }
  },
  "users": {
    "all_entries": [
      {
        "name": "sambauser",
        "password": "samba"
      },
      {
        "name": "otheruser",
        "password": "insecure321"
      }
    ]
  },
  "_footer": 1
}
07070100000013000081A4000000000000000000000001684BE19C000003F6000000000000000000000000000000000000003300000000sambacc-v0.6+git.60.2f89a38/examples/example1.json{
  "samba-container-config": "v0",
  "configs": {
    "example1": {
      "shares": [
        "demonstration",
        "examples"
      ],
      "globals": [
        "global0"
      ],
      "instance_name": "SERV1"
    }
  },
  "shares": {
    "demonstration": {
      "options": {
        "path": "/mnt/demo"
      }
    },
    "examples": {
      "options": {
        "path": "/mnt/examples"
      }
    }
  },
  "globals": {
    "global0": {
      "options": {
        "security": "user",
        "server min protocol": "SMB2",
        "load printers": "no",
        "printing": "bsd",
        "printcap name": "/dev/null",
        "disable spoolss": "yes",
        "guest ok": "no"
      }
    }
  },
  "users": {
    "all_entries": [
      {
        "name": "bob",
        "password": "notSoSafe"
      },
      {
        "name": "alice",
        "password": "123fakeStreet"
      },
      {
        "name": "carol",
        "nt_hash": "B784E584D34839235F6D88A5382C3821"
      }
    ]
  },
  "_footer": 1
}
07070100000014000081A4000000000000000000000001684BE19C0000034A000000000000000000000000000000000000003200000000sambacc-v0.6+git.60.2f89a38/examples/minimal.json{
  "samba-container-config": "v0",
  "configs": {
    "demo": {
      "shares": [
        "share"
      ],
      "globals": [
        "default"
      ],
      "instance_name": "SAMBA"
    }
  },
  "shares": {
    "share": {
      "options": {
        "path": "/share",
        "valid users": "sambauser, otheruser"
      }
    }
  },
  "globals": {
    "default": {
      "options": {
        "security": "user",
        "server min protocol": "SMB2",
        "load printers": "no",
        "printing": "bsd",
        "printcap name": "/dev/null",
        "disable spoolss": "yes",
        "guest ok": "no"
      }
    }
  },
  "users": {
    "all_entries": [
      {
        "name": "sambauser",
        "password": "samba"
      },
      {
        "name": "otheruser",
        "password": "insecure321"
      }
    ]
  },
  "_footer": 1
}
07070100000015000041ED000000000000000000000002684BE19C00000000000000000000000000000000000000000000002300000000sambacc-v0.6+git.60.2f89a38/extras07070100000016000081A4000000000000000000000001684BE19C00000989000000000000000000000000000000000000003700000000sambacc-v0.6+git.60.2f89a38/extras/python-sambacc.spec%global bname sambacc
# set xversion to define the default version number
%define xversion 0.1
# set pversion for a customized python package version string
%{?!pversion: %define pversion %{xversion}}
# set rversion for a customized rpm version
%{?!rversion: %define rversion %{xversion}}


Name:           python-%{bname}
Version:        %{rversion}
Release:        1%{?dist}%{?vendordist}
Summary:        Samba Container Configurator

License:        GPLv3+
URL:            https://github.com/samba-in-kubernetes/sambacc
# sambacc is not released yet so we're leaving off the url for now
# once packaged and released we can update this field
Source:         %{bname}-%{pversion}.tar.gz

BuildArch:      noarch
BuildRequires:  python3-devel
# we need python3-samba as a build dependency in order to run
# the test suite
BuildRequires:  python3-samba
# ditto for the net binary
BuildRequires: /usr/bin/net

%global _description %{expand:
A Python library and set of CLI tools intended to act as a bridge between a container
environment and Samba servers and utilities. It aims to consolidate, coordinate and
automate all of the low level steps of setting up smbd, users, groups, and other
supporting components.
}

%description %_description

%package -n python3-%{bname}
Summary: %{summary}
# Distro requires that are technically optional for the lib
Requires: python3-samba
Requires: python3-pyxattr
%if 0%{?fedora} >= 37 || 0%{?rhel} >= 9
# Enable extras other than validation as the dependency needed
# is too old on centos/rhel 9.
Recommends: %{name}+toml
Recommends: %{name}+yaml
Recommends: %{name}+rados
Recommends: %{name}+grpc
%endif
%if 0%{?fedora} >= 37
Recommends: %{name}+validation
%endif

%description -n python3-%{bname}  %_description


%prep
%autosetup -n %{bname}-%{pversion}

%generate_buildrequires
%pyproject_buildrequires -e py3-sys


%build
%pyproject_wheel


%install
%pyproject_install
%pyproject_save_files %{bname}


%check
%tox -e py3-sys


%files -n python3-%{bname} -f %{pyproject_files}
%doc README.*
%{_bindir}/samba-container
%{_bindir}/samba-dc-container
%{_bindir}/samba-remote-control
%{_datadir}/%{bname}/examples/


%pyproject_extras_subpkg -n python3-%{bname} validation
%pyproject_extras_subpkg -n python3-%{bname} toml
%pyproject_extras_subpkg -n python3-%{bname} yaml
%pyproject_extras_subpkg -n python3-%{bname} rados
%pyproject_extras_subpkg -n python3-%{bname} grpc


%changelog
%autochangelog
07070100000017000081A4000000000000000000000001684BE19C000003A3000000000000000000000000000000000000002B00000000sambacc-v0.6+git.60.2f89a38/pyproject.toml[build-system]
requires = ["setuptools>=42", "wheel", "setuptools_scm>=6.0"]
build-backend = "setuptools.build_meta"

[tool.setuptools_scm]
fallback_version = "0.1"
write_to = "sambacc/_version.py"
write_to_template = """
# coding: utf-8
# Generated by setuptool_scm. Do not edit. Do not commit.
version = "{version}"
"""
# I wanted to save a 2nd var for the full hash, but it turns out there's no way
# to grab the full hash from version control and save it to the file at this
# time.

[tool.black]
line-length = 79
quiet = true

[tool.mypy]
disallow_incomplete_defs = true

[[tool.mypy.overrides]]
module = "sambacc.*"
disallow_untyped_defs = true

[[tool.mypy.overrides]]
module = "sambacc.commands.*"
disallow_untyped_defs = false

[[tool.mypy.overrides]]
module = "sambacc.schema.*"
disallow_untyped_defs = false

[[tool.mypy.overrides]]
module = "sambacc.grpc.generated.*"
disallow_untyped_defs = false
ignore_errors = true
07070100000018000041ED000000000000000000000002684BE19C00000000000000000000000000000000000000000000002400000000sambacc-v0.6+git.60.2f89a38/sambacc07070100000019000081A4000000000000000000000001684BE19C00000000000000000000000000000000000000000000003000000000sambacc-v0.6+git.60.2f89a38/sambacc/__init__.py0707010000001A000081A4000000000000000000000001684BE19C00000997000000000000000000000000000000000000002E00000000sambacc-v0.6+git.60.2f89a38/sambacc/_xattr.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2022  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#
"""xattr shim module

This module exists to insulate sambacc from the platform xattr module.
Currently it only supports pyxattr. This module can be imported without
pyxattr (xattr) present. The functions will import the required module
and raise an ImportError if xattr is not available.

This shim also provides typed functions for xattr management. This
could have been accomplished by writing a pyi file for xattr but since
we need the runtime support we just add new functions.
"""


import pathlib
import typing

# Any value acceptable to the xattr calls for identifying a filesystem item.
XAttrItem = typing.Union[
    int,  # an open file descriptor, not wrapped by an object
    pathlib.Path,  # pathlib path object
    str,  # basic path string
    typing.IO,  # an open file descriptor, wrapped by an object
]
# Optional xattr namespace argument; None means "do not pass a namespace"
# to the underlying pyxattr call.
Namespace = typing.Optional[bytes]


def get(
    item: XAttrItem,
    name: str,
    *,
    nofollow: bool = False,
    namespace: Namespace = None
) -> bytes:
    """Fetch the value of the extended attribute `name` from `item`.

    See docs for PyXattr module for details. Imports the xattr module
    lazily; raises ImportError if pyxattr is not installed.
    """
    import xattr  # type: ignore

    # only forward namespace when the caller supplied one
    extra: dict[str, typing.Any] = (
        {"nofollow": nofollow}
        if namespace is None
        else {"nofollow": nofollow, "namespace": namespace}
    )
    return xattr.get(item, name, **extra)


def set(
    item: XAttrItem,
    name: str,
    value: str,
    *,
    flags: typing.Optional[int] = None,
    nofollow: bool = False,
    namespace: Namespace = None
) -> None:
    """Set the extended attribute `name` to `value` on `item`.

    See docs for PyXattr module for details. Imports the xattr module
    lazily; raises ImportError if pyxattr is not installed.
    """
    import xattr  # type: ignore

    extra: dict[str, typing.Any] = {"nofollow": nofollow}
    # flags and namespace are only passed through when explicitly given
    for key, val in (("flags", flags), ("namespace", namespace)):
        if val is not None:
            extra[key] = val
    return xattr.set(item, name, value, **extra)
0707010000001B000081A4000000000000000000000001684BE19C00001923000000000000000000000000000000000000002C00000000sambacc-v0.6+git.60.2f89a38/sambacc/addc.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import logging
import re
import subprocess
import typing

from sambacc import config
from sambacc import samba_cmds

_logger = logging.getLogger(__name__)


def provision(
    realm: str,
    dcname: str,
    admin_password: str,
    dns_backend: typing.Optional[str] = None,
    domain: typing.Optional[str] = None,
    options: typing.Optional[typing.Iterable[tuple[str, str]]] = None,
) -> None:
    """Provision a new AD domain by invoking samba-tool.

    This function is a direct translation of a previous shell script.
    As samba-tool is based on python libs, this could possibly be
    converted to import samba's libs and use those directly.
    """
    _logger.info(f"Provisioning AD domain: realm={realm}")
    argv = _provision_cmd(
        realm,
        dcname,
        admin_password=admin_password,
        dns_backend=dns_backend,
        domain=domain,
        options=options,
    )
    subprocess.check_call(argv)


def join(
    realm: str,
    dcname: str,
    admin_password: str,
    dns_backend: typing.Optional[str] = None,
    domain: typing.Optional[str] = None,
    options: typing.Optional[typing.Iterable[tuple[str, str]]] = None,
) -> None:
    """Join an existing AD domain as a DC by invoking samba-tool.

    Mirrors `provision` but performs a `domain join` instead of a
    fresh provision. Raises subprocess.CalledProcessError on failure.
    """
    _logger.info(f"Joining AD domain: realm={realm}")
    subprocess.check_call(
        _join_cmd(
            realm,
            dcname,
            admin_password=admin_password,
            dns_backend=dns_backend,
            # fix: domain was accepted by this function but silently
            # dropped; forward it like provision() does so a caller's
            # explicit short domain name takes effect
            domain=domain,
            options=options,
        )
    )


def create_user(
    name: str,
    password: str,
    surname: typing.Optional[str],
    given_name: typing.Optional[str],
    ou: typing.Optional[str] = None,
) -> None:
    """Create an AD user account via samba-tool."""
    argv = _user_create_cmd(name, password, surname, given_name, ou)
    _logger.info("Creating user: %r", name)
    subprocess.check_call(argv)


def create_group(name: str, ou: typing.Optional[str] = None) -> None:
    """Create an AD group via samba-tool."""
    argv = _group_add_cmd(name, ou)
    _logger.info("Creating group: %r", name)
    subprocess.check_call(argv)


def create_ou(name: str) -> None:
    """Create an AD organizational unit via samba-tool."""
    argv = _ou_add_cmd(name)
    _logger.info("Creating organizational unit: %r", name)
    subprocess.check_call(argv)


def add_group_members(group_name: str, members: list[str]) -> None:
    """Add the named members to an AD group via samba-tool."""
    argv = _group_add_members_cmd(group_name, members)
    _logger.info("Adding group members: %r", argv)
    subprocess.check_call(argv)


def _filter_opts(
    options: typing.Optional[typing.Iterable[tuple[str, str]]]
) -> list[tuple[str, str]]:
    _skip_keys = ["netbios name"]
    options = options or []
    return [(k, v) for (k, v) in options if k not in _skip_keys]


def _provision_cmd(
    realm: str,
    dcname: str,
    admin_password: str,
    dns_backend: typing.Optional[str] = None,
    domain: typing.Optional[str] = None,
    options: typing.Optional[typing.Iterable[tuple[str, str]]] = None,
) -> list[str]:
    """Build the samba-tool argv for provisioning a new AD domain."""
    dns_backend = dns_backend or "SAMBA_INTERNAL"
    # default short domain name: upper-cased first component of the realm
    domain = domain or realm.split(".")[0].upper()
    extra_opts = [
        f"--option={okey}={oval}" for okey, oval in _filter_opts(options)
    ]
    cmd = samba_cmds.sambatool[
        "domain",
        "provision",
        f"--option=netbios name={dcname}",
        "--use-rfc2307",
        f"--dns-backend={dns_backend}",
        "--server-role=dc",
        f"--realm={realm}",
        f"--domain={domain}",
        f"--adminpass={admin_password}",
    ][extra_opts]
    return cmd.argv()


def _join_cmd(
    realm: str,
    dcname: str,
    admin_password: str,
    dns_backend: typing.Optional[str] = None,
    domain: typing.Optional[str] = None,
    options: typing.Optional[typing.Iterable[tuple[str, str]]] = None,
) -> list[str]:
    """Build the samba-tool argv for joining an existing domain as a DC."""
    dns_backend = dns_backend or "SAMBA_INTERNAL"
    # default short domain name: upper-cased first component of the realm
    domain = domain or realm.split(".")[0].upper()
    extra_opts = [
        f"--option={okey}={oval}" for okey, oval in _filter_opts(options)
    ]
    cmd = samba_cmds.sambatool[
        "domain",
        "join",
        realm,
        "DC",
        f"-U{domain}\\Administrator",
        f"--option=netbios name={dcname}",
        f"--dns-backend={dns_backend}",
        f"--password={admin_password}",
    ][extra_opts]
    return cmd.argv()


def _user_create_cmd(
    name: str,
    password: str,
    surname: typing.Optional[str],
    given_name: typing.Optional[str],
    ou: typing.Optional[str],
) -> list[str]:
    """Build the samba-tool argv for creating a user account."""
    argv = samba_cmds.sambatool["user", "create", name, password].argv()
    # optional attributes are only appended when actually provided
    if surname:
        argv.append(f"--surname={surname}")
    if given_name:
        argv.append(f"--given-name={given_name}")
    if ou:
        argv.append(f"--userou=OU={ou}")
    return argv


def _group_add_cmd(name: str, ou: typing.Optional[str]) -> list[str]:
    """Build the samba-tool argv for creating a group."""
    argv = samba_cmds.sambatool["group", "add", name].argv()
    if ou:
        argv.append(f"--groupou=OU={ou}")
    return argv


def _ou_add_cmd(name: str) -> list[str]:
    """Build the samba-tool argv for creating an organizational unit."""
    return samba_cmds.sambatool["ou", "add", f"OU={name}"].argv()


def _group_add_members_cmd(group_name: str, members: list[str]) -> list[str]:
    """Build the samba-tool argv for adding members to a group."""
    member_list = ",".join(members)
    return samba_cmds.sambatool[
        "group",
        "addmembers",
        group_name,
        member_list,
    ].argv()


def _ifnames() -> list[str]:
    import socket

    return [iface for _, iface in socket.if_nameindex()]


def filtered_interfaces(
    ic: config.DCInterfaceConfig, ifnames: typing.Optional[list[str]] = None
) -> list[str]:
    """Select interface names that match the interface configuration.

    The loopback interface ("lo") is always retained. Any other name is
    kept only if it matches the include pattern and does not match the
    exclude pattern. When ifnames is None the system's interfaces are
    queried.
    """
    include_rx = re.compile(ic.include_pattern or "^.*$")
    exclude_rx = re.compile(ic.exclude_pattern or "^$")
    names = _ifnames() if ifnames is None else ifnames
    selected: list[str] = []
    for name in names:
        if name == "lo":
            selected.append(name)
        elif include_rx.match(name) and not exclude_rx.match(name):
            selected.append(name)
    return selected
0707010000001C000041ED000000000000000000000002684BE19C00000000000000000000000000000000000000000000002D00000000sambacc-v0.6+git.60.2f89a38/sambacc/commands0707010000001D000081A4000000000000000000000001684BE19C00000000000000000000000000000000000000000000003900000000sambacc-v0.6+git.60.2f89a38/sambacc/commands/__init__.py0707010000001E000081A4000000000000000000000001684BE19C000019BA000000000000000000000000000000000000003500000000sambacc-v0.6+git.60.2f89a38/sambacc/commands/addc.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import logging
import os
import shutil
import typing

from sambacc import addc
from sambacc import samba_cmds
from sambacc import smbconf_api
from sambacc import smbconf_samba

from .cli import Context, Fail, best_waiter, commands

# dnspython is an optional dependency; when it is missing the
# wait-domain setup step is disabled (see _prep_wait_on_domain).
try:
    import dns
    import dns.resolver
    import dns.exception

    _DNS = True
except ImportError:
    _DNS = False


_logger = logging.getLogger(__name__)

# Marker file created once the default domain entries have been populated.
_populated: str = "/var/lib/samba/POPULATED"
# The presence of smb.conf is used as the "already provisioned/joined" marker.
_provisioned: str = "/etc/samba/smb.conf"


@commands.command(name="summary")
def summary(ctx: Context) -> None:
    """Print a trivial greeting along with the current context."""
    print("Hello", ctx)


# Valid names for the --setup option; "init-all" expands to every step.
_setup_choices = ["init-all", "provision", "populate", "wait-domain", "join"]


def _dosetup(ctx: Context, step_name: str) -> bool:
    """Return True if the named setup step was requested on the CLI."""
    requested = ctx.cli.setup or []
    return "init-all" in requested or step_name in requested


def _run_container_args(parser):
    # CLI arguments specific to the AD DC `run` command.
    parser.add_argument(
        "--setup",
        action="append",
        choices=_setup_choices,
        help=(
            "Specify one or more setup step names to preconfigure the"
            " container environment before the server process is started."
            " The special 'init-all' name will perform all known setup steps."
        ),
    )
    # overrides the dcname taken from the instance configuration
    parser.add_argument(
        "--name",
        help="Specify a custom name for the dc, overriding the config file.",
    )


def _prep_provision(ctx: Context) -> None:
    """Provision a new AD domain, unless one is already configured.

    The presence of an smb.conf file is treated as the "already
    provisioned" marker.
    """
    if os.path.exists(_provisioned):
        _logger.info("Domain already provisioned")
        return
    domconfig = ctx.instance_config.domain()
    _logger.info(f"Provisioning domain: {domconfig.realm}")

    dcname = ctx.cli.name or domconfig.dcname
    prov_opts = list(ctx.instance_config.global_options())
    iface_cfg = domconfig.interface_config
    if iface_cfg.configured and "interfaces" not in dict(prov_opts):
        # no explicit interfaces option given: dynamically select
        # interfaces from the system to pass to the provisioning command
        _logger.info("Dynamic interface selection enabled")
        ifaces = addc.filtered_interfaces(iface_cfg)
        _logger.info("Selected interfaces: %s", ifaces)
        prov_opts.append(("interfaces", " ".join(ifaces)))
        prov_opts.append(("bind interfaces only", "yes"))
    addc.provision(
        realm=domconfig.realm,
        domain=domconfig.short_domain,
        dcname=dcname,
        admin_password=domconfig.admin_password,
        options=prov_opts,
    )
    # fold the configured global options into the generated smb.conf
    _merge_config(_provisioned, ctx.instance_config.global_options())


def _prep_join(ctx: Context) -> None:
    """Join an existing AD domain, unless this node is already configured.

    The presence of an smb.conf file is treated as the "already
    configured" marker.
    """
    if os.path.exists(_provisioned):
        _logger.info("Already configured. Not joining")
        return
    domconfig = ctx.instance_config.domain()
    # bug fix: this previously logged "Provisioning domain" (copy/paste
    # from _prep_provision) even though this step performs a join.
    _logger.info(f"Joining domain: {domconfig.realm}")

    dcname = ctx.cli.name or domconfig.dcname
    addc.join(
        realm=domconfig.realm,
        domain=domconfig.short_domain,
        dcname=dcname,
        admin_password=domconfig.admin_password,
        options=ctx.instance_config.global_options(),
    )
    # fold the configured global options into the generated smb.conf
    _merge_config(_provisioned, ctx.instance_config.global_options())


def _merge_config(
    smb_conf_path: str,
    options: typing.Optional[typing.Iterable[tuple[str, str]]] = None,
) -> None:
    """Merge extra global options into an existing smb.conf file.

    The previous file is kept as a best-effort backup at
    ``<smb_conf_path>.orig``. Does nothing when no options are given.
    """
    if not options:
        return
    file_conf = smbconf_samba.SMBConf.from_file(smb_conf_path)
    staged = smbconf_api.SimpleConfigStore()
    staged.import_smbconf(file_conf)
    merged = dict(staged["global"])
    merged.update(options)
    staged["global"] = list(merged.items())
    try:
        # keep a backup of the original file; failure is non-fatal
        os.rename(smb_conf_path, f"{smb_conf_path}.orig")
    except OSError:
        pass
    with open(smb_conf_path, "w") as fh:
        smbconf_api.write_store_as_smb_conf(fh, staged)


def _prep_wait_on_domain(ctx: Context) -> None:
    """Block until the AD domain's LDAP SRV record appears in DNS.

    Requires the optional dnspython dependency; raises Fail when it is
    not available.
    """
    if not _DNS:
        _logger.info("Can not query domain. Exiting.")
        # bug fix: the error message previously misspelled "dnspython"
        # as "dnsypthon"
        raise Fail("no dns support available (missing dnspython)")

    realm = ctx.instance_config.domain().realm
    waiter = best_waiter(max_timeout=30)
    while True:
        _logger.info(f"checking for AD domain in dns: {realm}")
        try:
            dns.resolver.query(f"_ldap._tcp.{realm}.", "SRV")
            return
        except dns.exception.DNSException:
            _logger.info(f"dns record for {realm} not found")
            waiter.wait()


def _prep_populate(ctx: Context) -> None:
    """Create the default OUs, groups, and users for a fresh domain.

    Runs at most once: a marker file records that population completed.
    """
    if os.path.exists(_populated):
        _logger.info("populated marker exists")
        return
    _logger.info("Populating domain with default entries")

    for unit in ctx.instance_config.organizational_units():
        addc.create_ou(unit.ou_name)

    for group_entry in ctx.instance_config.domain_groups():
        addc.create_group(group_entry.groupname, group_entry.ou)

    for user_entry in ctx.instance_config.domain_users():
        addc.create_user(
            name=user_entry.username,
            password=user_entry.plaintext_passwd,
            surname=user_entry.surname,
            given_name=user_entry.given_name,
            ou=user_entry.ou,
        )
        # TODO: probably should improve this to avoid extra calls / loops
        for gname in user_entry.member_of:
            addc.add_group_members(
                group_name=gname, members=[user_entry.username]
            )

    # "touch" the populated marker
    with open(_populated, "w"):
        pass


def _prep_krb5_conf(ctx: Context) -> None:
    """Install the samba-generated krb5.conf as the system default."""
    shutil.copy("/var/lib/samba/private/krb5.conf", "/etc/krb5.conf")


@commands.command(name="run", arg_func=_run_container_args)
def run(ctx: Context) -> None:
    """Optionally perform setup steps, then start the samba AD DC."""
    _logger.info("Running AD DC container")
    # setup steps run in a fixed order, each only when requested
    steps = (
        ("wait-domain", _prep_wait_on_domain),
        ("join", _prep_join),
        ("provision", _prep_provision),
        ("populate", _prep_populate),
    )
    for step_name, step_func in steps:
        if _dosetup(ctx, step_name):
            step_func(ctx)

    _prep_krb5_conf(ctx)
    _logger.info("Starting samba server")
    samba_cmds.execute(samba_cmds.samba_dc_foreground())
0707010000001F000081A4000000000000000000000001684BE19C0000056F000000000000000000000000000000000000003600000000sambacc-v0.6+git.60.2f89a38/sambacc/commands/check.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

from sambacc import samba_cmds, ctdb

from .cli import commands, Context, Fail


def _check_args(parser):
    parser.add_argument(
        "target",
        choices=["winbind", "ctdb-nodestatus"],
        help="Name of the target subsystem to check.",
    )


@commands.command(name="check", arg_func=_check_args)
def check(ctx: Context) -> None:
    """Check that a given subsystem is functioning."""
    target = ctx.cli.target
    if target == "winbind":
        samba_cmds.execute(samba_cmds.wbinfo["--ping"])
    elif target == "ctdb-nodestatus":
        ctdb.check_nodestatus()
    else:
        # unreachable via argparse choices, kept as a safety net
        raise Fail("unknown subsystem: {}".format(target))
07070100000020000081A4000000000000000000000001684BE19C00002557000000000000000000000000000000000000003400000000sambacc-v0.6+git.60.2f89a38/sambacc/commands/cli.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

from collections import namedtuple
import argparse
import importlib
import inspect
import logging
import typing

from sambacc import config
from sambacc import leader
from sambacc import opener
from sambacc import permissions
from sambacc import simple_waiter

# inotify support is optional; best_waiter falls back to a polling
# sleeper when the inotify_waiter module cannot be imported.
_INOTIFY_OK = True
try:
    from sambacc import inotify_waiter as iw
except ImportError:
    _INOTIFY_OK = False

_logger = logging.getLogger(__name__)


class Fail(ValueError):
    """Error raised by sambacc commands to indicate a failure condition."""

    pass


class Parser(typing.Protocol):
    """Minimal protocol for wrapping argument parser or similar.

    Only the two methods used by this package are declared;
    argparse.ArgumentParser satisfies this protocol.
    """

    def set_defaults(self, **kwargs: typing.Any) -> None:
        """Set a default value for an argument parser."""

    def add_argument(
        self, *args: typing.Any, **kwargs: typing.Any
    ) -> typing.Any:
        """Add an argument to be parsed."""


# A registered CLI command: its name, implementation function, optional
# argparse setup function, and optional help text override.
Command = namedtuple("Command", "name cmd_func arg_func cmd_help")


def toggle_option(parser: Parser, arg: str, dest: str, helpfmt: str) -> Parser:
    """Register a boolean --X/--no-X option pair on the parser.

    helpfmt is a format string receiving "Enable" or "Disable".
    Returns the parser to allow chaining.
    """
    enable_help = helpfmt.format("Enable")
    disable_help = helpfmt.format("Disable")
    parser.add_argument(arg, action="store_true", dest=dest, help=enable_help)
    parser.add_argument(
        arg.replace("--", "--no-"),
        action="store_false",
        dest=dest,
        help=disable_help,
    )
    return parser


def ceph_id(
    value: typing.Union[str, dict[str, typing.Any]]
) -> dict[str, typing.Any]:
    """Parse a string value into a dict containing ceph id values.
    The input should contain name= or rados_id= to identify the kind
    of name being provided. As a shortcut a bare name can be provided
    and the code will guess at the kind.

    Returns a dict with "client_name" and "full_name" keys; a dict
    argument (already parsed) is returned unchanged.
    """
    if not isinstance(value, str):
        return value
    if value == "?":
        # A hack to avoid putting tons of ceph specific info in the normal
        # help output. There's probably a better way to do this but it
        # gets the job done for now.
        raise argparse.ArgumentTypeError(
            "requested help:"
            " Specify names in the form"
            " --ceph-id=[key=value][,key=value][,...]."
            ' Valid keys include "name" to set the exact name and "rados_id"'
            ' to specify a name that lacks the "client." prefix (that will'
            # bug fix: a space was missing at the start of the next
            # fragment, rendering as "that willautomatically get added"
            " automatically get added)."
            " Alternatively, specify just the name to allow the system to"
            " guess if the name is prefixed already or not."
        )
    result: dict[str, typing.Any] = {}
    # complex mode
    if "=" in value:
        for part in value.split(","):
            if "=" not in part:
                raise argparse.ArgumentTypeError(
                    f"unexpected value for ceph-id: {value!r}"
                )
            key, val = part.split("=", 1)
            if key == "name":
                result["client_name"] = val
                result["full_name"] = True
            elif key == "rados_id":
                result["client_name"] = val
                result["full_name"] = False
            else:
                b = f"unexpected key {key!r} in value for ceph-id: {value!r}"
                raise argparse.ArgumentTypeError(b)
    else:
        # this shorthand is meant mainly for lazy humans (me) when running test
        # images manually. The key-value form above is meant for automation.
        result["client_name"] = value
        # assume that if the name starts with client. it's the full name and
        # avoid having the ceph library double up an create client.client.x.
        result["full_name"] = value.startswith("client.")
    return result


def get_help(cmd: Command) -> str:
    """Return help text for a command: explicit help if set, else the
    command function's docstring, else an empty string."""
    if cmd.cmd_help is not None:
        return cmd.cmd_help
    return cmd.cmd_func.__doc__ or ""


def add_command(subparsers: typing.Any, cmd: Command) -> None:
    """Register a Command on an argparse subparsers collection."""
    sub = subparsers.add_parser(cmd.name, help=get_help(cmd))
    sub.set_defaults(cfunc=cmd.cmd_func)
    if cmd.arg_func is not None:
        cmd.arg_func(sub)


class CommandBuilder:
    """Registry of CLI commands with decorator-based registration."""

    def __init__(self):
        self._commands = []
        self._names = set()

    def command(self, name, arg_func=None, cmd_help=None):
        """Return a decorator that registers a function as a named command.

        Raises ValueError if the name was already registered.
        """
        if name in self._names:
            raise ValueError(f"{name} already in use")
        self._names.add(name)

        def _wrapper(f):
            entry = Command(
                name=name, cmd_func=f, arg_func=arg_func, cmd_help=cmd_help
            )
            self._commands.append(entry)
            return f

        return _wrapper

    def assemble(
        self, arg_func: typing.Optional[typing.Callable] = None
    ) -> argparse.ArgumentParser:
        """Build an ArgumentParser with a subparser for each command."""
        parser = argparse.ArgumentParser()
        if arg_func is not None:
            arg_func(parser)
        subparsers = parser.add_subparsers()
        for cmd in self._commands:
            add_command(subparsers, cmd)
        return parser

    def dict(self) -> dict[str, Command]:
        """Return a dict mapping command names to Command object."""
        return {c.name: c for c in self._commands}

    def include(
        self, modname: str, *, package: str = "", check: bool = True
    ) -> None:
        """Import a python module to add commands to this command builder.
        If check is true and no new commands are added by the import, raise an
        error.
        """
        if modname.startswith(".") and not package:
            # relative module names default to this package
            package = "sambacc.commands"
        mod = importlib.import_module(modname, package=package)
        if not check:
            return
        known = {c.cmd_func for c in self._commands}
        imported = {
            fn for _, fn in inspect.getmembers(mod, inspect.isfunction)
        }
        if not imported.intersection(known):
            raise Fail(f"import from {modname} did not add any new commands")

    def include_multiple(
        self, modnames: typing.Iterable[str], *, package: str = ""
    ) -> None:
        """Run the include function on multiple module names."""
        for modname in modnames:
            self.include(modname, package=package)


class Context(typing.Protocol):
    """Protocol type for CLI Context.
    Used to share simple, common state, derived from the CLI, across individual
    command functions. (See commands.common.CommandContext for a concrete
    implementation.)
    """

    # The expects_ctdb attribute indicates that the command can, and should,
    # make use of ctdb whenever ctdb is enabled in the configuration.
    expects_ctdb: bool

    @property
    def cli(self) -> argparse.Namespace:
        """Return a parsed command line namespace object."""

    @property
    def instance_config(self) -> config.InstanceConfig:
        """Return an instance config based on cli params and env."""

    @property
    def require_validation(self) -> typing.Optional[bool]:
        """Return true if configuration needs validation."""

    @property
    def opener(self) -> opener.Opener:
        """Return an appropriate opener object for this instance."""


def best_waiter(
    filename: typing.Optional[str] = None,
    max_timeout: typing.Optional[int] = None,
) -> simple_waiter.Waiter:
    """Fetch the best waiter type for our sambacc command.

    When a filename is given and inotify support is available, an
    inotify-based waiter is returned; otherwise a simple sleeper.
    """
    if filename and _INOTIFY_OK:
        _logger.info("enabling inotify support")
        return iw.INotify(
            filename, print_func=_logger.info, timeout=max_timeout
        )
    # should max_timeout change Sleeper too? probably.
    return simple_waiter.Sleeper()


def best_leader_locator(
    iconfig: config.InstanceConfig,
) -> leader.LeaderLocator:
    """Fetch the best leader locator for our sambacc command.
    This only makes sense to be used in a clustered scenario.

    Note: currently always returns the ctdb CLI-based locator; iconfig
    is accepted for interface symmetry but is not consulted here.
    """
    # local import: ctdb support is only needed in clustered scenarios
    from sambacc import ctdb

    return ctdb.CLILeaderLocator()


def perms_handler(
    config: config.PermissionsConfig,
    path: str,
) -> permissions.PermissionsHandler:
    """Fetch and instantiate the appropriate permissions handler for the given
    configuration. Unknown methods fall back to the initializing posix
    permissions handler.
    """
    dispatch: dict[str, tuple[str, typing.Any]] = {
        "none": (
            "Using no-op permissions handler",
            permissions.NoopPermsHandler,
        ),
        "initialize-share-perms": (
            "Using initializing posix permissions handler",
            permissions.InitPosixPermsHandler,
        ),
        "always-share-perms": (
            "Using always-setting posix permissions handler",
            permissions.AlwaysPosixPermsHandler,
        ),
    }
    log_msg, handler_cls = dispatch.get(
        config.method, dispatch["initialize-share-perms"]
    )
    _logger.info(log_msg)
    return handler_cls(path, config.status_xattr, options=config.options)


# Global registries: primary CLI commands and named setup steps.
commands = CommandBuilder()
setup_steps = CommandBuilder()
07070100000021000081A4000000000000000000000001684BE19C000023A9000000000000000000000000000000000000003700000000sambacc-v0.6+git.60.2f89a38/sambacc/commands/common.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2025  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import argparse
import json
import logging
import os
import time
import typing

from sambacc import config
from sambacc import opener
from sambacc import rados_opener
from sambacc import samba_cmds
from sambacc import url_opener

from . import skips
from .cli import Parser, ceph_id

# Default path of the sambacc JSON configuration file.
DEFAULT_CONFIG = "/etc/samba/container/config.json"
# Default path of the marker file indicating a completed domain join.
DEFAULT_JOIN_MARKER = "/var/lib/samba/container-join-marker.json"


class CommandContext:
    """CLI Context for standard samba-container commands.

    Lazily constructs the instance config and opener on first access.
    """

    def __init__(self, cli_args: argparse.Namespace):
        self._cli = cli_args
        self._iconfig: typing.Optional[config.InstanceConfig] = None
        self.expects_ctdb = False
        self._opener: typing.Optional[opener.Opener] = None

    @property
    def cli(self) -> argparse.Namespace:
        """The parsed command line namespace."""
        return self._cli

    @property
    def instance_config(self) -> config.InstanceConfig:
        """The instance config, loaded on first access and cached."""
        if self._iconfig is None:
            sources = self.cli.config or []
            loaded = config.read_config_files(
                sources,
                require_validation=self.require_validation,
                opener=self.opener,
            )
            self._iconfig = loaded.get(self.cli.identity)
        return self._iconfig

    @property
    def require_validation(self) -> typing.Optional[bool]:
        """Whether config validation is required, disabled, or automatic."""
        if self.cli.validate_config in ("required", "true"):
            return True
        if self.cli.validate_config == "false":
            return False
        return None

    @property
    def opener(self) -> opener.Opener:
        """An opener for fetching config resources, created on demand."""
        if self._opener is None:
            self._opener = opener.FallbackOpener([url_opener.URLOpener()])
        return self._opener


def split_entries(value: str) -> list[str]:
    """Split a env var up into separate strings. The string can be
    an "old school" colon seperated list of values (like PATH).
    Or, it can be JSON-formatted if it starts and ends with square
    brackets ('[...]'). Strings are the only permitted type within
    this JSON-formatted list.

    :raises ValueError: if value is not a string, or the JSON list
        contains a non-string item.
    """
    out: list[str] = []
    if not isinstance(value, str):
        raise ValueError(value)
    # in order to cleanly allow passing uris as config "paths" we can't
    # simply split on colons. Avoid coming up with a hokey custom scheme
    # and enter "JSON-mode" if the env var starts and ends with brackets
    # hinting it contains a JSON list.
    v = value.rstrip(None)  # permit trailing whitespace (trailing only!)
    if not v:
        # bug fix: a whitespace-only value previously raised IndexError
        # on v[0]; treat it the same as an empty value.
        return out
    if v[0] == "[" and v[-1] == "]":
        for item in json.loads(v):
            if not isinstance(item, str):
                raise ValueError("Variable JSON must be a list of strings")
            out.append(item)
    else:
        # backwards compatibilty mode with `PATH` like syntax
        for part in value.split(":"):
            out.append(part)
    return out


def from_env(
    ns: argparse.Namespace,
    var: str,
    ename: str,
    default: typing.Any = None,
    convert_env: typing.Optional[typing.Callable] = None,
    convert_value: typing.Optional[typing.Callable] = str,
) -> None:
    """Bind an environment variable to a command line option. This allows
    certain cli options to be set from env vars if the cli option is
    not directly provided.

    NOTE(review): the `default` parameter is accepted but never used by
    this implementation — callers passing default= get no effect.
    Confirm intended semantics before wiring it up.
    """
    value = getattr(ns, var, None)
    if not value:
        # fall back to the environment when the cli gave nothing
        value = os.environ.get(ename, "")
        if convert_env is not None:
            value = convert_env(value)
    if convert_value is not None:
        value = convert_value(value)
    # only overwrite the namespace attribute with a truthy final value
    if value:
        setattr(ns, var, value)


def env_to_cli(cli: argparse.Namespace) -> None:
    """Configure the sambacc default command line option to environment
    variable mappings.
    """
    # these two options hold lists and need the list-splitting converter
    from_env(
        cli,
        "config",
        "SAMBACC_CONFIG",
        convert_env=split_entries,
        convert_value=None,
        default=DEFAULT_CONFIG,
    )
    from_env(
        cli,
        "join_files",
        "SAMBACC_JOIN_FILES",
        convert_env=split_entries,
        convert_value=None,
    )
    # simple string-valued mappings
    from_env(cli, "identity", "SAMBA_CONTAINER_ID")
    from_env(cli, "username", "JOIN_USERNAME")
    from_env(cli, "password", "INSECURE_JOIN_PASSWORD")
    from_env(cli, "samba_debug_level", "SAMBA_DEBUG_LEVEL")
    from_env(cli, "validate_config", "SAMBACC_VALIDATE_CONFIG")
    from_env(cli, "ceph_id", "SAMBACC_CEPH_ID", convert_value=ceph_id)


def pre_action(cli: argparse.Namespace) -> None:
    """Handle debugging/diagnostic related options before the target
    action of the command is performed.
    """
    if cli.debug_delay:
        time.sleep(int(cli.debug_delay))
    if cli.samba_debug_level:
        samba_cmds.set_global_debug(cli.samba_debug_level)
    if cli.samba_command_prefix:
        samba_cmds.set_global_prefix([cli.samba_command_prefix])

    # should there be an option to force {en,dis}able rados?
    # Right now we just always try to enable rados when possible.
    ceph = cli.ceph_id
    rados_opener.enable_rados(
        url_opener.URLOpener,
        client_name=ceph.get("client_name", ""),
        full_name=ceph.get("full_name", False),
    )


def enable_logging(cli: argparse.Namespace) -> None:
    """Configure sambacc command line logging.

    Sets DEBUG level when --debug was given, INFO otherwise, and
    attaches a stream handler to the root logger.
    """
    level = logging.DEBUG if cli.debug else logging.INFO
    root = logging.getLogger()
    root.setLevel(level)
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(level)
    stream_handler.setFormatter(
        logging.Formatter("{asctime}: {levelname}: {message}", style="{")
    )
    root.addHandler(stream_handler)


def global_args(parser: Parser) -> None:
    """Configure sambacc default global command line arguments.

    All options registered here are shared by every subcommand.
    """
    parser.add_argument(
        "--config",
        action="append",
        help=(
            "Specify source configuration"
            " (can also be set in the environment by SAMBACC_CONFIG)."
        ),
    )
    parser.add_argument(
        "--identity",
        help=(
            "A string identifying the local identity"
            " (can also be set in the environment by SAMBA_CONTAINER_ID)."
        ),
    )
    parser.add_argument(
        "--etc-passwd-path",
        default="/etc/passwd",
        help="Specify a path for the passwd file.",
    )
    parser.add_argument(
        "--etc-group-path",
        default="/etc/group",
        help="Specify a path for the group file.",
    )
    parser.add_argument(
        "--username",
        default="Administrator",
        help="Specify a user name for domain access.",
    )
    parser.add_argument(
        "--password", default="", help="Specify a password for domain access."
    )
    parser.add_argument(
        "--debug-delay",
        type=int,
        help="Delay activity for a specified number of seconds.",
    )
    parser.add_argument(
        "--join-marker",
        default=DEFAULT_JOIN_MARKER,
        # bug fix: help text previously misspelled "performed"
        help="Path to a file used to indicate a join has been performed.",
    )
    parser.add_argument(
        "--samba-debug-level",
        choices=[str(v) for v in range(0, 11)],
        help="Specify samba debug level for commands.",
    )
    parser.add_argument(
        "--samba-command-prefix",
        help="Wrap samba commands within a supplied command prefix",
    )
    parser.add_argument(
        "--skip-if",
        dest="skip_conditions",
        action="append",
        type=skips.parse,
        help=(
            "Skip execution based on a condition. Conditions include"
            " 'file:[!]<path>', 'env:<var>(==|!=)<value>', and 'always:'."
            " (Pass `?` for more details)"
        ),
    )
    parser.add_argument(
        "--skip-if-file",
        action="append",
        dest="skip_conditions",
        type=skips.SkipFile.parse,
        help="(DEPRECATED) Perform no action if the specified path exists.",
    )
    parser.add_argument(
        "--validate-config",
        choices=("auto", "required", "true", "false"),
        help="Perform schema based validation of configuration.",
    )
    parser.add_argument(
        "--ceph-id",
        type=ceph_id,
        help=(
            # bug fix: a space was missing before "(can", rendering as
            # "libraries(can also be set ..."
            "Specify a user/client ID to ceph libraries"
            " (can also be set in the environment by SAMBACC_CEPH_ID."
            " Ignored if Ceph RADOS libraries are not present or unused."
            " Pass `?` for more details)."
        ),
    )
    parser.add_argument(
        "--debug",
        action="store_true",
        help="Enable debug level logging of sambacc.",
    )
07070100000022000081A4000000000000000000000001684BE19C000014DA000000000000000000000000000000000000003700000000sambacc-v0.6+git.60.2f89a38/sambacc/commands/config.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import argparse
import functools
import logging
import subprocess
import sys
import typing

from sambacc import config
from sambacc import samba_cmds
from sambacc.simple_waiter import watch
import sambacc.netcmd_loader as nc
import sambacc.paths as paths

from .cli import (
    Context,
    best_leader_locator,
    best_waiter,
    commands,
    perms_handler,
    setup_steps,
)

_logger = logging.getLogger(__name__)


@commands.command(name="print-config")
def print_config(ctx: Context) -> None:
    """Render the samba configuration derived from the sambacc config,
    formatted as an smb.conf, onto standard output.
    """
    out = sys.stdout
    nc.template_config(out, ctx.instance_config)


@commands.command(name="import")
@setup_steps.command(name="config")
def import_config(ctx: Context) -> None:
    """Load configuration parameters from the sambacc config into
    samba's registry-based configuration.
    """
    # samba expects certain directories to exist, with certain
    # permissions, before the registry can be written
    paths.ensure_samba_dirs()
    nc.NetCmdLoader().import_config(ctx.instance_config)


def _update_config_args(parser: argparse.ArgumentParser) -> None:
    parser.add_argument(
        "--watch",
        action="store_true",
        help="If set, watch the source for changes and update config.",
    )


def _read_config(ctx: Context) -> config.InstanceConfig:
    """Load and return the instance configuration for the current
    identity from the config files named on the command line.
    """
    sources = ctx.cli.config or []
    loaded = config.read_config_files(
        sources,
        require_validation=ctx.require_validation,
        opener=ctx.opener,
    )
    return loaded.get(ctx.cli.identity)


# Result of one config-update pass: the (possibly None) instance config
# that was considered and a flag that is True when the config changed.
UpdateResult = typing.Tuple[typing.Optional[config.InstanceConfig], bool]


def _update_config(
    current: config.InstanceConfig,
    previous: typing.Optional[config.InstanceConfig],
    ensure_paths: bool = True,
    notify_server: bool = True,
) -> UpdateResult:
    """Compare the current and previous instance configurations. If they
    differ, ensure any new paths, update the samba config, and inform any
    running smbds of the new configuration.  Return the current config and a
    boolean indicating if the instance configs differed.
    """
    changed = current != previous
    if not changed:
        # nothing to do; report the unchanged config
        return current, changed
    if ensure_paths:
        # make sure every configured share path exists with the
        # expected permissions
        for share in current.shares():
            spath = share.path()
            if not spath:
                continue
            _logger.info(f"Ensuring share path: {spath}")
            paths.ensure_share_dirs(spath)
            _logger.info(f"Updating permissions if needed: {spath}")
            perms_handler(share.permissions_config(), spath).update()
    # push the new configuration into samba's registry
    _logger.info("Updating samba configuration")
    nc.NetCmdLoader().import_config(current)
    if notify_server:
        # ask any running smbd processes to pick up the new config
        subprocess.check_call(
            list(samba_cmds.smbcontrol["smbd", "reload-config"])
        )
    return current, changed


def _exec_if_leader(
    ctx: Context,
    cond_func: typing.Callable[..., UpdateResult],
) -> typing.Callable[..., UpdateResult]:
    """Wrap cond_func so that it executes only on the cluster leader.
    Non-leader nodes return (None, False) without doing any work.
    """

    # CTDB status and leader detection is not changeable at runtime, so
    # we can decide on wrapping once and ignore it in refreshed configs.
    @functools.wraps(cond_func)
    def _wrapped(
        current: config.InstanceConfig, previous: config.InstanceConfig
    ) -> UpdateResult:
        with best_leader_locator(ctx.instance_config) as locator:
            if not locator.is_leader():
                _logger.info("skipping config update. node not leader")
                return None, False
            _logger.info("checking for update. node is leader")
            out = cond_func(current, previous)
        return out

    return _wrapped


@commands.command(name="update-config", arg_func=_update_config_args)
def update_config(ctx: Context) -> None:
    """Update samba's configuration from the sambacc config, either once
    or continuously when --watch is given.
    """
    fetch = functools.partial(_read_config, ctx)
    apply_update = _update_config

    if ctx.instance_config.with_ctdb:
        _logger.info("enabling ctdb support: will check for leadership")
        apply_update = _exec_if_leader(ctx, apply_update)

    if not ctx.cli.watch:
        # we pass None as the previous config so that the command is
        # not nearly always a no-op when run from the command line.
        apply_update(fetch(), None)
        return
    _logger.info("will watch configuration source")
    watch(
        best_waiter(ctx.cli.config),
        ctx.instance_config,
        fetch,
        apply_update,
    )
07070100000023000081A4000000000000000000000001684BE19C000046C1000000000000000000000000000000000000003500000000sambacc-v0.6+git.60.2f89a38/sambacc/commands/ctdb.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import argparse
import contextlib
import logging
import os
import socket
import sys
import typing

from sambacc import ctdb
from sambacc import jfile
from sambacc import rados_opener
from sambacc import samba_cmds
from sambacc.simple_waiter import Sleeper, Waiter

from .cli import best_leader_locator, best_waiter, commands, Context, Fail

_logger = logging.getLogger(__name__)

# Rather irritatingly, k8s does not have a simple method for passing the
# ordinal index of a stateful set down to the containers. This has been
# proposed but not implemented yet. See:
#  https://github.com/kubernetes/kubernetes/issues/40651
# While I find putting any k8s specific knowledge in sambacc distasteful
# all we're really doing is teaching sambacc how to extract the node
# number from the host name, an operation that's not k8s specific.
# That isn't *too* dirty. Just a smudge really. :-)
# Policy token for --take-node-number-from-hostname: use the digits after
# the final dash in the host name (e.g. "node-2" -> pnn 2).
_AFTER_LAST_DASH = "after-last-dash"


def _ctdb_ok():
    sambacc_ctdb = os.environ.get("SAMBACC_CTDB")
    gate = "ctdb-is-experimental"
    if sambacc_ctdb == gate:
        return
    print("Using CTDB with samba-container (sambacc) is experimental.")
    print("If you are developing or testing features for sambacc please")
    print("set the environment variable SAMBACC_CTDB to the value:")
    print("    ", gate)
    print("before continuing and try again.")
    print()
    raise Fail(gate)


def _ctdb_migrate_args(parser: argparse.ArgumentParser) -> None:
    """Register CLI options for the ctdb-migrate command."""
    # where the converted CTDB databases will land
    parser.add_argument(
        "--dest-dir",
        default=ctdb.DB_DIR,
        help="Specify where CTDB database files will be written.",
    )
    # optionally move the originals out of the way after conversion
    parser.add_argument(
        "--archive",
        help="Move converted TDB files to an archive dir.",
    )


def _ctdb_general_node_args(parser: argparse.ArgumentParser) -> None:
    parser.add_argument(
        "--hostname",
        help="Specify the host name for the CTDB node",
    )
    parser.add_argument(
        "--node-number",
        type=int,
        help="Expected node number",
    )
    # This is a choice with a single acceptable param, rather than an on/off
    # bool, # in the case that other container orchs have a similar but not
    # quite the same issue and we want to support a different scheme someday.
    parser.add_argument(
        "--take-node-number-from-hostname",
        choices=(_AFTER_LAST_DASH,),
        help=(
            "Take the node number from the given host name following"
            " the specified policy."
        ),
    )
    parser.add_argument(
        "--take-node-number-from-env",
        "-E",
        const="NODE_NUMBER",
        nargs="?",
        help=(
            "Take the node number from the environment. If specified"
            " with a value, use that value as the environment variable"
            " name. Otherwise, use environment variable NODE_NUMBER."
        ),
    )
    parser.add_argument(
        "--persistent-path",
        help="Path to a persistent path for storing nodes file",
    )
    parser.add_argument(
        "--metadata-source",
        help=(
            "Specify location of cluster metadata state-tracking object."
            " This can be a file path or a URI-style identifier."
        ),
    )


def _ctdb_set_node_args(parser: argparse.ArgumentParser) -> None:
    """Register CLI options for ctdb-set-node: the common node options
    plus an explicit node IP.
    """
    _ctdb_general_node_args(parser)
    parser.add_argument(
        "--ip",
        help="Specify node by IP",
    )


class NodeParams:
    """Resolve and cache per-node CTDB parameters -- node number,
    identity, IP address, and cluster-metadata access objects -- from
    the CLI options and the instance configuration.
    """

    _ctx: Context
    # this node's expected pnn, when one could be determined
    node_number: typing.Optional[int] = None
    hostname: typing.Optional[str] = None
    # path of the persistent ctdb nodes file
    persistent_path: str = ""
    # legacy user-supplied nodes_json value (empty unless configured)
    _nodes_json: str = ""
    # default or user-customized cluster meta uri from the config
    _cluster_meta_uri: str = ""
    _ip_addr: typing.Optional[str] = None
    _cluster_meta_obj: typing.Optional[ctdb.ClusterMeta] = None
    _waiter_obj: typing.Optional[Waiter] = None

    def __init__(self, ctx: Context):
        self._ctx = ctx
        ccfg = ctx.instance_config.ctdb_config()

        # stuff that many of the commands use
        self.persistent_path = ctx.cli.persistent_path
        if self.persistent_path is None:
            self.persistent_path = ccfg["nodes_path"]
        # nodes_json will now only be in the ctdb config section if it has been
        # specified by the user.
        self._nodes_json = ccfg.get("nodes_json") or ""
        # cluster_meta_uri can be a uri-ish string or path. It will be set with
        # a default value by the config even if there's no user supplied value.
        self._cluster_meta_uri = ccfg.get("cluster_meta_uri") or ""

        # determine the node number: an explicit --node-number wins, then
        # hostname-derived, then environment-derived; otherwise unknown
        self.hostname = ctx.cli.hostname
        if ctx.cli.node_number is not None:
            if ctx.cli.node_number < 0:
                raise ValueError(f"invalid node number: {ctx.cli.node_number}")
            self.node_number = ctx.cli.node_number
        elif ctx.cli.take_node_number_from_hostname == _AFTER_LAST_DASH:
            if not self.hostname:
                raise ValueError(
                    "--hostname required if taking node number from host name"
                )
            if "-" not in self.hostname:
                raise ValueError(
                    f"invalid hostname for node number: {self.hostname}"
                )
            self.node_number = int(self.hostname.rsplit("-")[-1])
        elif ctx.cli.take_node_number_from_env:
            try:
                self.node_number = int(
                    os.environ[ctx.cli.take_node_number_from_env]
                )
            except (KeyError, ValueError):
                raise ValueError(
                    "failed to get node number from environment var"
                    f" {ctx.cli.take_node_number_from_env}"
                )
        else:
            self.node_number = None

    @property
    def node_ip_addr(self) -> str:
        """IP address of this node: the --ip option when given, otherwise
        resolved from --hostname. Raises ValueError if neither is set.
        """
        if self._ip_addr is None:
            cli = self._ctx.cli
            if getattr(cli, "ip", None):
                self._ip_addr = cli.ip
            elif cli.hostname:
                self._ip_addr = _lookup_hostname(cli.hostname)
            else:
                raise ValueError("can not determine node ip")
        return self._ip_addr

    @property
    def identity(self) -> str:
        """A stable identity string for this node."""
        # this could be extended to use something like /etc/machine-id
        # or whatever in the future.
        if self.hostname:
            return self.hostname
        elif self.node_number:
            # NOTE(review): node_number 0 is falsy, so a pnn-0 node
            # without a hostname falls through to "-unknown-"; confirm
            # whether that is intended
            return f"node-{self.node_number}"
        else:
            # the dashes make this an invalid dns name
            return "-unknown-"

    @property
    def cluster_meta_uri(self) -> str:
        """Return a cluster meta uri value."""
        values = (
            # cli takes highest precedence
            self._ctx.cli.metadata_source,
            # _nodes_json should only be set if user set it using the old key
            self._nodes_json,
            # default or customized value on current key
            self._cluster_meta_uri,
        )
        for uri in values:
            if uri:
                return uri
        raise ValueError("failed to determine cluster_meta_uri")

    def _cluster_meta_init(self) -> None:
        """Create the cluster meta object and a matching waiter for the
        configured cluster meta uri (RADOS object or JSON file).
        """
        uri = self.cluster_meta_uri
        # it'd be nice to re-use the opener infrastructure here but openers
        # don't do file modes the way we need for JSON state file or do
        # writable file types in the url_opener (urllib wrapper). For now, just
        # manually handle the string.
        if rados_opener.is_rados_uri(uri):
            self._cluster_meta_obj = (
                rados_opener.ClusterMetaRADOSObject.create_from_uri(uri)
            )
            self._waiter_obj = Sleeper()
            return
        if uri.startswith("file:"):
            path = uri.split(":", 1)[-1]
        else:
            path = uri
        if path.startswith("/"):
            # normalize to exactly one leading slash and no trailing
            # slashes. (Previously this prepended "/" to the rstripped
            # value, which produced a doubled leading slash.)
            path = "/" + path.strip("/")
        self._cluster_meta_obj = jfile.ClusterMetaJSONFile(path)
        self._waiter_obj = best_waiter(path)

    def cluster_meta(self) -> ctdb.ClusterMeta:
        """Return the (lazily created) cluster meta object."""
        if self._cluster_meta_obj is None:
            self._cluster_meta_init()
        assert self._cluster_meta_obj is not None
        return self._cluster_meta_obj

    def cluster_meta_waiter(self) -> Waiter:
        """Return the (lazily created) waiter paired with the cluster
        meta object.
        """
        if self._waiter_obj is None:
            self._cluster_meta_init()
        assert self._waiter_obj is not None
        return self._waiter_obj


@commands.command(name="ctdb-migrate", arg_func=_ctdb_migrate_args)
def ctdb_migrate(ctx: Context) -> None:
    """Migrate standard samba databases to CTDB databases."""
    _ctdb_ok()
    ctdb.migrate_tdb(ctx.instance_config, ctx.cli.dest_dir)
    archive_dir = ctx.cli.archive
    if archive_dir:
        # optionally stash the original TDB files out of the way
        ctdb.archive_tdb(ctx.instance_config, archive_dir)


def _lookup_hostname(hostname: str) -> str:
    try:
        addrinfo = socket.getaddrinfo(
            hostname,
            None,
            family=socket.AF_UNSPEC,
            type=socket.SOCK_STREAM,
        )
        ipv6_address = None

        for family, _, _, _, sockaddr in addrinfo:
            if family == socket.AF_INET:
                ip_address = sockaddr[0]
                assert isinstance(ip_address, str)
                if ip_address.startswith("127."):
                    continue
                return ip_address

            if family == socket.AF_INET6 and ipv6_address is None:
                ip_address = sockaddr[0]
                assert isinstance(ip_address, str)
                if ip_address == "::1":
                    continue
                ipv6_address = ip_address

        if ipv6_address:
            return ipv6_address

        raise RuntimeError(
            f"No valid IP address found for hostname '{hostname}'."
        )

    except socket.gaierror as e:
        _logger.error(f"Failed to resolve hostname '{hostname}': {e}")
        raise


@commands.command(name="ctdb-set-node", arg_func=_ctdb_set_node_args)
def ctdb_set_node(ctx: Context) -> None:
    """Set up the current node in the ctdb and sambacc nodes files."""
    _ctdb_ok()
    np = NodeParams(ctx)
    expected_pnn = np.node_number

    # First try to refresh an entry already present in the cluster
    # metadata; if this node is known, nothing more is needed.
    try:
        ctdb.refresh_node_in_cluster_meta(
            cmeta=np.cluster_meta(),
            identity=np.identity,
            node=np.node_ip_addr,
            # a node with no determinable number defaults to pnn 0
            pnn=int(expected_pnn or 0),
        )
        return
    except ctdb.NodeNotPresent:
        pass

    # Node is new: record it in the cluster metadata. Only pnn 0 is
    # immediately flagged as present in the nodes file (in_nodes).
    ctdb.add_node_to_cluster_meta(
        cmeta=np.cluster_meta(),
        identity=np.identity,
        node=np.node_ip_addr,
        pnn=int(expected_pnn or 0),
        in_nodes=(expected_pnn == 0),
    )
    # pnn 0 bootstraps the cluster, so also write it to the real ctdb
    # nodes file right away
    if expected_pnn == 0:
        ctdb.ensure_ctdb_node_present(
            node=np.node_ip_addr,
            expected_pnn=expected_pnn,
            real_path=np.persistent_path,
        )


@commands.command(name="ctdb-manage-nodes", arg_func=_ctdb_general_node_args)
def ctdb_manage_nodes(ctx: Context) -> None:
    """Run a long lived process to manage the cluster metadata. It can add new
    nodes. When a new node is found, if the current node is in the correct
    state, this node will add it to CTDB.
    """
    _ctdb_ok()
    params = NodeParams(ctx)
    pnn = params.node_number or 0
    waiter = params.cluster_meta_waiter()

    # loop forever; the limiter re-raises after too many failures
    limiter = ErrorLimiter("ctdb_manage_nodes", 10, pause_func=waiter.wait)
    while True:
        with limiter.catch():
            ctdb.manage_cluster_meta_updates(
                cmeta=params.cluster_meta(),
                pnn=pnn,
                real_path=params.persistent_path,
                pause_func=waiter.wait,
            )


def _ctdb_monitor_nodes_args(parser: argparse.ArgumentParser) -> None:
    """Register CLI options for the ctdb-monitor-nodes command."""
    _ctdb_must_have_node_args(parser)
    # controls which nodes may ask CTDB to reload its nodes file
    parser.add_argument(
        "--reload",
        choices=("leader", "never", "all"),
        default="leader",
        help="Specify which nodes can command CTDB to reload nodes",
    )


@commands.command(name="ctdb-monitor-nodes", arg_func=_ctdb_monitor_nodes_args)
def ctdb_monitor_nodes(ctx: Context) -> None:
    """Run a long lived process to monitor the cluster metadata.
    Unlike ctdb_manage_nodes this function assumes that the node state
    file is externally managed and primarily exists to reflect any changes
    to the cluster meta into CTDB.
    """
    _ctdb_ok()
    params = NodeParams(ctx)
    waiter = params.cluster_meta_waiter()
    # with --reload=leader only the cluster leader issues reloads
    locator = (
        best_leader_locator(ctx.instance_config)
        if ctx.cli.reload == "leader"
        else None
    )
    reload_all = ctx.cli.reload == "all"
    nodes_path = params.persistent_path if ctx.cli.write_nodes else None

    _logger.info("monitoring cluster meta changes")
    _logger.debug("reload_all=%s leader_locator=%r", reload_all, locator)
    limiter = ErrorLimiter("ctdb_monitor_nodes", 10, pause_func=waiter.wait)
    while True:
        with limiter.catch():
            ctdb.monitor_cluster_meta_changes(
                cmeta=params.cluster_meta(),
                pause_func=waiter.wait,
                nodes_file_path=nodes_path,
                leader_locator=locator,
                reload_all=reload_all,
            )


def _ctdb_must_have_node_args(parser: argparse.ArgumentParser) -> None:
    """Register CLI options for the ctdb-must-have-node command."""
    _ctdb_general_node_args(parser)
    # optionally materialize the cluster meta as a real ctdb nodes file
    parser.add_argument(
        "--write-nodes",
        action="store_true",
        help="Write ctdb nodes file based on cluster meta contents",
    )


@commands.command(
    name="ctdb-must-have-node", arg_func=_ctdb_must_have_node_args
)
def ctdb_must_have_node(ctx: Context) -> None:
    """Block until the current node is present in the ctdb nodes file."""
    _ctdb_ok()
    params = NodeParams(ctx)
    pnn = params.node_number or 0
    waiter = params.cluster_meta_waiter()

    limiter = ErrorLimiter("ctdb_must_have_node", 10, pause_func=waiter.wait)
    while True:
        with limiter.catch():
            found = ctdb.pnn_in_cluster_meta(
                cmeta=params.cluster_meta(),
                pnn=pnn,
            )
            if found:
                break
            _logger.info("node not yet ready")
            waiter.wait()
    if ctx.cli.write_nodes:
        _logger.info("Writing nodes file")
        ctdb.cluster_meta_to_nodes(
            params.cluster_meta(), dest=params.persistent_path
        )


def _ctdb_rados_mutex_args(parser: argparse.ArgumentParser) -> None:
    parser.add_argument(
        "--cluster-name",
        default="ceph",
        help="Cluster name to pass to mutex lock helper",
    )
    parser.add_argument(
        "mutex_uri",
        help="RADOS (pesudo) URI value for the object to use as a mutex",
    )


@commands.command(name="ctdb-rados-mutex", arg_func=_ctdb_rados_mutex_args)
def ctdb_rados_mutex(ctx: Context) -> None:
    """A command to wrap the rados ctdb_mutex_ceph_rados_helper and wrap
    & translate the container's ceph configuration into something
    the helper can understand.
    N.B. Another reason for this command is that ctdb requires the
    `cluster lock` value to be the same on all nodes.
    """
    # the mutex uri must name a RADOS object (not a pool or namespace)
    if not rados_opener.is_rados_uri(ctx.cli.mutex_uri):
        raise ValueError(f"{ctx.cli.mutex_uri} is not a valid RADOS URI value")
    rinfo = rados_opener.parse_rados_uri(ctx.cli.mutex_uri)
    if rinfo["subtype"] != "object":
        raise ValueError(
            f"{ctx.cli.mutex_uri} is not a RADOS object URI value"
        )
    pool, namespace, objname = rinfo["pool"], rinfo["ns"], rinfo["key"]
    # determine the cephx entity; --ceph-id may supply a short name that
    # needs the "client." prefix added
    entity = ctx.cli.ceph_id["client_name"]
    if not entity:
        raise ValueError("a ceph authentication entity name is required")
    if not ctx.cli.ceph_id["full_name"]:
        entity = f"client.{entity}"
    # required arguments: cluster name, cephx entity, pool, object name
    cmd = samba_cmds.ctdb_mutex_ceph_rados_helper[
        ctx.cli.cluster_name, entity, pool, objname  # cephx entity
    ]
    # optional namespace argument
    if namespace:
        cmd = cmd["-n", namespace]
    skip_reg_option = samba_cmds.ctdb_rados_mutex_skip_registration_opt()
    if skip_reg_option:
        # skip registering the ctdb rados mutex helper as a service
        cmd = cmd[skip_reg_option]
    _logger.debug("executing command: %r", cmd)
    samba_cmds.execute(cmd)  # replaces the current process (exec)


@commands.command(name="ctdb-list-nodes", arg_func=_ctdb_general_node_args)
def ctdb_list_nodes(ctx: Context) -> None:
    """Write nodes content to stdout based on current cluster meta."""
    _ctdb_ok()
    params = NodeParams(ctx)
    ctdb.cluster_meta_to_nodes(params.cluster_meta(), sys.stdout)


class ErrorLimiter:
    """Count errors across repeated attempts of a named operation and
    re-raise once more than `limit` errors have occurred.
    """

    def __init__(
        self,
        name: str,
        limit: int,
        *,
        pause_func: typing.Optional[typing.Callable] = None,
    ) -> None:
        # name is used only for log messages
        self.name = name
        self.limit = limit
        self.errors = 0
        self.pause_func = pause_func

    def post_catch(self):
        """Hook run after an error has been caught but tolerated;
        pauses via pause_func when one was supplied.
        """
        if self.pause_func is not None:
            self.pause_func()

    @contextlib.contextmanager
    def catch(self) -> typing.Iterator[None]:
        """Context manager that swallows exceptions (other than
        KeyboardInterrupt) until the error limit is exceeded, then
        re-raises.
        """
        try:
            _logger.debug(
                "error limiter proceeding: %s: errors=%r",
                self.name,
                self.errors,
            )
            yield
        except KeyboardInterrupt:
            # never suppress an operator-initiated interrupt
            raise
        except Exception as err:
            # note: the logged count is the count *before* this error
            _logger.error(
                f"error during {self.name}: {err}, count={self.errors}",
                exc_info=True,
            )
            self.errors += 1
            if self.errors > self.limit:
                _logger.error(f"too many retries ({self.errors}). giving up")
                raise
            self.post_catch()
07070100000024000081A4000000000000000000000001684BE19C000005D2000000000000000000000000000000000000003700000000sambacc-v0.6+git.60.2f89a38/sambacc/commands/dcmain.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import typing

from . import addc
from . import skips
from .cli import Fail, commands
from .common import (
    CommandContext,
    enable_logging,
    env_to_cli,
    global_args,
    pre_action,
)


# Command function used when the CLI did not select a subcommand:
# show the AD DC summary.
default_cfunc = addc.summary


def main(args: typing.Optional[typing.Sequence[str]] = None) -> None:
    """Entry point for the sambacc AD DC command.

    Parses CLI arguments (with environment-variable fallbacks), enables
    logging, requires a container identity, runs pre-action hooks, honors
    any skip conditions, and finally invokes the selected command
    function (or the summary default).

    Raises Fail when no container identity is configured.
    """
    cli = commands.assemble(arg_func=global_args).parse_args(args)
    env_to_cli(cli)
    enable_logging(cli)
    if not cli.identity:
        raise Fail("missing container identity")

    pre_action(cli)
    ctx = CommandContext(cli)
    skip = skips.test(ctx)
    if skip:
        print(f"Command Skipped: {skip}")
        return
    cfunc = getattr(cli, "cfunc", default_cfunc)
    # reuse the context built for the skip check instead of constructing
    # a second, identical CommandContext (as the previous code did)
    cfunc(ctx)
    return


# allow running this module directly as a script
if __name__ == "__main__":
    main()
07070100000025000081A4000000000000000000000001684BE19C00000D71000000000000000000000000000000000000003400000000sambacc-v0.6+git.60.2f89a38/sambacc/commands/dns.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import argparse
import functools
import logging
import typing

from sambacc import container_dns

from .cli import commands, Context, best_waiter, best_leader_locator, Fail

_logger = logging.getLogger(__name__)


def _dns_register_args(parser: argparse.ArgumentParser) -> None:
    """Register CLI options for the dns-register command."""
    # --watch switches from a one-shot registration to monitoring the
    # JSON state file for changes
    parser.add_argument(
        "--watch",
        action="store_true",
        help="If set, watch the source for changes and update DNS.",
    )
    parser.add_argument(
        "--domain",
        default="",
        help="Manually specify parent domain for DNS entries.",
    )
    # which class of IPs (external vs. internal access) to register
    parser.add_argument(
        "--target",
        default=container_dns.EXTERNAL,
        choices=[container_dns.EXTERNAL, container_dns.INTERNAL],
        help="Register IPs that fulfill the given access target.",
    )
    parser.add_argument("source", help="Path to source JSON file.")


@commands.command(name="dns-register", arg_func=_dns_register_args)
def dns_register(ctx: Context) -> None:
    """Register container & container orchestration IPs with AD DNS."""
    # This command assumes a cooperating JSON state file.
    # This file is expected to be supplied & kept up to date by
    # a container-orchestration specific component.
    iconfig = ctx.instance_config
    domain = ctx.cli.domain or ""
    if not domain:
        # fall back to the realm from the samba global options
        try:
            domain = dict(iconfig.global_options())["realm"].lower()
        except KeyError:
            raise Fail("instance not configured with domain (realm)")

    update_func = functools.partial(
        container_dns.parse_and_update,
        target_name=ctx.cli.target,
    )
    if iconfig.with_ctdb:
        _logger.info("enabling ctdb support: will check for leadership")
        update_func = _exec_if_leader(iconfig, update_func)

    if not ctx.cli.watch:
        update_func(domain, ctx.cli.source)
        return
    _logger.info("will watch source")
    waiter = best_waiter(ctx.cli.source)
    container_dns.watch(
        domain,
        ctx.cli.source,
        update_func,
        waiter.wait,
        print_func=print,
    )


def _exec_if_leader(iconfig, update_func):
    """Wrap update_func so that it only runs on the cluster leader node.

    On non-leader nodes the wrapper performs no DNS update and returns
    (previous, False) -- i.e. the prior state, unchanged.
    """

    def leader_update_func(
        domain: str,
        source: str,
        previous: typing.Optional[container_dns.HostState] = None,
    ) -> typing.Tuple[typing.Optional[container_dns.HostState], bool]:
        with best_leader_locator(iconfig) as ll:
            if not ll.is_leader():
                _logger.info("skipping dns update. node not leader")
                return previous, False
            _logger.info("checking for update. node is leader")
            result = update_func(domain, source, previous)
        return result

    return leader_update_func
07070100000026000081A4000000000000000000000001684BE19C00001009000000000000000000000000000000000000003B00000000sambacc-v0.6+git.60.2f89a38/sambacc/commands/initialize.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import logging
import typing

from sambacc import ctdb
from sambacc import paths
import sambacc.nsswitch_loader as nsswitch

from . import config  # noqa: F401
from . import users  # noqa: F401
from .cli import commands, perms_handler, setup_steps, Context


_logger = logging.getLogger(__name__)


@setup_steps.command("nsswitch")
def _import_nsswitch(ctx: Context) -> None:
    # should nsswitch validation/edit be conditional only on ads?
    paths = ["/etc/nsswitch.conf", "/usr/etc/nsswitch.conf"]
    for path in paths:
        nss = nsswitch.NameServiceSwitchLoader(path)
        try:
            nss.read()
            if not nss.winbind_enabled():
                nss.ensure_winbind_enabled()
                nss.write("/etc/nsswitch.conf")
            return
        except FileNotFoundError:
            pass

    raise FileNotFoundError(f"Failed to open {' or '.join(paths)}")


@setup_steps.command("smb_ctdb")
def _smb_conf_for_ctdb(ctx: Context) -> None:
    if ctx.instance_config.with_ctdb and ctx.expects_ctdb:
        _logger.info("Enabling ctdb in samba config file")
        ctdb.ensure_smb_conf(ctx.instance_config)


@setup_steps.command("ctdb_config")
def _ctdb_conf_for_ctdb(ctx: Context) -> None:
    if ctx.instance_config.with_ctdb and ctx.expects_ctdb:
        _logger.info("Ensuring ctdb config")
        ctdb.ensure_ctdb_conf(ctx.instance_config)


@setup_steps.command("ctdb_nodes")
def _ctdb_nodes_exists(ctx: Context) -> None:
    if ctx.instance_config.with_ctdb and ctx.expects_ctdb:
        _logger.info("Ensuring ctdb nodes file")
        persistent_path = ctx.instance_config.ctdb_config()["nodes_path"]
        ctdb.ensure_ctdb_nodes(
            ctdb_nodes=ctdb.read_ctdb_nodes(persistent_path),
            real_path=persistent_path,
        )


@setup_steps.command("ctdb_etc")
def _ctdb_etc_files(ctx: Context) -> None:
    if ctx.instance_config.with_ctdb and ctx.expects_ctdb:
        _logger.info("Ensuring ctdb etc files")
        ctdb.ensure_ctdbd_etc_files(iconfig=ctx.instance_config)


@setup_steps.command("share_paths")
@commands.command(name="ensure-share-paths")
def ensure_share_paths(ctx: Context) -> None:
    """Ensure the paths defined by the configuration exist."""
    # currently this is completely ignorant of things like vfs
    # modules that might "virtualize" the share path. It just
    # assumes that the path in the configuration is an absolute
    # path in the file system.
    for share in ctx.instance_config.shares():
        path = share.path()
        if not path:
            continue
        _logger.info(f"Ensuring share path: {path}")
        paths.ensure_share_dirs(path)
        _logger.info(f"Updating permissions if needed: {path}")
        perms_handler(share.permissions_config(), path).update()


_default_setup_steps = [
    "config",
    "users",
    "smb_ctdb",
    "users_passdb",
    "nsswitch",
]


def setup_step_names():
    """Return a list of names for the steps that init supports."""
    return [name for name in setup_steps.dict()]


@commands.command(name="init")
def init_container(
    ctx: Context, steps: typing.Optional[typing.Iterable[str]] = None
) -> None:
    """Initialize the entire container environment."""
    # fall back to the default step list when no steps are requested
    if steps is None:
        selected = _default_setup_steps
    else:
        selected = list(steps)
    registry = setup_steps.dict()
    for step_name in selected:
        registry[step_name].cmd_func(ctx)
07070100000027000081A4000000000000000000000001684BE19C000011C8000000000000000000000000000000000000003500000000sambacc-v0.6+git.60.2f89a38/sambacc/commands/join.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import sys
import typing

import sambacc.join as joinutil

from .cli import (
    Context,
    Fail,
    Parser,
    best_waiter,
    commands,
    toggle_option,
)


def _print_join_error(err: typing.Any) -> None:
    print(f"ERROR: {err}", file=sys.stderr)
    for suberr in getattr(err, "errors", []):
        print(f"  - {suberr}", file=sys.stderr)


def _add_join_sources(joiner: joinutil.Joiner, cli: typing.Any) -> None:
    """Register credential/join-data sources on `joiner` based on CLI flags."""
    allow_insecure = cli.insecure or getattr(cli, "insecure_auto_join", False)
    if allow_insecure:
        # credentials taken directly from CLI options / environment
        joiner.add_pw_source(joinutil.UserPass(cli.username, cli.password))
    if cli.files:
        for path in cli.join_files or []:
            joiner.add_file_source(path)
    if cli.odj_files:
        for path in cli.odj_files:
            joiner.add_odj_file_source(path)
    if cli.interactive:
        # password will be prompted for; only the username is preset
        joiner.add_interactive_source(joinutil.UserPass(cli.username))


def _join_args_common(parser: Parser) -> None:
    # Register the join-source options shared by `join` and `must-join`:
    # insecure CLI/env credentials, JSON credential files, and offline
    # domain join (ODJ) data files.
    toggle_option(
        parser,
        arg="--insecure",
        dest="insecure",
        helpfmt="{} taking user/password from CLI or environment.",
    )
    toggle_option(
        parser,
        arg="--files",
        dest="files",
        helpfmt="{} reading user/password from JSON files.",
    )
    parser.add_argument(
        "--join-file",
        "-j",
        dest="join_files",
        action="append",
        help="Path to file with user/password in JSON format.",
    )
    parser.add_argument(
        "--odj-file",
        dest="odj_files",
        action="append",
        help="Path to an Offline Domain Join (ODJ) provisioning data file",
    )


def _join_args(parser: Parser) -> None:
    # Defaults for the interactive `join` command: file sources and
    # interactive prompting enabled, insecure credentials disabled.
    parser.set_defaults(insecure=False, files=True, interactive=True)
    _join_args_common(parser)
    toggle_option(
        parser,
        arg="--interactive",
        dest="interactive",
        helpfmt="{} interactive password prompt.",
    )


@commands.command(name="join", arg_func=_join_args)
def join(ctx: Context) -> None:
    """Perform a domain join. The supported sources for join
    can be provided by supplying command line arguments.
    This includes an *insecure* mode that sources the password
    from the CLI or environment. Use this only for
    testing/non-production purposes.
    """
    # maybe in the future we'll have more secure methods
    joiner = joinutil.Joiner(ctx.cli.join_marker, opener=ctx.opener)
    _add_join_sources(joiner, ctx.cli)
    try:
        joiner.join()
    except joinutil.JoinError as err:
        # report the error (and any sub-errors) before failing the command
        _print_join_error(err)
        raise Fail("failed to join to a domain")


def _must_join_args(parser: Parser) -> None:
    # Defaults for `must-join`: file sources enabled, insecure credentials
    # disabled, and waiting for a join (by any process) enabled.
    parser.set_defaults(insecure=False, files=True, wait=True)
    _join_args_common(parser)
    toggle_option(
        parser,
        arg="--wait",
        dest="wait",
        helpfmt="{} waiting until a join is done.",
    )


@commands.command(name="must-join", arg_func=_must_join_args)
def must_join(ctx: Context) -> None:
    """If possible, perform an unattended domain join. Otherwise,
    exit or block until a join has been performed by another process.
    """
    joiner = joinutil.Joiner(ctx.cli.join_marker, opener=ctx.opener)
    if joiner.did_join():
        # a join marker already exists; nothing left to do
        print("already joined")
        return
    # Interactive join is not allowed on must-join
    setattr(ctx.cli, "interactive", False)
    _add_join_sources(joiner, ctx.cli)
    if ctx.cli.wait:
        # block (up to the waiter's timeout policy) until some process,
        # possibly this one, completes the join
        waiter = best_waiter(ctx.cli.join_marker, max_timeout=120)
        joinutil.join_when_possible(
            joiner, waiter, error_handler=_print_join_error
        )
    else:
        try:
            joiner.join()
        except joinutil.JoinError as err:
            _print_join_error(err)
            raise Fail(
                "failed to join to a domain - waiting for join is disabled"
            )
07070100000028000081A4000000000000000000000001684BE19C00000653000000000000000000000000000000000000003500000000sambacc-v0.6+git.60.2f89a38/sambacc/commands/main.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import typing


from . import config as config_cmds
from . import skips
from .cli import commands, Fail
from .common import (
    CommandContext,
    enable_logging,
    env_to_cli,
    global_args,
    pre_action,
)

default_cfunc = config_cmds.print_config


def main(args: typing.Optional[typing.Sequence[str]] = None) -> None:
    """CLI entry point: load subcommands, parse args, run the chosen command."""
    subcommand_modules = [
        ".check",
        ".ctdb",
        ".dns",
        ".initialize",
        ".join",
        ".run",
        ".users",
    ]
    commands.include_multiple(subcommand_modules)

    cli = commands.assemble(arg_func=global_args).parse_args(args)
    env_to_cli(cli)
    enable_logging(cli)
    if not cli.identity:
        raise Fail("missing container identity")

    pre_action(cli)
    ctx = CommandContext(cli)
    skip = skips.test(ctx)
    if skip:
        print(f"Command Skipped: {skip}")
        return
    # fall back to printing the config when no subcommand was selected
    getattr(cli, "cfunc", default_cfunc)(ctx)


# Allow direct execution: python -m sambacc.commands.main
if __name__ == "__main__":
    main()
07070100000029000041ED000000000000000000000002684BE19C00000000000000000000000000000000000000000000003B00000000sambacc-v0.6+git.60.2f89a38/sambacc/commands/remotecontrol0707010000002A000081A4000000000000000000000001684BE19C00000000000000000000000000000000000000000000004700000000sambacc-v0.6+git.60.2f89a38/sambacc/commands/remotecontrol/__init__.py0707010000002B000081A4000000000000000000000001684BE19C00000680000000000000000000000000000000000000004300000000sambacc-v0.6+git.60.2f89a38/sambacc/commands/remotecontrol/main.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2025  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import sys
import typing


from .. import skips
from ..cli import Context, Fail, commands
from ..common import (
    CommandContext,
    enable_logging,
    env_to_cli,
    global_args,
    pre_action,
)


def _default(ctx: Context) -> None:
    sys.stdout.write(f"{sys.argv[0]} requires a subcommand, like 'serve'.\n")
    sys.exit(1)


def main(args: typing.Optional[typing.Sequence[str]] = None) -> None:
    """Remote-control CLI entry point: load commands, parse args, dispatch."""
    commands.include(".server", package="sambacc.commands.remotecontrol")

    cli = commands.assemble(arg_func=global_args).parse_args(args)
    env_to_cli(cli)
    enable_logging(cli)
    if not cli.identity:
        raise Fail("missing container identity")

    pre_action(cli)
    ctx = CommandContext(cli)
    skip = skips.test(ctx)
    if skip:
        print(f"Command Skipped: {skip}")
        return
    # no subcommand selected -> print usage hint and exit non-zero
    getattr(cli, "cfunc", _default)(ctx)


# Allow direct execution: python -m sambacc.commands.remotecontrol.main
if __name__ == "__main__":
    main()
0707010000002C000081A4000000000000000000000001684BE19C00000E78000000000000000000000000000000000000004500000000sambacc-v0.6+git.60.2f89a38/sambacc/commands/remotecontrol/server.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2025  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import argparse
import logging
import signal
import sys
import typing

from ..cli import Context, Fail, commands

_logger = logging.getLogger(__name__)
_MTLS = "mtls"
_FORCE = "force"


def _serve_args(parser: argparse.ArgumentParser) -> None:
    parser.add_argument(
        "--address",
        "-a",
        help="Specify an {address:port} value to bind to.",
    )
    # Force an explicit choice of (the only) rpc type in order to clearly
    # prepare the space for possible alternatives
    egroup = parser.add_mutually_exclusive_group(required=True)
    egroup.add_argument(
        "--grpc",
        dest="rpc_type",
        action="store_const",
        default="grpc",
        const="grpc",
        help="Use gRPC",
    )
    # security settings
    parser.add_argument(
        "--insecure",
        action="store_true",
        help="Disable TLS",
    )
    parser.add_argument(
        "--allow-modify",
        choices=(_MTLS, _FORCE),
        default=_MTLS,
        help="Control modification mode",
    )
    parser.add_argument(
        "--tls-key",
        help="Server TLS Key",
    )
    parser.add_argument(
        "--tls-cert",
        help="Server TLS Certificate",
    )
    parser.add_argument(
        "--tls-ca-cert",
        help="CA Certificate",
    )


class Restart(Exception):
    # Raised by the SIGHUP handler in serve() to request a server restart.
    pass


@commands.command(name="serve", arg_func=_serve_args)
def serve(ctx: Context) -> None:
    """Start an RPC server."""

    def _on_hup(*args: typing.Any) -> None:
        # translate SIGHUP into a Restart exception
        raise Restart()

    signal.signal(signal.SIGHUP, _on_hup)
    while True:
        try:
            _serve(ctx)
        except KeyboardInterrupt:
            _logger.info("Exiting")
            sys.exit(0)
        except Restart:
            _logger.info("Re-starting server")
            continue
        else:
            return


def _serve(ctx: Context) -> None:
    # Build the gRPC server configuration from CLI options and run the
    # server. Imports are deferred so grpc dependencies are only needed
    # when actually serving.
    import sambacc.grpc.backend
    import sambacc.grpc.server

    config = sambacc.grpc.server.ServerConfig()
    config.insecure = bool(ctx.cli.insecure)
    if ctx.cli.address:
        config.address = ctx.cli.address
    # a TLS key and cert are mandatory unless --insecure was given
    if not (ctx.cli.insecure or ctx.cli.tls_key):
        raise Fail("Specify --tls-key=... or --insecure")
    if not (ctx.cli.insecure or ctx.cli.tls_cert):
        raise Fail("Specify --tls-cert=... or --insecure")
    if ctx.cli.tls_key:
        config.server_key = _read(ctx, ctx.cli.tls_key)
    if ctx.cli.tls_cert:
        config.server_cert = _read(ctx, ctx.cli.tls_cert)
    if ctx.cli.tls_ca_cert:
        config.ca_cert = _read(ctx, ctx.cli.tls_ca_cert)
    # writable only when modification is forced, or when TLS is on and a
    # CA cert was supplied (enabling mTLS verification of clients)
    config.read_only = not (
        ctx.cli.allow_modify == _FORCE
        or (not config.insecure and config.ca_cert)
    )

    backend = sambacc.grpc.backend.ControlBackend(ctx.instance_config)
    sambacc.grpc.server.serve(config, backend)


def _read(ctx: Context, path_or_url: str) -> bytes:
    with ctx.opener.open(path_or_url) as fh:
        content = fh.read()
    return content if isinstance(content, bytes) else content.encode()
0707010000002D000081A4000000000000000000000001684BE19C00001388000000000000000000000000000000000000003400000000sambacc-v0.6+git.60.2f89a38/sambacc/commands/run.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import contextlib
import logging
import signal
import time
import typing

from sambacc import samba_cmds
import sambacc.paths as paths

from .cli import commands, Context, Fail
from .initialize import init_container, setup_step_names
from .join import join


_logger = logging.getLogger(__name__)

INIT_ALL = "init-all"
SMBD = "smbd"
WINBINDD = "winbindd"
CTDBD = "ctdbd"
TARGETS = [SMBD, WINBINDD, CTDBD]


class WaitForCTDBCondition:
    """A --wait-for condition that is met once ctdb reports a pnn."""

    def met(self, ctx: Context) -> bool:
        """Return True if ctdb currently provides a pnn for this node."""
        target = getattr(ctx.cli, "target", None)
        if target == CTDBD:
            # starting ctdbd while waiting on ctdb would never complete
            raise Fail(f"Can not start and wait for {CTDBD}")
        _logger.debug("Condition required: ctdb pnn available")
        import sambacc.ctdb

        pnn = sambacc.ctdb.current_pnn()
        available = pnn is not None
        state = "met" if available else "not met"
        _logger.debug("Condition %s: ctdb pnn available: %s", state, pnn)
        return available


# Maps --wait-for option names to the condition classes implementing them.
_wait_for_conditions = {"ctdb": WaitForCTDBCondition}


def _run_container_args(parser):
    """Register the `run` command's CLI arguments on the parser."""
    parser.add_argument(
        "--no-init",
        action="store_true",
        help=(
            # typo fix: "envionment" -> "environment"
            "(DEPRECATED - see --setup) Do not initialize the container"
            " environment. Only start running the target process."
        ),
    )
    # valid --setup values: every registered setup step plus the special
    # 'init-all' pseudo-step
    _setup_choices = [INIT_ALL] + list(setup_step_names())
    parser.add_argument(
        "--setup",
        action="append",
        choices=_setup_choices,
        help=(
            "Specify one or more setup step names to preconfigure the"
            " container environment before the server process is started."
            " The special 'init-all' name will perform all known setup steps."
        ),
    )
    _wait_for_choices = _wait_for_conditions.keys()
    parser.add_argument(
        "--wait-for",
        action="append",
        choices=_wait_for_choices,
        help=(
            "Specify a condition to wait for prior to starting the server"
            " process. Available conditions: `ctdb` - wait for ctdb"
            " to run and provide a pnn."
        ),
    )
    parser.add_argument(
        "--insecure-auto-join",
        action="store_true",
        help=(
            # typo fix: "inscure" -> "insecure"
            "Perform an insecure domain join prior to starting a service."
            " Based on env vars JOIN_USERNAME and INSECURE_JOIN_PASSWORD."
        ),
    )
    parser.add_argument(
        "target",
        choices=TARGETS,
        help="Which process to run",
    )


# Max seconds to wait for all --wait-for conditions to be met (5 minutes).
_COND_TIMEOUT = 5 * 60


@contextlib.contextmanager
def _timeout(timeout: int) -> typing.Iterator[None]:
    def _handler(sig: int, frame: typing.Any) -> None:
        raise RuntimeError("timed out waiting for conditions")

    signal.signal(signal.SIGALRM, _handler)
    signal.alarm(timeout)
    yield
    signal.alarm(0)
    signal.signal(signal.SIGALRM, signal.SIG_DFL)


@commands.command(name="run", arg_func=_run_container_args)
def run_container(ctx: Context) -> None:
    """Run a specified server process.

    Optionally waits for --wait-for conditions, performs the selected
    (or default) setup steps, then execs the requested samba daemon.
    """
    if ctx.cli.no_init and ctx.cli.setup:
        raise Fail("can not specify both --no-init and --setup")

    if ctx.cli.wait_for:
        # block (up to _COND_TIMEOUT seconds) until every requested
        # condition reports that it has been met
        with _timeout(_COND_TIMEOUT):
            conditions = [_wait_for_conditions[n]() for n in ctx.cli.wait_for]
            while not all(c.met(ctx) for c in conditions):
                time.sleep(1)

    # running servers expect to make use of ctdb whenever it is configured
    ctx.expects_ctdb = True
    if not ctx.cli.no_init and not ctx.cli.setup:
        # TODO: drop this along with --no-init and move to a opt-in
        # rather than opt-out form of pre-run setup
        init_container(ctx)
    elif ctx.cli.setup:
        steps = list(ctx.cli.setup)
        init_container(ctx, steps=(None if INIT_ALL in steps else steps))

    paths.ensure_samba_dirs()
    # consistency fix: compare against the module-level target constants
    # (SMBD/WINBINDD/CTDBD) rather than repeating string literals
    if ctx.cli.target == SMBD:
        # execute smbd process
        samba_cmds.execute(samba_cmds.smbd_foreground())
    elif ctx.cli.target == WINBINDD:
        if getattr(ctx.cli, "insecure_auto_join", False):
            join(ctx)
        # execute winbind process
        samba_cmds.execute(samba_cmds.winbindd_foreground())
    elif ctx.cli.target == CTDBD:
        # NOTE(review): ctdbd_foreground is passed uncalled, unlike the
        # other targets which call their *_foreground() helper - confirm
        # this is intentional
        samba_cmds.execute(samba_cmds.ctdbd_foreground)
    else:
        raise Fail(f"invalid target process: {ctx.cli.target}")
0707010000002E000081A4000000000000000000000001684BE19C00001783000000000000000000000000000000000000003600000000sambacc-v0.6+git.60.2f89a38/sambacc/commands/skips.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2024  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

from typing import Optional
import argparse
import os

from sambacc.typelets import Self

from .cli import Context


class SkipIf:
    """Base class for objects used to check if a particular sambacc command
    should be skipped.
    Skips are useful when different commands are chained together
    unconditionally in a configuration file (like k8s init containers) but
    certain commands should not be run.
    """

    # Prefix (the text before the ':') used to select this type in parse().
    NAME: str = ""

    def test(self, ctx: Context) -> Optional[str]:
        """Return a string explaining the reason for the skip or None
        indicating no skip is desired.
        """
        raise NotImplementedError()  # pragma: nocover

    @classmethod
    def parse(cls, value: str) -> Self:
        """Parse a string into a skip class arguments."""
        raise NotImplementedError()  # pragma: nocover


class SkipFile(SkipIf):
    """Skip execution if a file exists or does not exist.
    The input value "file:/foo/bar" will trigger a skip if the file /foo/bar
    exists. To skip if a file does not exist, use "file:!/foo/bar" - prefix the
    file name with an exclaimation point.
    """

    NAME: str = "file"
    # when True, skip on a *missing* file instead of an existing one
    inverted: bool = False
    path: str = ""

    @classmethod
    def parse(cls, value: str) -> Self:
        obj = cls()
        if not value:
            raise ValueError("missing path")
        invert = value.startswith("!")
        obj.inverted = invert
        obj.path = value[1:] if invert else value
        return obj

    def test(self, ctx: Context) -> Optional[str]:
        exists = os.path.exists(self.path)
        if self.inverted:
            if not exists:
                return f"skip-if-file-missing: {self.path} missing"
        elif exists:
            return f"skip-if-file-exists: {self.path} exists"
        return None


class SkipEnv(SkipIf):
    """Skip execution if an environment variable is, or is not, equal to a
    value. The specification is roughly "env:<ENV_VAR><op><VALUE>" where op may
    be either `==` or `!=`. For example, "env:FLAVOR==cherry" will skip
    execution if the environment variable "FLAVOR" contains the value "cherry".
    "env:FLAVOR!=cherry" will skip execution if "FLAVOR" contains any value
    other than "cherry".
    """

    NAME: str = "env"
    _EQ = "=="
    _NEQ = "!="

    def __init__(self, op: str, var_name: str, value: str) -> None:
        self.op = op
        self.var_name = var_name
        self.target_value = value

    @classmethod
    def parse(cls, value: str) -> Self:
        # check "==" before "!=", matching the original precedence
        for op in (cls._EQ, cls._NEQ):
            if op in value:
                lhv, rhv = value.split(op, 1)
                return cls(op, lhv, rhv)
        raise ValueError("invalid SkipEnv: missing or invalid operation")

    def test(self, ctx: Context) -> Optional[str]:
        env_val = os.environ.get(self.var_name)
        matched = (self.op == self._EQ and env_val == self.target_value) or (
            self.op == self._NEQ and env_val != self.target_value
        )
        if matched:
            return (
                f"env var: {self.var_name}"
                f" -> {env_val} {self.op} {self.target_value}"
            )
        return None


class SkipAlways(SkipIf):
    """Skip execution unconditionally. Must be specified as "always:" and takes
    no value after the colon.
    """

    NAME: str = "always"

    @classmethod
    def parse(cls, value: str) -> Self:
        # any text after "always:" is rejected
        if value:
            raise ValueError("always skip takes no value")
        return cls()

    def test(self, ctx: Context) -> Optional[str]:
        # unconditional: always report a skip reason
        return "always skip"


# All recognized skip types, in the order parse() tries their NAME prefixes.
_SKIP_TYPES = [SkipFile, SkipEnv, SkipAlways]


def test(
    ctx: Context, *, conditions: Optional[list[SkipIf]] = None
) -> Optional[str]:
    """Return a string explaining the reason for a skip or None indicating
    no skip should be performed. Typically the skip conditions will be
    derived from the command line arguments but can be passed in manually
    using the `conditions` keyword argument.
    """
    if not conditions:
        conditions = ctx.cli.skip_conditions or []
    for cond in conditions:
        skip = cond.test(ctx)
        if skip:
            return skip
    return None


def parse(value: str) -> SkipIf:
    """Given a string return a SkipIf-based object. Every value must be
    prefixed with the skip "type" (the skip type's NAME).

    Raises argparse.ArgumentTypeError with detailed help for the special
    "?" value, and KeyError when no skip type matches the prefix.
    """
    if value == "?":
        # A hack to avoid putting tons of documentation into the help output.
        raise argparse.ArgumentTypeError(_help_info())
    for sk in _SKIP_TYPES:
        assert issubclass(sk, SkipIf)
        prefix = f"{sk.NAME}:"
        plen = len(prefix)
        if value.startswith(prefix):
            return sk.parse(value[plen:])
    # fix: the f-prefix was missing, so the error message printed the
    # literal "{value!r}" instead of the offending input
    raise KeyError(f"no matching skip rule for: {value!r}")


def _help_info() -> str:
    """Build the detailed help text describing every skip condition type."""
    msgs = ["Skip conditions help details:", ""]
    for sk in _SKIP_TYPES:
        assert issubclass(sk, SkipIf)
        msgs.append(f"== Skip execution on condition `{sk.NAME}` ==")
        assert sk.__doc__
        # each class docstring line, stripped, becomes a help line
        msgs.extend(line.strip() for line in sk.__doc__.splitlines())
        msgs.append("")
    return "\n".join(msgs)
0707010000002F000081A4000000000000000000000001684BE19C000007B8000000000000000000000000000000000000003600000000sambacc-v0.6+git.60.2f89a38/sambacc/commands/users.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import sambacc.passdb_loader as passdb
import sambacc.passwd_loader as ugl

from .cli import commands, setup_steps, Context


@commands.command(name="import-users")
def import_users(ctx: Context) -> None:
    """Import users and groups from the sambacc config to the passwd
    and group files to support local (non-domain based) login.
    """
    # system (passwd/group) files first, then samba's own passdb
    import_sys_users(ctx)
    import_passdb_users(ctx)


@setup_steps.command("users")
def import_sys_users(ctx: Context) -> None:
    """Import users and groups from sambacc config to the passwd and
    group files.
    """
    passwd_loader = ugl.PasswdFileLoader(ctx.cli.etc_passwd_path)
    group_loader = ugl.GroupFileLoader(ctx.cli.etc_group_path)

    # read existing entries, merge in configured users/groups, write back
    passwd_loader.read()
    group_loader.read()
    for user in ctx.instance_config.users():
        passwd_loader.add_user(user)
    for group in ctx.instance_config.groups():
        group_loader.add_group(group)
    passwd_loader.write()
    group_loader.write()


@setup_steps.command("users_passdb")
def import_passdb_users(ctx: Context) -> None:
    """Import users into samba's passdb."""
    loader = passdb.PassDBLoader()
    for user in ctx.instance_config.users():
        loader.add_user(user)
07070100000030000081A4000000000000000000000001684BE19C00004C83000000000000000000000000000000000000002E00000000sambacc-v0.6+git.60.2f89a38/sambacc/config.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

from __future__ import annotations

import binascii
import enum
import errno
import json
import sys
import typing

from .opener import Opener, FileOpener

_VALID_VERSIONS = ["v0"]

# alias open to _open to support test assertions when running
# as UID 0
_open = open

PasswdEntryTuple = typing.Tuple[str, str, str, str, str, str, str]
GroupEntryTuple = typing.Tuple[str, str, str, str]

# JSONData is not really a completely valid representation of json in
# the type system, but it's good enough for now. Using it can help
# clarify what works with json and what works with real python dicts.
JSONData = dict[str, typing.Any]

# the standard location for samba's smb.conf
SMB_CONF = "/etc/samba/smb.conf"
CLUSTER_META_JSON = "/var/lib/ctdb/shared/ctdb-nodes.json"
CTDB_NODES_PATH = "/var/lib/ctdb/shared/nodes"
CTDB_RECLOCK = "/var/lib/ctdb/shared/RECOVERY"

CTDB: typing.Final[str] = "ctdb"
ADDC: typing.Final[str] = "addc"
FEATURES: typing.Final[str] = "instance_features"

# cache for json schema data
_JSON_SCHEMA: dict[str, typing.Any] = {}


class ConfigFormat(enum.Enum):
    """Serialization formats supported for sambacc config files."""

    JSON = "json"
    TOML = "toml"
    YAML = "yaml"


class ValidationUnsupported(Exception):
    # Raised when schema validation was requested but jsonschema (or the
    # schema data itself) could not be imported.
    pass


class _FakeRefResolutionError(Exception):
    # Stand-in used when the installed jsonschema has no RefResolutionError.
    pass


class ConfigFormatUnsupported(Exception):
    # Raised when the parser module for the requested format is unavailable.
    pass


if sys.version_info >= (3, 11):
    # Python 3.11+ provides tomllib in the standard library.

    def _load_toml(source: typing.IO) -> JSONData:
        # Parse TOML config data from an open file object.
        try:
            import tomllib
        except ImportError:
            raise ConfigFormatUnsupported(ConfigFormat.TOML)
        return tomllib.load(source)

else:
    # Older pythons require the third-party tomli package instead.

    def _load_toml(source: typing.IO) -> JSONData:
        # Parse TOML config data from an open (binary) file object.
        try:
            import tomli
        except ImportError:
            raise ConfigFormatUnsupported(ConfigFormat.TOML)
        if typing.TYPE_CHECKING:
            assert isinstance(source, typing.BinaryIO)
        return tomli.load(source)


def _load_yaml(source: typing.IO) -> JSONData:
    """Parse YAML config data from an open file object, raising
    ConfigFormatUnsupported when PyYAML is not installed.
    """
    try:
        import yaml
    except ImportError:
        raise ConfigFormatUnsupported(ConfigFormat.YAML)
    # preserve the `or {}` coercion: any falsy document becomes {}
    data = yaml.safe_load(source)
    return data or {}


def _detect_format(fname: str) -> ConfigFormat:
    """Pick a config format from the file name suffix; default is JSON."""
    by_suffix = [
        ((".toml",), ConfigFormat.TOML),
        ((".yaml", ".yml"), ConfigFormat.YAML),
    ]
    for suffixes, fmt in by_suffix:
        if fname.endswith(suffixes):
            return fmt
    return ConfigFormat.JSON


def _schema_validate(data: dict[str, typing.Any], version: str) -> None:
    """Validate config data against the JSON schema for `version`.
    Raises ValidationUnsupported when jsonschema or the schema data can
    not be imported; otherwise lets jsonschema's validation errors
    propagate.
    """
    try:
        import jsonschema  # type: ignore[import]
    except ImportError:
        raise ValidationUnsupported()
    try:
        # RefResolutionError may be absent in some jsonschema versions;
        # fall back to a placeholder exception type in that case
        _refreserror = getattr(jsonschema, "RefResolutionError")
    except AttributeError:
        _refreserror = _FakeRefResolutionError

    if version == "v0" and version not in _JSON_SCHEMA:
        # lazily import and cache the v0 schema data
        try:
            import sambacc.schema.conf_v0_schema

            _JSON_SCHEMA[version] = sambacc.schema.conf_v0_schema.SCHEMA
        except ImportError:
            raise ValidationUnsupported()
    try:
        jsonschema.validate(instance=data, schema=_JSON_SCHEMA[version])
    except _refreserror:
        raise ValidationUnsupported()


def _check_config_version(data: JSONData) -> str:
    """Return the config's version string or raise a ValueError if the
    config is invalid or incomplete.
    (Docstring fix: this returns the version, not the config data.)
    """
    # short-cut to validate that this is something we want to consume
    version = data.get("samba-container-config")
    if version is None:
        raise ValueError("Invalid config: no samba-container-config key")
    elif version not in _VALID_VERSIONS:
        raise ValueError(f"Invalid config: unknown version {version}")
    return version


def _check_config_valid(
    data: JSONData, version: str, required: typing.Optional[bool] = None
) -> None:
    # Validate `data` against the schema. `required` is tri-state:
    #   True  -> must validate; an unavailable validator is an error
    #   None  -> validate when possible; unavailable validator is ignored
    #   False -> skip validation entirely
    if required or required is None:
        try:
            _schema_validate(data, version)
        except ValidationUnsupported:
            if required:
                raise


def read_config_files(
    fnames: list[str],
    *,
    require_validation: typing.Optional[bool] = None,
    opener: typing.Optional[Opener] = None,
) -> GlobalConfig:
    """Read the global container config from the given filenames.
    At least one of the files from the fnames list must exist and contain
    a valid config. If none of the file names exist an error will be raised.

    `require_validation` is passed through to each load call; `opener`
    abstracts file access and defaults to the local filesystem.
    """
    # NOTE: Right now if more than one config exists they'll be "merged" but
    # the merging is very simplistic right now. Mainly we expect that the
    # users will be split from the main config (for security reasons) but
    # it would be nicer to have a good merge algorithm handle everything
    # smarter at some point.
    opener = opener or FileOpener()
    gconfig = GlobalConfig()
    readfiles = set()
    for fname in fnames:
        # choose JSON/TOML/YAML parsing based on the file extension
        config_format = _detect_format(str(fname))
        try:
            with opener.open(fname) as fh:
                gconfig.load(
                    fh,
                    require_validation=require_validation,
                    config_format=config_format,
                )
            readfiles.add(fname)
        except OSError as err:
            # a missing file is tolerated; any other I/O error is fatal
            if getattr(err, "errno", 0) != errno.ENOENT:
                raise
    if not readfiles:
        # we read nothing! don't proceed
        raise ValueError(f"None of the config file paths exist: {fnames}")
    return gconfig


class SambaConfig(typing.Protocol):
    """Minimal samba configuration protocol.

    Any object providing these two methods can be rendered into an
    smb.conf style configuration.
    """

    def global_options(self) -> typing.Iterable[typing.Tuple[str, str]]:
        """Return global options for Samba."""
        ...  # pragma: no cover

    def shares(self) -> typing.Iterable[ShareConfig]:
        """Return share configurations for Samba."""
        ...  # pragma: no cover


class GlobalConfig:
    """In-memory representation of the global container config,
    possibly merged from multiple config sources.
    """

    def __init__(
        self,
        source: typing.Optional[typing.IO] = None,
        *,
        initial_data: typing.Optional[JSONData] = None,
    ) -> None:
        # start from the provided initial data, if any; `source`
        # (JSON) is then loaded on top of it
        self.data: JSONData = {} if initial_data is None else initial_data
        if source is not None:
            self.load(source)

    def load(
        self,
        source: typing.IO,
        *,
        require_validation: typing.Optional[bool] = None,
        config_format: typing.Optional[ConfigFormat] = None,
    ) -> None:
        """Parse config data from `source` and merge it into this
        object. The format defaults to JSON. Version and (optional)
        schema validation are applied before merging.
        """
        config_format = config_format or ConfigFormat.JSON
        if config_format == ConfigFormat.TOML:
            data = _load_toml(source)
        elif config_format == ConfigFormat.YAML:
            data = _load_yaml(source)
        else:
            data = json.load(source)
        _check_config_valid(
            data, _check_config_version(data), require_validation
        )
        # NOTE: shallow merge - later loads overwrite top-level keys
        self.data.update(data)

    def get(self, ident: str) -> InstanceConfig:
        """Return the instance config named `ident`.

        Raises KeyError when no such instance is configured.
        """
        iconfig = self.data["configs"][ident]
        return InstanceConfig(self, iconfig)


class InstanceConfig:
    """Configuration for a single server instance, backed by a
    GlobalConfig and the instance's own section within it.
    """

    def __init__(self, conf: GlobalConfig, iconfig: dict):
        self.gconfig = conf
        self.iconfig = iconfig

    def global_options(self) -> typing.Iterable[typing.Tuple[str, str]]:
        """Iterate over global options."""
        # Pull in all global sections that apply
        try:
            gnames = self.iconfig["globals"]
        except KeyError:
            # no globals section in the instance means no global options
            return
        for gname in gnames:
            global_section = self.gconfig.data["globals"][gname]
            for k, v in global_section.get("options", {}).items():
                yield k, v
        # Special, per-instance settings
        instance_name = self.iconfig.get("instance_name", None)
        if instance_name:
            yield "netbios name", instance_name

    def uid_base(self) -> int:
        # base uid for users lacking an explicit uid (see UserEntry.uid)
        return 1000

    def gid_base(self) -> int:
        # base gid for groups lacking an explicit gid (see GroupEntry.gid)
        return 1000

    def shares(self) -> typing.Iterable[ShareConfig]:
        """Iterate over share configs."""
        for sname in self.iconfig.get("shares", []):
            yield ShareConfig(self.gconfig, sname, iconfig=self.iconfig)

    def users(self) -> typing.Iterable[UserEntry]:
        """Iterate over all user entries defined in the config."""
        all_users = self.gconfig.data.get("users", {}).get("all_entries", {})
        for n, entry in enumerate(all_users):
            yield UserEntry(self, entry, n)

    def groups(self) -> typing.Iterable[GroupEntry]:
        """Iterate over group entries, appending a "virtual" group for
        each user gid that has no explicit group definition.
        """
        user_gids = {u.gid: u for u in self.users()}
        all_groups = self.gconfig.data.get("groups", {}).get("all_entries", {})
        for n, entry in enumerate(all_groups):
            ge = GroupEntry(self, entry, n)
            if ge.gid in user_gids:
                del user_gids[ge.gid]
            yield ge
        for uentry in user_gids.values():
            yield uentry.vgroup()

    @property
    def with_ctdb(self) -> bool:
        """True if the ctdb feature is enabled for this instance."""
        return CTDB in self.iconfig.get(FEATURES, [])

    @property
    def with_addc(self) -> bool:
        """True if the AD DC feature is enabled for this instance."""
        return ADDC in self.iconfig.get(FEATURES, [])

    def ctdb_smb_config(self) -> CTDBSambaConfig:
        """Return the fixed samba config used with ctdb.

        Raises ValueError if ctdb is not enabled for this instance.
        """
        if not self.with_ctdb:
            raise ValueError("ctdb not supported in configuration")
        return CTDBSambaConfig()

    def ctdb_config(self) -> dict[str, str]:
        """Common configuration of CTDB itself."""
        if not self.with_ctdb:
            return {}
        # copy so that applying defaults never mutates the global config
        ctdb = dict(self.gconfig.data.get("ctdb", {}))
        ctdb.setdefault("cluster_meta_uri", CLUSTER_META_JSON)
        ctdb.setdefault("nodes_path", CTDB_NODES_PATH)
        ctdb.setdefault("recovery_lock", CTDB_RECLOCK)
        ctdb.setdefault("log_level", "NOTICE")
        ctdb.setdefault("script_log_level", "ERROR")
        ctdb.setdefault("realtime_scheduling", "false")
        # this whole thing really needs to be turned into a real object type
        ctdb.setdefault("public_addresses", [])
        ctdb.setdefault("ctdb_port", 0)
        return ctdb

    def domain(self) -> DomainConfig:
        """Return the general domain settings for this DC instance."""
        if not self.with_addc:
            raise ValueError("ad dc not supported by configuration")
        domains = self.gconfig.data.get("domain_settings", {})
        instance_name: str = self.iconfig.get("instance_name", "")
        return DomainConfig(
            drec=domains[self.iconfig["domain_settings"]],
            instance_name=instance_name,
        )

    def domain_users(self) -> typing.Iterable[DomainUserEntry]:
        """Iterate over users to create in this instance's AD domain."""
        if not self.with_addc:
            raise ValueError("ad dc not supported by configuration")
        ds_name: str = self.iconfig["domain_settings"]
        dusers = self.gconfig.data.get("domain_users", {}).get(ds_name, [])
        for n, entry in enumerate(dusers):
            yield DomainUserEntry(self, entry, n)

    def domain_groups(self) -> typing.Iterable[DomainGroupEntry]:
        """Iterate over groups to create in this instance's AD domain."""
        if not self.with_addc:
            raise ValueError("ad dc not supported by configuration")
        ds_name: str = self.iconfig["domain_settings"]
        dgroups = self.gconfig.data.get("domain_groups", {}).get(ds_name, [])
        for n, entry in enumerate(dgroups):
            yield DomainGroupEntry(self, entry, n)

    def organizational_units(self) -> typing.Iterable[OrganizationalUnitEntry]:
        """Iterate over OUs to create in this instance's AD domain."""
        if not self.with_addc:
            raise ValueError("ad dc not supported by configuration")
        ds_name: str = self.iconfig["domain_settings"]
        o_units = self.gconfig.data.get("organizational_units", {}).get(
            ds_name, []
        )
        for n, entry in enumerate(o_units):
            yield OrganizationalUnitEntry(self, entry, n)

    def __eq__(self, other: typing.Any) -> bool:
        # equal when the instance sections match and both resolve to
        # the same share and global option data
        if isinstance(other, InstanceConfig) and self.iconfig == other.iconfig:
            self_shares = _shares_data(self.gconfig, self.iconfig)
            other_shares = _shares_data(other.gconfig, other.iconfig)
            self_globals = _globals_data(self.gconfig, self.iconfig)
            other_globals = _globals_data(other.gconfig, other.iconfig)
            return (
                self_shares == other_shares and self_globals == other_globals
            )
        return False


class CTDBSambaConfig:
    """Minimal, fixed samba configuration used when ctdb is enabled.

    The real configuration lives in the samba registry; only the
    options needed to bootstrap clustering are emitted here.
    """

    def global_options(self) -> typing.Iterable[typing.Tuple[str, str]]:
        """Return the static global options enabling ctdb clustering."""
        options = [
            ("clustering", "yes"),
            ("ctdb:registry.tdb", "yes"),
            ("include", "registry"),
        ]
        return options

    def shares(self) -> typing.Iterable[ShareConfig]:
        """Return no shares; shares are sourced from the registry."""
        return []


class ShareConfig:
    """Configuration for a single share, resolved from the global
    config's shares section.
    """

    def __init__(
        self,
        conf: GlobalConfig,
        sharename: str,
        iconfig: typing.Optional[dict] = None,
    ) -> None:
        self.gconfig = conf
        self.name = sharename
        # instance section; used as a fallback for permissions config
        self.iconfig = iconfig or {}

    def share_options(self) -> typing.Iterable[typing.Tuple[str, str]]:
        """Iterate over share options."""
        share_section = self.gconfig.data["shares"][self.name]
        return iter(share_section.get("options", {}).items())

    def path(self) -> typing.Optional[str]:
        """Return the path value from the smb.conf options."""
        share_section = self.gconfig.data["shares"][self.name]
        try:
            return share_section["options"]["path"]
        except KeyError:
            return None

    def permissions_config(self) -> PermissionsConfig:
        """Return a permissions configuration for the share."""
        # each share can have it's own permissions config,
        # but if it does not it will default to the instance's
        # config
        try:
            share_perms = self.gconfig.data["shares"][self.name]["permissions"]
            return PermissionsConfig(share_perms)
        except KeyError:
            pass
        try:
            instance_perms = self.iconfig["permissions"]
            return PermissionsConfig(instance_perms)
        except KeyError:
            pass
        # use the internal defaults
        return PermissionsConfig({})


class UserEntry:
    """A user defined in the container config, used to generate passwd
    file and samba passdb entries.
    """

    def __init__(self, iconf: InstanceConfig, urec: dict, num: int):
        self.iconfig = iconf
        self.username = urec["name"]
        # position in the config listing; used to derive uid/gid when unset
        self.entry_num = num
        self._uid = urec.get("uid")
        self._gid = urec.get("gid")
        self._nt_passwd = str(urec.get("nt_hash", ""))
        self._plaintext_passwd = str(urec.get("password", ""))
        if self._uid is not None:
            if not isinstance(self._uid, int):
                raise ValueError("invalid uid value")
        if self._gid is not None:
            if not isinstance(self._gid, int):
                raise ValueError("invalid gid value")

    @property
    def uid(self) -> int:
        # NOTE(review): a configured uid of 0 is falsy and falls back to
        # the derived value - confirm whether uid 0 should be honored
        if self._uid:
            return self._uid
        return self.iconfig.uid_base() + self.entry_num

    @property
    def gid(self) -> int:
        # NOTE(review): same falsy-zero caveat as uid above
        if self._gid:
            return self._gid
        return self.iconfig.gid_base() + self.entry_num

    @property
    def dir(self) -> str:
        # config users get no real home directory
        return "/invalid"

    @property
    def shell(self) -> str:
        # config users get no interactive shell
        return "/bin/false"

    @property
    def nt_passwd(self) -> bytes:
        # the json will store the hash as a hex encoded string
        return binascii.unhexlify(self._nt_passwd)

    @property
    def plaintext_passwd(self) -> str:
        return self._plaintext_passwd

    def passwd_fields(self) -> PasswdEntryTuple:
        """Return the seven fields of a passwd(5) entry as strings."""
        # fields: name, passwd, uid, gid, GECOS, dir, shell
        return (
            self.username,
            "x",
            str(self.uid),
            str(self.gid),
            "",
            self.dir,
            self.shell,
        )

    def vgroup(self) -> GroupEntry:
        """In case there is no explicit group for the specified user. This
        handy method makes a "virtual" group based on the user info.
        """
        return GroupEntry(
            self.iconfig, dict(name=self.username, gid=self.gid), 0
        )


class GroupEntry:
    """A group defined in the container config, used to generate group
    file entries.
    """

    def __init__(self, iconf: InstanceConfig, grec: dict, num: int):
        self.iconfig = iconf
        self.groupname = grec["name"]
        # position in the config listing; used to derive a gid when unset
        self.entry_num = num
        self._gid = grec.get("gid")
        self.ou = grec.get("ou")
        if self._gid is not None:
            if not isinstance(self._gid, int):
                raise ValueError("invalid gid value")

    @property
    def gid(self) -> int:
        # NOTE(review): a configured gid of 0 is falsy and falls back to
        # the derived value - confirm whether gid 0 should be honored
        if self._gid:
            return self._gid
        return self.iconfig.gid_base() + self.entry_num

    def group_fields(self) -> GroupEntryTuple:
        """Return the four fields of a group(5) entry as strings."""
        # fields: name, passwd, gid, members(comma separated)
        return (self.groupname, "x", str(self.gid), "")


class DomainConfig:
    """General domain settings for an AD DC instance."""

    def __init__(self, drec: dict, instance_name: str):
        self.realm = drec["realm"]
        self.short_domain = drec.get("short_domain", "")
        self.admin_password = drec.get("admin_password", "")
        self.interface_config = DCInterfaceConfig(drec.get("interfaces", {}))
        self.dcname = instance_name


class DCInterfaceConfig:
    """Network interface include/exclude patterns for a DC instance."""

    def __init__(self, iface_config: dict) -> None:
        # missing keys are treated the same as empty patterns
        self.include_pattern: str = iface_config.get("include_pattern", "")
        self.exclude_pattern: str = iface_config.get("exclude_pattern", "")

    @property
    def configured(self) -> bool:
        """Return true if at least one interface property has been set."""
        return any((self.include_pattern, self.exclude_pattern))


class DomainUserEntry(UserEntry):
    """A user belonging to an AD domain; extends UserEntry with
    directory-specific fields (surname, given name, groups, OU).
    """

    def __init__(self, iconf: InstanceConfig, urec: dict, num: int):
        super().__init__(iconf, urec, num)
        self.surname = urec.get("surname")
        self.given_name = urec.get("given_name")
        self.member_of = urec.get("member_of", [])
        self.ou = urec.get("ou")
        if not isinstance(self.member_of, list):
            raise ValueError("member_of should contain a list of group names")


class DomainGroupEntry(GroupEntry):
    """A group defined within an AD domain; behaves like GroupEntry."""

    pass


class OrganizationalUnitEntry:
    """An organizational unit (OU) to be created in an AD domain."""

    def __init__(self, iconf: InstanceConfig, urec: dict, num: int):
        self.iconfig = iconf
        self.ou_name = urec["name"]
        # position in the config listing (only stored, not used here)
        self.entry_num = num


class PermissionsConfig:
    """Permissions handling configuration for share directories.

    `method` selects the permissions backend and `status_xattr` names
    the xattr used to track share setup status. All remaining keys are
    passed through unchanged as backend-specific options.
    """

    _method_key: str = "method"
    _status_xattr_key: str = "status_xattr"
    _default_method: str = "none"
    _default_status_xattr: str = "user.share-perms-status"

    def __init__(self, pconf: dict[str, str]) -> None:
        self._pconf = pconf
        method = pconf.get(self._method_key, self._default_method)
        status_xattr = pconf.get(
            self._status_xattr_key, self._default_status_xattr
        )
        self.method: str = method
        self.status_xattr: str = status_xattr

    @property
    def options(self) -> dict[str, str]:
        """Return backend-specific options (reserved keys removed)."""
        reserved = {self._method_key, self._status_xattr_key}
        return {
            key: value
            for key, value in self._pconf.items()
            if key not in reserved
        }


def _shares_data(gconfig: GlobalConfig, iconfig: dict) -> list:
    try:
        shares = iconfig["shares"]
    except KeyError:
        return []
    return [gconfig.data["shares"][n] for n in shares]


def _globals_data(gconfig: GlobalConfig, iconfig: dict) -> list:
    try:
        gnames = iconfig["globals"]
    except KeyError:
        return []
    return [gconfig.data["globals"][n] for n in gnames]
07070100000031000081A4000000000000000000000001684BE19C0000109F000000000000000000000000000000000000003500000000sambacc-v0.6+git.60.2f89a38/sambacc/container_dns.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

from __future__ import annotations

import json
import subprocess
import typing

from sambacc import samba_cmds

EXTERNAL: str = "external"
INTERNAL: str = "internal"


class HostState:
    """The state of hosts tracked for DNS registration.

    `ref` is an opaque reference identifying the state snapshot and
    `items` holds the per-host registration entries.
    """

    T = typing.TypeVar("T", bound="HostState")

    def __init__(
        self, ref: str = "", items: typing.Optional[list[HostInfo]] = None
    ) -> None:
        self.ref: str = ref
        self.items: list[HostInfo] = items or []

    @classmethod
    def from_dict(cls: typing.Type[T], d: dict[str, typing.Any]) -> T:
        """Create a HostState from a JSON-compatible dict."""
        return cls(
            ref=d["ref"],
            items=[HostInfo.from_dict(i) for i in d.get("items", [])],
        )

    def __eq__(self, other: typing.Any) -> bool:
        # bug fix: comparing with a non-HostState object used to raise
        # AttributeError; returning NotImplemented lets python fall
        # back to its default (identity based) comparison
        if not isinstance(other, HostState):
            return NotImplemented
        return (
            self.ref == other.ref
            and len(self.items) == len(other.items)
            and all(s == o for (s, o) in zip(self.items, other.items))
        )


class HostInfo:
    """A single host's name, IPv4 address, and registration target."""

    T = typing.TypeVar("T", bound="HostInfo")

    def __init__(
        self, name: str = "", ipv4_addr: str = "", target: str = ""
    ) -> None:
        self.name = name
        self.ipv4_addr = ipv4_addr
        self.target = target

    @classmethod
    def from_dict(cls: typing.Type[T], d: dict[str, typing.Any]) -> T:
        """Create a HostInfo from a JSON-compatible dict.

        The `target` key is optional; `name` and `ipv4` are required.
        """
        return cls(
            name=d["name"],
            ipv4_addr=d["ipv4"],
            target=d.get("target", ""),
        )

    def __eq__(self, other: typing.Any) -> bool:
        # bug fix: comparing with a non-HostInfo object used to raise
        # AttributeError; return NotImplemented instead so python can
        # fall back to its default comparison
        if not isinstance(other, HostInfo):
            return NotImplemented
        return (
            self.name == other.name
            and self.ipv4_addr == other.ipv4_addr
            and self.target == other.target
        )


def parse(fh: typing.IO) -> HostState:
    """Parse JSON host-state data from an open file object."""
    data = json.load(fh)
    return HostState.from_dict(data)


def parse_file(path: str) -> HostState:
    """Read and parse host-state JSON from the file at `path`."""
    with open(path) as source:
        return parse(source)


def match_target(state: HostState, target_name: str) -> list[HostInfo]:
    """Return all host entries whose target matches `target_name`."""
    matched = []
    for entry in state.items:
        if entry.target == target_name:
            matched.append(entry)
    return matched


def register(
    domain: str,
    hs: HostState,
    prefix: typing.Optional[list[str]] = None,
    target_name: str = EXTERNAL,
) -> bool:
    """Register DNS entries for every host in `hs` matching
    `target_name` by running `net ads dns register` once per host.
    Returns True if at least one registration command was run.

    `prefix` optionally supplies a command prefix (e.g. a wrapper
    used to execute the net command).
    """
    updated = False
    for item in match_target(hs, target_name):
        ip = item.ipv4_addr
        fqdn = "{}.{}".format(item.name, domain)
        cmd = samba_cmds.net["ads", "-P", "dns", "register", fqdn, ip]
        if prefix is not None:
            cmd.cmd_prefix = prefix
        # raises subprocess.CalledProcessError if registration fails
        subprocess.check_call(list(cmd))
        updated = True
    return updated


def parse_and_update(
    domain: str,
    source: str,
    previous: typing.Optional[HostState] = None,
    target_name: str = EXTERNAL,
    reg_func: typing.Callable = register,
) -> typing.Tuple[HostState, bool]:
    """Parse host state from `source` and run DNS registration when it
    differs from `previous`. Returns the new state and a flag telling
    whether any registrations were performed.
    """
    current = parse_file(source)
    unchanged = previous is not None and current == previous
    if unchanged:
        # no changes
        return current, False
    updated = reg_func(domain, current, target_name=target_name)
    return current, updated


# TODO: replace this with the common version added to simple_waiter
def watch(
    domain: str,
    source: str,
    update_func: typing.Callable,
    pause_func: typing.Callable,
    print_func: typing.Optional[typing.Callable],
) -> None:
    """Loop forever applying `update_func` to the state file at
    `source`, pausing between iterations with `pause_func`.

    A missing source file resets the tracked state. The loop exits
    only when `pause_func` raises KeyboardInterrupt.
    """
    previous = None
    while True:
        try:
            previous, updated = update_func(domain, source, previous)
        except FileNotFoundError:
            if print_func:
                print_func(f"Source file [{source}] not found")
            updated = False
            # forget prior state so a reappearing file triggers an update
            previous = None
        if updated and print_func:
            print_func("Updating external dns registrations")
        try:
            pause_func()
        except KeyboardInterrupt:
            return
07070100000032000081A4000000000000000000000001684BE19C00006ED7000000000000000000000000000000000000002C00000000sambacc-v0.6+git.60.2f89a38/sambacc/ctdb.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import enum
import logging
import os
import subprocess
import time
import typing

from sambacc import config
from sambacc import leader
from sambacc import samba_cmds
from sambacc.jfile import ClusterMetaJSONFile
from sambacc.netcmd_loader import template_config
from sambacc.typelets import ExcType, ExcValue, ExcTraceback

_logger = logging.getLogger(__name__)


DB_DIR = "/var/lib/ctdb/persistent"
ETC_DIR = "/etc/ctdb"
SHARE_DIR = "/usr/share/ctdb"

CTDB_CONF: str = "/etc/ctdb/ctdb.conf"
CTDB_NODES: str = "/etc/ctdb/nodes"
ETC_SERVICES: str = "/etc/services"


class ClusterMetaObject(typing.Protocol):
    """A Cluster Meta Object can load or dump persistent cluster
    descriptions as JSON-compatible data.
    """

    def load(self) -> typing.Any:
        """Load a JSON-compatible object."""
        ...  # pragma: no cover

    def dump(self, data: typing.Any) -> None:
        """Dump (save) a JSON-compatible object."""
        ...  # pragma: no cover


class ClusterMeta(typing.Protocol):
    """ClusterMeta manages access to persistent cluster descriptions.

    Implementations hand out a ClusterMetaObject via the `open`
    context manager, optionally holding a lock for the duration.
    """

    def open(
        self, *, read: bool = True, write: bool = False, locked: bool = False
    ) -> typing.ContextManager[ClusterMetaObject]:
        """Return a context manager with access to a cluster meta object."""
        ...  # pragma: no cover


class NodeState(str, enum.Enum):
    """Lifecycle states for a node tracked in the cluster metadata."""

    NEW = "new"
    READY = "ready"
    CHANGED = "changed"
    REPLACED = "replaced"
    GONE = "gone"  # reserved


def next_state(state: NodeState) -> NodeState:
    """Return the state following `state` in the node lifecycle.

    States without a defined transition are returned unchanged.
    """
    transitions = {
        NodeState.NEW: NodeState.READY,
        NodeState.CHANGED: NodeState.REPLACED,
        NodeState.REPLACED: NodeState.READY,
    }
    return transitions.get(state, state)


class NodeNotPresent(KeyError):
    """Raised when a node identity is not found in the cluster
    metadata. Records the identity and, optionally, the pnn searched.
    """

    def __init__(
        self,
        identity: typing.Any,
        pnn: typing.Optional[typing.Union[str, int]] = None,
    ) -> None:
        super().__init__(identity)
        self.pnn = pnn
        self.identity = identity


def ensure_smb_conf(
    iconfig: config.InstanceConfig, path: str = config.SMB_CONF
) -> None:
    """Ensure that the smb.conf on disk is ctdb and registry enabled.

    The file at `path` is overwritten.
    """
    with open(path, "w") as fh:
        write_smb_conf(fh, iconfig)


def write_smb_conf(fh: typing.IO, iconfig: config.InstanceConfig) -> None:
    """Write an smb.conf style output enabling ctdb and samba registry.

    Content comes from the instance's ctdb samba config.
    """
    template_config(fh, iconfig.ctdb_smb_config())


def ensure_ctdb_conf(
    iconfig: config.InstanceConfig, path: str = CTDB_CONF
) -> None:
    """Ensure that the ctdb.conf on disk matches our desired state.

    The file at `path` is overwritten with values derived from the
    instance's ctdb config.
    """
    with open(path, "w") as fh:
        write_ctdb_conf(fh, iconfig.ctdb_config())


def write_ctdb_conf(
    fh: typing.IO, ctdb_params: dict, enc: typing.Callable = str
) -> None:
    """Write a ctdb.conf style output.

    Emits the [logging], [cluster], and [legacy] sections based on the
    values present in `ctdb_params`; parameters with no value are
    omitted. `enc` converts each line before writing (e.g. an encode
    function when `fh` is a binary stream).
    """

    def _write_param(name: str, key: str) -> None:
        # parameters with no configured value are skipped
        value = ctdb_params.get(key)
        if value is None:
            return
        fh.write(enc(f"{name} = {value}\n"))

    fh.write(enc("[logging]\n"))
    _write_param("log level", "log_level")
    fh.write(enc("\n"))
    fh.write(enc("[cluster]\n"))
    _write_param("recovery lock", "recovery_lock")
    if ctdb_params.get("nodes_cmd"):
        nodes_cmd = ctdb_params["nodes_cmd"]
        # bug fix: terminate the nodes-list line with its own newline
        # so the blank-line section separator below is preserved
        # (matching the output when nodes_cmd is unset)
        fh.write(enc(f"nodes list = !{nodes_cmd}\n"))
    fh.write(enc("\n"))
    fh.write(enc("[legacy]\n"))
    _write_param("realtime scheduling", "realtime_scheduling")
    _write_param("script log level", "script_log_level")
    fh.write(enc("\n"))


def ensure_ctdb_nodes(
    ctdb_nodes: list[str], real_path: str, canon_path: str = CTDB_NODES
) -> None:
    """Ensure a real nodes file exists, containing the specified content,
    and has a symlink in the proper place for ctdb.
    """
    # recreate the symlink pointing ctdb's canonical path at real_path
    try:
        os.unlink(canon_path)
    except FileNotFoundError:
        pass
    os.symlink(real_path, canon_path)
    # XXX: add locking?
    with open(real_path, "w") as fh:
        write_nodes_file(fh, ctdb_nodes)


def write_nodes_file(
    fh: typing.IO, ctdb_nodes: list[str], enc: typing.Callable = str
) -> None:
    """Write the ctdb nodes file: one node address per line."""
    lines = [enc(f"{node}\n") for node in ctdb_nodes]
    for line in lines:
        fh.write(line)


def read_nodes_file(fh: typing.IO) -> list[str]:
    """Read an open ctdb nodes file: one stripped entry per line."""
    return [line.strip() for line in fh]


def read_ctdb_nodes(path: str = CTDB_NODES) -> list[str]:
    """Read the content of the ctdb nodes file.

    A missing file is treated as an empty node list.
    """
    try:
        with open(path, "r") as fh:
            entries = read_nodes_file(fh)
    except FileNotFoundError:
        entries = []
    return entries


def _svc_match(service_name: str, line: str) -> bool:
    if not line.strip() or line.startswith("#"):
        return False
    first = line.split()[0]
    return first == service_name


def ensure_ctdb_port_in_services(port: int, path: str) -> None:
    """Rewrite the services file at `path` so the ctdb service maps to
    `port` for both tcp and udp, replacing any existing ctdb entries.

    A missing file is treated as empty.
    """
    try:
        with open(path, "r") as fh:
            lines = [line.strip() for line in fh]
    except FileNotFoundError:
        lines = []
    # drop any pre-existing ctdb service lines
    cleaned = [line for line in lines if not _svc_match("ctdb", line)]
    cleaned.append(f"ctdb  {port}/tcp   # custom ctdb port")
    cleaned.append(f"ctdb  {port}/udp   # custom ctdb port")
    # NOTE(review): the file is rewritten in place, not atomically
    with open(path, "w") as fh:
        for line in cleaned:
            fh.write(line)
            fh.write("\n")


class PublicAddrAssignment(typing.TypedDict):
    """A ctdb public address and the interfaces it may be assigned to."""

    address: str
    interfaces: list[str]


def _ensure_public_addresses_file(
    path: str, addrs: list[PublicAddrAssignment]
) -> None:
    """Write the ctdb public_addresses file at `path` from `addrs`.

    The file is overwritten.
    """
    with open(path, "w") as fh:
        _write_public_addresses_file(fh, addrs)


def _write_public_addresses_file(
    fh: typing.IO, addrs: list[PublicAddrAssignment]
) -> None:
    for entry in addrs:
        fh.write(entry["address"])
        if entry["interfaces"]:
            ifaces = ",".join(entry["interfaces"])
            fh.write(f" {ifaces}")
        fh.write("\n")


def ensure_ctdb_node_present(
    node: str,
    real_path: str,
    canon_path: str = CTDB_NODES,
    expected_pnn: typing.Optional[int] = None,
) -> None:
    """Ensure that the ctdb nodes file is populated with at least the
    node given. The optional `expected_pnn` can be provided to ensure that
    the node occupies the correct position in the nodes file.

    Raises ValueError if the node is not at the expected position.
    """
    nodes = read_ctdb_nodes(real_path)
    if node not in nodes:
        nodes.append(node)
    if expected_pnn is not None:
        # the pnn is simply the node's line index in the nodes file
        try:
            found_pnn = nodes.index(node)
        except ValueError:
            found_pnn = -1
        if expected_pnn != found_pnn:
            raise ValueError(f"expected pnn {expected_pnn} is not {found_pnn}")
    ensure_ctdb_nodes(nodes, real_path=real_path, canon_path=canon_path)


def add_node_to_statefile(
    identity: str, node: str, pnn: int, path: str, in_nodes: bool = False
) -> None:
    """Add the given node's identity, (node) IP, and PNN to the JSON based
    state file, located at `path`. If in_nodes is true, the state file will
    reflect that the node is already added to the CTDB nodes file.

    Raises ValueError when the pnn or identity already exists.
    """
    add_node_to_cluster_meta(
        ClusterMetaJSONFile(path), identity, node, pnn, in_nodes=in_nodes
    )


def add_node_to_cluster_meta(
    cmeta: ClusterMeta,
    identity: str,
    node: str,
    pnn: int,
    in_nodes: bool = False,
) -> None:
    """Add the given node's identity, (node) IP, and PNN to the cluster
    metadata.  If in_nodes is true, the state file will reflect that the node
    is already added to the CTDB nodes file.

    Raises ValueError when the pnn or identity already exists.
    """
    # load-modify-dump under the metadata lock
    with cmeta.open(write=True, locked=True) as cmo:
        data = cmo.load()
        _update_statefile(data, identity, node, pnn, in_nodes=in_nodes)
        cmo.dump(data)


def refresh_node_in_statefile(
    identity: str, node: str, pnn: int, path: str
) -> None:
    """Assuming the node is already in the statefile, update the state in
    the case that the node (IP) has changed.

    Raises NodeNotPresent when no matching entry exists.
    """
    refresh_node_in_cluster_meta(
        ClusterMetaJSONFile(path), identity, node, pnn
    )


def refresh_node_in_cluster_meta(
    cmeta: ClusterMeta, identity: str, node: str, pnn: int
) -> None:
    """Assuming the node is already in the cluster metadata, update the state
    in the case that the node (IP) has changed.

    Raises NodeNotPresent when no matching entry exists.
    """
    # load-modify-dump under the metadata lock
    with cmeta.open(write=True, locked=True) as cmo:
        data = cmo.load()
        _refresh_statefile(data, identity, node, pnn)
        cmo.dump(data)


def _update_statefile(
    data: dict[str, typing.Any],
    identity: str,
    node: str,
    pnn: int,
    in_nodes: bool = False,
) -> None:
    """Append a new node entry to the cluster meta data dict.

    Raises ValueError when the pnn or identity is already present.
    A node known to be in the ctdb nodes file starts READY, otherwise
    it starts NEW.
    """
    nodes = data.setdefault("nodes", [])
    for entry in nodes:
        if entry["pnn"] == pnn:
            raise ValueError("duplicate pnn")
        if entry["identity"] == identity:
            raise ValueError("duplicate identity")
    state = NodeState.READY if in_nodes else NodeState.NEW
    nodes.append(
        {
            "identity": identity,
            "node": node,
            "pnn": pnn,
            "state": state,
        }
    )


def _refresh_statefile(
    data: dict[str, typing.Any],
    identity: str,
    node: str,
    pnn: int,
    in_nodes: bool = False,
) -> None:
    """Update the node address for an existing (pnn, identity) entry in
    the cluster meta data dict, marking it CHANGED when the address
    differs.

    Raises NodeNotPresent when no entry matches, and ValueError when
    the pnn exists under a different identity. (`in_nodes` is accepted
    for signature parity but is not used here.)
    """
    data.setdefault("nodes", [])
    node_entry = None
    for entry in data["nodes"]:
        if pnn == entry["pnn"] and identity == entry["identity"]:
            node_entry = entry
            break
        if pnn == entry["pnn"]:
            # same pnn but a different identity is a conflict
            raise ValueError(
                f"matching pnn ({pnn}) identity={entry['identity']}"
            )
    if not node_entry:
        raise NodeNotPresent(identity, pnn)
    if node_entry["node"] == node:
        # do nothing
        return
    node_entry["node"] = node
    node_entry["state"] = NodeState.CHANGED


def _get_state(entry: dict[str, typing.Any]) -> NodeState:
    """Return the NodeState recorded in a cluster meta node entry."""
    return NodeState(entry["state"])


def _get_state_ok(entry: dict[str, typing.Any]) -> bool:
    """Return True when the node entry is in the READY state."""
    return _get_state(entry) == NodeState.READY


def pnn_in_nodes(pnn: int, nodes_json: str, real_path: str) -> bool:
    """Returns true if the specified pnn has an entry in the nodes json
    file and that the node is already added to the ctdb nodes file.

    NOTE(review): `real_path` is accepted but unused by the current
    implementation - confirm whether it can be dropped from callers.
    """
    return pnn_in_cluster_meta(ClusterMetaJSONFile(nodes_json), pnn)


def pnn_in_cluster_meta(cmeta: ClusterMeta, pnn: int) -> bool:
    """Returns true if the specified pnn has an entry in the cluster
    metadata and that entry is ready for use.
    """
    with cmeta.open(locked=True) as cmo:
        json_data = cmo.load()
    return any(
        entry["pnn"] == pnn and _get_state_ok(entry)
        for entry in json_data.get("nodes", [])
    )


def manage_nodes(
    pnn: int,
    nodes_json: str,
    real_path: str,
    pause_func: typing.Callable,
) -> None:
    """Monitor nodes json for updates, reflecting those changes into ctdb.

    Loops forever; `pause_func` is called between iterations.
    """
    manage_cluster_meta_updates(
        ClusterMetaJSONFile(nodes_json),
        pnn,
        real_path,
        pause_func,
    )


def manage_cluster_meta_updates(
    cmeta: ClusterMeta,
    pnn: int,
    real_path: str,
    pause_func: typing.Callable,
) -> None:
    """Monitor cluster meta for updates, reflecting those changes into ctdb.

    Loops forever; `pause_func` throttles the polling. Only a node
    that is already present in the nodes file may apply updates.
    """
    while True:
        _logger.info("checking if node is able to make updates")
        if _node_check(cmeta, pnn, real_path):
            _logger.info("checking for node updates")
            if _node_update(cmeta, real_path):
                _logger.info("updated nodes")
        else:
            _logger.warning("node can not make updates")
        pause_func()


def _node_check(cmeta: ClusterMeta, pnn: int, real_path: str) -> bool:
    """Return True when the node identified by pnn appears in both the
    cluster metadata and the ctdb nodes file, which makes it eligible to
    modify the nodes file.
    """
    with cmeta.open(locked=True) as cmo:
        desired = cmo.load().get("nodes", [])
    ctdb_nodes = read_ctdb_nodes(real_path)
    # locate this node's entry in the metadata
    my_entry = next((e for e in desired if e.get("pnn") == pnn), None)
    if my_entry is None:
        # no entry found for this node
        _logger.warning(f"PNN {pnn} not found in json state file")
        return False
    # a node may only change the nodes file if it is already listed in it
    return my_entry["node"] in ctdb_nodes


def _node_update_check(
    json_data: dict[str, typing.Any], real_path: str
) -> tuple[list[str], list[typing.Any], list[typing.Any]]:
    """Compare node entries in the cluster meta (json_data) against the
    ctdb nodes file at real_path.

    Returns a 3-tuple of:
      * the current lines of the ctdb nodes file
      * entries whose line must be added to / updated in the nodes file
      * entries that still need a reload / state advancement
    Raises ValueError if a READY entry is missing from the nodes file.
    """
    desired = json_data.get("nodes", [])
    ctdb_nodes = read_ctdb_nodes(real_path)
    update_nodes = []
    need_reload = []
    # states for which a mismatch with the nodes file is expected/allowed
    # NOTE(review): raw "state" values from JSON are compared directly to
    # NodeState members here; presumably NodeState is a str-valued enum --
    # confirm.
    _update_states = (NodeState.NEW, NodeState.CHANGED, NodeState.REPLACED)
    for entry in desired:
        pnn = entry["pnn"]
        # does the nodes-file line at index pnn already match this entry?
        matched = _node_line(ctdb_nodes, pnn) == entry["node"]
        if matched and _get_state_ok(entry):
            # everything's fine. skip this entry
            continue
        elif not matched:
            if entry["state"] in _update_states:
                update_nodes.append(entry)
                need_reload.append(entry)
            elif entry["state"] == NodeState.READY:
                # a READY entry must already be in the file; divergence here
                # means the file and metadata are inconsistent
                msg = f"ready node (pnn {pnn}) missing from {ctdb_nodes}"
                raise ValueError(msg)
        else:
            # node present but state indicates
            # update is not finalized
            need_reload.append(entry)
    return ctdb_nodes, update_nodes, need_reload


def _node_line(ctdb_nodes: list[str], pnn: int) -> str:
    try:
        return ctdb_nodes[pnn]
    except IndexError:
        return ""


def _entry_to_node(ctdb_nodes: list[str], entry: dict[str, typing.Any]) -> str:
    """Render a cluster meta entry as a ctdb nodes-file line. For CHANGED or
    GONE entries the existing line at the entry's pnn index is returned
    commented out; otherwise the entry's node address is returned as-is.
    """
    pnn: int = entry["pnn"]
    if entry["state"] == NodeState.CHANGED or entry["state"] == NodeState.GONE:
        # strip any existing '#' chars first so the line carries exactly
        # one leading comment marker
        return "#{}".format(ctdb_nodes[pnn].strip("#"))
    return entry["node"]


def _node_update(cmeta: ClusterMeta, real_path: str) -> bool:
    """Reconcile the ctdb nodes file at real_path with the cluster meta.

    First probes read-only; if a change appears necessary the state is
    rechecked with the metadata open read-write, the nodes file rewritten,
    `ctdb reloadnodes` invoked, and affected entries' states advanced.
    Returns True if any changes were made, False otherwise.
    """
    # open r/o so that we don't initially open for write.  we do a probe and
    # decide if anything needs to be updated if we are wrong, its not a
    # problem, we'll "time out" and reprobe later
    with cmeta.open(locked=True) as cmo:
        json_data = cmo.load()
        _, test_chg_nodes, test_need_reload = _node_update_check(
            json_data, real_path
        )
        if not test_chg_nodes and not test_need_reload:
            _logger.info("examined nodes state - no changes")
            return False
    # we probably need to make a change. but we recheck our state again
    # under lock, with the data file open r/w
    # update the nodes file and make changes to ctdb
    with cmeta.open(write=True, locked=True) as cmo:
        json_data = cmo.load()
        ctdb_nodes, chg_nodes, need_reload = _node_update_check(
            json_data, real_path
        )
        if not chg_nodes and not need_reload:
            _logger.info("reexamined nodes state - no changes")
            return False
        _logger.info("writing updates to ctdb nodes file")
        new_ctdb_nodes = list(ctdb_nodes)
        for entry in chg_nodes:
            pnn = entry["pnn"]
            expected_line = _entry_to_node(ctdb_nodes, entry)
            if _node_line(new_ctdb_nodes, pnn) == expected_line:
                continue
            if entry["state"] == NodeState.NEW:
                # new entries may only be appended at the end of the file
                if pnn != len(new_ctdb_nodes):
                    # (fixed: second fragment previously lacked the f-prefix
                    # and printed the literal text "{new_ctdb_nodes}")
                    raise ValueError(
                        f"unexpected pnn in new entry {entry}:"
                        f" nodes: {new_ctdb_nodes}"
                    )
                new_ctdb_nodes.append(expected_line)
            else:
                new_ctdb_nodes[pnn] = expected_line
        _save_nodes(real_path, new_ctdb_nodes)
        _logger.info("running: ctdb reloadnodes")
        subprocess.check_call(list(samba_cmds.ctdb["reloadnodes"]))
        # advance each reloaded entry to its next lifecycle state
        for entry in need_reload:
            entry["state"] = next_state(entry["state"])
            _logger.debug(
                "setting node identity=[{}] pnn={} to {}".format(
                    entry["identity"],
                    entry["pnn"],
                    entry["state"],
                )
            )
        cmo.dump(json_data)
    return True


def cluster_meta_to_nodes(
    cmeta: ClusterMeta, dest: typing.Union[str, typing.IO]
) -> None:
    """Write a nodes file based on the current content of the cluster
    metadata."""
    with cmeta.open(locked=True) as cmo:
        meta = cmo.load()
        node_entries = meta.get("nodes", [])
        _logger.info("Found node metadata: %r", node_entries)
        lines = _cluster_meta_to_ctdb_nodes(node_entries)
        if not isinstance(dest, str):
            # dest is an already-open file-like object
            write_nodes_file(dest, lines)
        else:
            # dest is a path name
            _logger.info("Will write nodes: %s", lines)
            _save_nodes(dest, lines)


def _cluster_meta_to_ctdb_nodes(nodes: list[dict]) -> list[str]:
    """Convert cluster meta node entries to ctdb nodes-file lines, placing
    each entry at the index given by its pnn (gaps stay empty strings).

    Returns an empty list for an empty entries list; previously max() on an
    empty sequence raised ValueError, reachable via
    monitor_cluster_meta_changes when the meta carries no "nodes" key.
    """
    # pnn is zero indexed; default of -1 yields an empty result for no nodes
    pnn_max = max((n["pnn"] for n in nodes), default=-1) + 1
    ctdb_nodes: list[str] = [""] * pnn_max
    for entry in nodes:
        pnn = entry["pnn"]
        # overwrite the pnn indexed entry with expected value
        ctdb_nodes[pnn] = _entry_to_node(ctdb_nodes, entry)
    return ctdb_nodes


def _save_nodes(path: str, ctdb_nodes: list[str]) -> None:
    """Write the ctdb nodes lines to the file at path, flushing and
    fsync-ing so the content is durable before returning."""
    with open(path, "w") as nffh:
        write_nodes_file(nffh, ctdb_nodes)
        nffh.flush()
        # push the data to stable storage, not just the page cache
        os.fsync(nffh)


def monitor_cluster_meta_changes(
    cmeta: ClusterMeta,
    pause_func: typing.Callable,
    *,
    nodes_file_path: typing.Optional[str] = None,
    reload_all: bool = False,
    leader_locator: typing.Optional[leader.LeaderLocator] = None,
) -> None:
    """Monitor cluster meta for changes, reflecting those changes into ctdb.

    Unlike manage_cluster_meta_updates this function never changes the
    contents of the nodes list in the cluster meta and takes those values
    as a given, assuming some external agent has the correct global view of
    the cluster and is updating it correctly. This function exists to
    translate that content into something ctdb can understand.

    pause_func is called at the top of each poll iteration to rate-limit
    the loop. When nodes_file_path is set the derived nodes lines are also
    written to that file. reload_all and leader_locator control how the
    reloadnodes command is issued (see _maybe_reload_nodes).
    """
    prev_meta: dict[str, typing.Any] = {}
    # seed the previous nodes view either from an existing nodes file or
    # from the current metadata content
    if nodes_file_path:
        prev_nodes = read_ctdb_nodes(nodes_file_path)
    else:
        with cmeta.open(locked=True) as cmo:
            meta1 = cmo.load()
        prev_nodes = _cluster_meta_to_ctdb_nodes(meta1.get("nodes", []))
    _logger.debug("initial cluster meta content: %r", prev_meta)
    _logger.debug("initial nodes content: %r", prev_nodes)
    while True:
        pause_func()
        with cmeta.open(locked=True) as cmo:
            curr_meta = cmo.load()
        if curr_meta == prev_meta:
            _logger.debug("cluster meta content unchanged: %r", curr_meta)
            continue
        if len(prev_meta) > 0 and len(curr_meta) == 0:
            # cluster is possibly (probably?) being destroyed.
            # Return from this loop and let the command-level loop decide if
            # this function needs to be restarted or not. There's a chance this
            # process will be terminated very soon anyway.
            _logger.warning("no current nodes available")
            return
        _logger.info("cluster meta content changed")
        _logger.debug(
            "cluster meta: previous=%r current=%r", prev_meta, curr_meta
        )
        prev_meta = curr_meta

        # maybe some other metadata changed?
        expected_nodes = _cluster_meta_to_ctdb_nodes(
            curr_meta.get("nodes", [])
        )
        if prev_nodes == expected_nodes:
            _logger.debug("ctdb nodes list unchanged: %r", expected_nodes)
            continue
        _logger.info("ctdb nodes list changed")
        _logger.debug(
            "nodes list: previous=%r current=%r", prev_nodes, expected_nodes
        )
        prev_nodes = expected_nodes

        if nodes_file_path:
            _logger.info("updating nodes file: %s", nodes_file_path)
            _save_nodes(nodes_file_path, expected_nodes)
        _maybe_reload_nodes_retry(leader_locator, reload_all=reload_all)


def _maybe_reload_nodes_retry(
    leader_locator: typing.Optional[leader.LeaderLocator] = None,
    reload_all: bool = False,
    *,
    tries: int = 5,
) -> None:
    """Invoke _maybe_reload_nodes, retrying on command failure with
    exponential backoff (1, 2, 4, ... seconds before each attempt).
    Raises RuntimeError once all tries are exhausted.
    """
    for attempt in range(tries):
        # back off before every attempt, including the first
        time.sleep(2**attempt)
        try:
            _maybe_reload_nodes(leader_locator, reload_all=reload_all)
        except subprocess.CalledProcessError:
            _logger.exception("failed to execute reload nodes command")
            continue
        return
    raise RuntimeError("exceeded retries running reload nodes command")


def _maybe_reload_nodes(
    leader_locator: typing.Optional[leader.LeaderLocator] = None,
    reload_all: bool = False,
) -> None:
    """Issue a reloadnodes command if leader_locator is available and
    node is leader or reload_all is true.
    """

    def _reloadnodes() -> None:
        # shared invocation of the ctdb reloadnodes command
        _logger.info("running: ctdb reloadnodes")
        subprocess.check_call(list(samba_cmds.ctdb["reloadnodes"]))

    if reload_all:
        _reloadnodes()
        return
    if leader_locator is None:
        _logger.warning("no leader locator: not calling reloadnodes")
        return
    # use the leader locator to only issue the reloadnodes command once
    # for a change instead of all the nodes "spamming" the cluster
    with leader_locator as ll:
        if not ll.is_leader():
            _logger.info("node is not leader. skipping reloadnodes")
        else:
            _reloadnodes()


def ensure_ctdbd_etc_files(
    etc_path: str = ETC_DIR,
    src_path: str = SHARE_DIR,
    *,
    iconfig: typing.Optional[config.InstanceConfig] = None,
    services_path: str = ETC_SERVICES,
) -> None:
    """Ensure certain files that ctdbd expects to exist in its etc dir
    do exist.

    etc_path and src_path locate the destination and source trees. When
    iconfig is given, its ctdb config decides whether a public_addresses
    file and/or a custom ctdb port in the services file are also needed.
    """

    def _force_symlink(src: str, dst: str) -> None:
        # replace any pre-existing file/link so the symlink always points
        # at the current source tree
        try:
            os.unlink(dst)
        except FileNotFoundError:
            pass
        os.symlink(src, dst)

    legacy_scripts_src = os.path.join(src_path, "events/legacy")
    legacy_scripts_dst = os.path.join(etc_path, "events/legacy")
    link_legacy_scripts = ["00.ctdb.script"]

    public_addresses: list[PublicAddrAssignment] = []
    custom_ctdb_port = 0
    if iconfig:
        ctdb_conf = iconfig.ctdb_config()
        # todo: when we have a real config object for ctdb conf we can drop
        # the typing.cast
        public_addresses = typing.cast(
            list[PublicAddrAssignment], ctdb_conf.get("public_addresses", [])
        )
        custom_ctdb_port = typing.cast(int, ctdb_conf.get("ctdb_port", 0))
    if public_addresses:
        # the interface script is only needed when public addresses are used
        link_legacy_scripts.append("10.interface.script")

    os.makedirs(etc_path, exist_ok=True)
    # link the top-level helper files ctdbd sources at runtime
    for name in ("functions", "notify.sh"):
        _force_symlink(
            os.path.join(src_path, name), os.path.join(etc_path, name)
        )

    os.makedirs(legacy_scripts_dst, exist_ok=True)
    for legacy_script_name in link_legacy_scripts:
        _force_symlink(
            os.path.join(legacy_scripts_src, legacy_script_name),
            os.path.join(legacy_scripts_dst, legacy_script_name),
        )

    if public_addresses:
        pa_path = os.path.join(etc_path, "public_addresses")
        _ensure_public_addresses_file(pa_path, public_addresses)
    if custom_ctdb_port:
        ensure_ctdb_port_in_services(custom_ctdb_port, services_path)


_SRC_TDB_FILES = [
    "account_policy.tdb",
    "group_mapping.tdb",
    "passdb.tdb",
    "registry.tdb",
    "secrets.tdb",
    "share_info.td",
    "winbindd_idmap.tdb",
]

_SRC_TDB_DIRS = [
    "/var/lib/samba",
    "/var/lib/samba/private",
]


def migrate_tdb(
    iconfig: config.InstanceConfig, dest_dir: str, pnn: int = 0
) -> None:
    """Migrate TDB files into CTDB."""
    # TODO: these paths should be based on our instance config, not hard coded
    # NOTE: iconfig is currently unused pending the TODO above
    candidates = (
        os.path.join(parent, tdbfile)
        for tdbfile in _SRC_TDB_FILES
        for parent in _SRC_TDB_DIRS
    )
    for tdb_path in candidates:
        if _has_tdb_file(tdb_path):
            _convert_tdb_file(tdb_path, dest_dir, pnn=pnn)


def _has_tdb_file(tdb_path: str) -> bool:
    """Return True when a regular file exists at tdb_path."""
    # TODO: It would be preferable to handle errors from the convert
    # function only, but it if ltdbtool is missing it raises FileNotFoundError
    # and its not simple to disambiguate between the command missing and the
    # tdb file missing.
    _logger.info(f"Checking for {tdb_path}")
    found = os.path.isfile(tdb_path)
    return found


def _convert_tdb_file(tdb_path: str, dest_dir: str, pnn: int = 0) -> None:
    """Convert one TDB file with ltdbtool, writing the result into dest_dir
    with the node's pnn appended to the file name."""
    src_name = os.path.basename(tdb_path)
    opath = os.path.join(dest_dir, "{}.{}".format(src_name, pnn))
    _logger.info(f"Converting {tdb_path} to {opath} ...")
    convert_cmd = samba_cmds.ltdbtool["convert", "-s0", tdb_path, opath]
    subprocess.check_call(list(convert_cmd))


def archive_tdb(iconfig: config.InstanceConfig, dest_dir: str) -> None:
    """Archive TDB files into a given directory."""
    # TODO: these paths should be based on our instance config, not hard coded
    try:
        os.mkdir(dest_dir)
        _logger.debug("dest_dir: %r created", dest_dir)
    except FileExistsError:
        _logger.debug("dest_dir: %r already exists", dest_dir)
    sources = (
        os.path.join(parent, tdbfile)
        for tdbfile in _SRC_TDB_FILES
        for parent in _SRC_TDB_DIRS
    )
    for tdb_path in sources:
        if not _has_tdb_file(tdb_path):
            continue
        dest_path = os.path.join(dest_dir, os.path.basename(tdb_path))
        _logger.info("archiving: %r -> %r", tdb_path, dest_path)
        os.rename(tdb_path, dest_path)


def check_nodestatus(cmd: samba_cmds.CommandArgs = samba_cmds.ctdb) -> None:
    """Check the ctdb cluster by running `<cmd> nodestatus`.

    NOTE(review): samba_cmds.execute appears to hand control to the
    command (exec-style); confirm against samba_cmds before relying on
    control returning here.
    """
    cmd_ctdb_check = cmd["nodestatus"]
    samba_cmds.execute(cmd_ctdb_check)


def _read_command_pnn(cmd: samba_cmds.CommandArgs) -> typing.Optional[int]:
    """Run a ctdb command assuming it returns a pnn value. Return the pnn as an
    int on success, None on command failure.
    """
    try:
        raw_output = subprocess.check_output(list(cmd))
    except subprocess.CalledProcessError as err:
        _logger.error(f"command {cmd!r} failed: {err!r}")
        return None
    except FileNotFoundError:
        _logger.error(f"ctdb command ({cmd!r}) not found")
        return None
    pnntxt = raw_output.decode("utf8").strip()
    try:
        return int(pnntxt)
    except ValueError:
        _logger.debug(f"ctdb command wrote invalid pnn: {pnntxt!r}")
        return None


def current_pnn() -> typing.Optional[int]:
    """Run the `ctdb pnn` command. Returns the pnn value or None if the command
    fails.
    """
    # delegate parsing/error handling to _read_command_pnn
    return _read_command_pnn(samba_cmds.ctdb["pnn"])


def leader_pnn() -> typing.Optional[int]:
    """Run the `ctdb leader` (or equivalent) command. Returns the pnn value or
    None if the command fails.
    """
    # recmaster command: <ctdb recmaster|leader>
    # samba_cmds picks the right subcommand name for the samba version
    admin_cmd = samba_cmds.ctdb_leader_admin_cmd()
    return _read_command_pnn(samba_cmds.ctdb[admin_cmd])


class CLILeaderStatus:
    """Holds the leadership flag computed by CLILeaderLocator.__enter__."""

    # default: not the leader until determined otherwise
    _isleader = False

    def is_leader(self) -> bool:
        """Return True if this node was determined to be the cluster leader."""
        return self._isleader


class CLILeaderLocator:
    """A leader locator that relies entirely on checking the
    recovery master using the ctdb command line tool.
    """

    def __enter__(self) -> CLILeaderStatus:
        # query this node's pnn and the leader's pnn; either may be None
        # when the underlying ctdb command fails
        mypnn = current_pnn()
        leader = leader_pnn()
        sts = CLILeaderStatus()
        sts._isleader = mypnn is not None and mypnn == leader
        return sts

    def __exit__(
        self, exc_type: ExcType, exc_val: ExcValue, exc_tb: ExcTraceback
    ) -> bool:
        # NOTE(review): returning True suppresses any exception raised inside
        # the `with` block -- verify this blanket suppression is intentional
        return True
07070100000033000041ED000000000000000000000002684BE19C00000000000000000000000000000000000000000000002900000000sambacc-v0.6+git.60.2f89a38/sambacc/grpc07070100000034000081A4000000000000000000000001684BE19C00000000000000000000000000000000000000000000003500000000sambacc-v0.6+git.60.2f89a38/sambacc/grpc/__init__.py07070100000035000081A4000000000000000000000001684BE19C00001660000000000000000000000000000000000000003400000000sambacc-v0.6+git.60.2f89a38/sambacc/grpc/backend.py#
# sambacc: a samba container configuration tool (and more)
# Copyright (C) 2025  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

from typing import Any, Union, Optional

import dataclasses
import json
import os
import subprocess

from sambacc.typelets import Self
import sambacc.config
import sambacc.samba_cmds


@dataclasses.dataclass
class Versions:
    """Version strings for samba, sambacc, and the container image."""

    # output of `smbd --version`
    samba_version: str = ""
    # version of the installed sambacc package
    sambacc_version: str = ""
    # taken from the SAMBA_CONTAINER_VERSION environment variable
    container_version: str = ""


@dataclasses.dataclass
class SessionCrypto:
    """Cipher/degree pair describing session encryption or signing."""

    cipher: str
    degree: str

    @classmethod
    def load(cls, json_object: dict[str, Any]) -> Self:
        """Build a SessionCrypto from a smbstatus JSON object; a cipher
        of "-" is normalized to an empty string."""
        raw_cipher = json_object.get("cipher", "")
        if raw_cipher == "-":
            raw_cipher = ""
        return cls(cipher=raw_cipher, degree=json_object.get("degree", ""))


@dataclasses.dataclass
class Session:
    """A single SMB session parsed from smbstatus JSON output."""

    session_id: str
    username: str
    groupname: str
    remote_machine: str
    hostname: str
    session_dialect: str
    uid: int
    gid: int
    # encryption/signing stay None when the source JSON lacks the
    # corresponding object
    encryption: Optional[SessionCrypto] = None
    signing: Optional[SessionCrypto] = None

    @classmethod
    def load(cls, json_object: dict[str, Any]) -> Self:
        """Build a Session from a smbstatus JSON object. Missing string
        fields default to "", missing uid/gid default to -1."""
        _encryption = json_object.get("encryption")
        encryption = SessionCrypto.load(_encryption) if _encryption else None
        _signing = json_object.get("signing")
        signing = SessionCrypto.load(_signing) if _signing else None
        return cls(
            session_id=json_object.get("session_id", ""),
            username=json_object.get("username", ""),
            groupname=json_object.get("groupname", ""),
            remote_machine=json_object.get("remote_machine", ""),
            hostname=json_object.get("hostname", ""),
            session_dialect=json_object.get("session_dialect", ""),
            uid=int(json_object.get("uid", -1)),
            gid=int(json_object.get("gid", -1)),
            encryption=encryption,
            signing=signing,
        )


@dataclasses.dataclass
class TreeConnection:
    """A single SMB tree connection (share attachment) from smbstatus."""

    tcon_id: str
    session_id: str
    service_name: str

    @classmethod
    def load(cls, json_object: dict[str, Any]) -> Self:
        """Build a TreeConnection from a smbstatus JSON object."""
        get = json_object.get
        # the source JSON carries the service name under the "service" key
        return cls(
            tcon_id=get("tcon_id", ""),
            session_id=get("session_id", ""),
            service_name=get("service", ""),
        )


@dataclasses.dataclass
class Status:
    """Parsed smbstatus output: sessions and tree connections."""

    timestamp: str
    version: str
    sessions: list[Session]
    tcons: list[TreeConnection]

    @classmethod
    def load(cls, json_object: dict[str, Any]) -> Self:
        """Build a Status from a decoded smbstatus JSON document. The
        "sessions" and "tcons" members are objects keyed by id; only
        their values are retained."""
        session_map = json_object.get("sessions", {})
        tcon_map = json_object.get("tcons", {})
        return cls(
            timestamp=json_object.get("timestamp", ""),
            version=json_object.get("version", ""),
            sessions=[Session.load(v) for v in session_map.values()],
            tcons=[TreeConnection.load(v) for v in tcon_map.values()],
        )

    @classmethod
    def parse(cls, txt: Union[str, bytes]) -> Self:
        """Parse smbstatus JSON text (or bytes) into a Status."""
        return cls.load(json.loads(txt))


class ControlBackend:
    """Backend for samba container control operations, implemented by
    invoking samba command line tools."""

    def __init__(self, config: sambacc.config.InstanceConfig) -> None:
        self._config = config

    def _samba_version(self) -> str:
        # `smbd --version` prints a single version line
        version_cmd = sambacc.samba_cmds.smbd["--version"]
        result = subprocess.run(
            list(version_cmd), check=True, capture_output=True
        )
        return result.stdout.decode().strip()

    def _sambacc_version(self) -> str:
        # the _version module only exists in built/installed packages
        try:
            import sambacc._version

            return sambacc._version.version
        except ImportError:
            return "(unknown)"

    def _container_version(self) -> str:
        return os.environ.get("SAMBA_CONTAINER_VERSION", "(unknown)")

    def get_versions(self) -> Versions:
        """Collect samba, sambacc, and container version strings."""
        return Versions(
            samba_version=self._samba_version(),
            sambacc_version=self._sambacc_version(),
            container_version=self._container_version(),
        )

    def is_clustered(self) -> bool:
        """Return True when this instance is configured to use ctdb."""
        return self._config.with_ctdb

    def get_status(self) -> Status:
        """Run `smbstatus --json` and parse its output into a Status."""
        smbstatus = sambacc.samba_cmds.smbstatus["--json"]
        # TODO: the json output of smbstatus is potentially large
        # investigate streaming reads instead of fully buffered read
        # later
        result = subprocess.run(
            list(smbstatus), capture_output=True, check=False
        )
        if result.returncode != 0:
            raise RuntimeError(
                f"smbstatus error: {result.returncode}: {result.stderr!r}"
            )
        return Status.parse(result.stdout)

    def close_share(self, share_name: str, denied_users: bool) -> None:
        """Run smbcontrol close-share (or close-denied-share) for the
        named share."""
        subcmd = "close-denied-share" if denied_users else "close-share"
        cmd = sambacc.samba_cmds.smbcontrol["smbd", subcmd, share_name]
        subprocess.run(list(cmd), check=True)

    def kill_client(self, ip_address: str) -> None:
        """Run smbcontrol kill-client-ip for the given address."""
        cmd = sambacc.samba_cmds.smbcontrol[
            "smbd", "kill-client-ip", ip_address
        ]
        subprocess.run(list(cmd), check=True)
07070100000036000041ED000000000000000000000002684BE19C00000000000000000000000000000000000000000000003300000000sambacc-v0.6+git.60.2f89a38/sambacc/grpc/generated07070100000037000081A4000000000000000000000001684BE19C00000000000000000000000000000000000000000000003F00000000sambacc-v0.6+git.60.2f89a38/sambacc/grpc/generated/__init__.py07070100000038000081A4000000000000000000000001684BE19C00001ED3000000000000000000000000000000000000004200000000sambacc-v0.6+git.60.2f89a38/sambacc/grpc/generated/control_pb2.py# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: control.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()




DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\rcontrol.proto\"\r\n\x0bInfoRequest\"/\n\tSambaInfo\x12\x0f\n\x07version\x18\x01 \x01(\t\x12\x11\n\tclustered\x18\x02 \x01(\x08\"H\n\x12SambaContainerInfo\x12\x17\n\x0fsambacc_version\x18\x01 \x01(\t\x12\x19\n\x11\x63ontainer_version\x18\x02 \x01(\t\"Z\n\x0bGeneralInfo\x12\x1e\n\nsamba_info\x18\x01 \x01(\x0b\x32\n.SambaInfo\x12+\n\x0e\x63ontainer_info\x18\x02 \x01(\x0b\x32\x13.SambaContainerInfo\"\x0f\n\rStatusRequest\"/\n\rSessionCrypto\x12\x0e\n\x06\x63ipher\x18\x01 \x01(\t\x12\x0e\n\x06\x64\x65gree\x18\x02 \x01(\t\"\xe8\x01\n\x0bSessionInfo\x12\x12\n\nsession_id\x18\x01 \x01(\t\x12\x10\n\x08username\x18\x02 \x01(\t\x12\x11\n\tgroupname\x18\x03 \x01(\t\x12\x16\n\x0eremote_machine\x18\x04 \x01(\t\x12\x10\n\x08hostname\x18\x05 \x01(\t\x12\x17\n\x0fsession_dialect\x18\x06 \x01(\t\x12\x0b\n\x03uid\x18\x07 \x01(\r\x12\x0b\n\x03gid\x18\x08 \x01(\r\x12\"\n\nencryption\x18\t \x01(\x0b\x32\x0e.SessionCrypto\x12\x1f\n\x07signing\x18\n \x01(\x0b\x32\x0e.SessionCrypto\"E\n\x08\x43onnInfo\x12\x0f\n\x07tcon_id\x18\x01 \x01(\t\x12\x12\n\nsession_id\x18\x02 \x01(\t\x12\x14\n\x0cservice_name\x18\x03 \x01(\t\"k\n\nStatusInfo\x12\x18\n\x10server_timestamp\x18\x01 \x01(\t\x12\x1e\n\x08sessions\x18\x02 \x03(\x0b\x32\x0c.SessionInfo\x12#\n\x10tree_connections\x18\x03 \x03(\x0b\x32\t.ConnInfo\"=\n\x11\x43loseShareRequest\x12\x12\n\nshare_name\x18\x01 \x01(\t\x12\x14\n\x0c\x64\x65nied_users\x18\x02 \x01(\x08\"\x10\n\x0e\x43loseShareInfo\"\'\n\x11KillClientRequest\x12\x12\n\nip_address\x18\x01 \x01(\t\"\x10\n\x0eKillClientInfo2\xc9\x01\n\x0cSambaControl\x12\"\n\x04Info\x12\x0c.InfoRequest\x1a\x0c.GeneralInfo\x12%\n\x06Status\x12\x0e.StatusRequest\x1a\x0b.StatusInfo\x12\x31\n\nCloseShare\x12\x12.CloseShareRequest\x1a\x0f.CloseShareInfo\x12;\n\x14KillClientConnection\x12\x12.KillClientRequest\x1a\x0f.KillClientInfob\x06proto3')



_INFOREQUEST = DESCRIPTOR.message_types_by_name['InfoRequest']
_SAMBAINFO = DESCRIPTOR.message_types_by_name['SambaInfo']
_SAMBACONTAINERINFO = DESCRIPTOR.message_types_by_name['SambaContainerInfo']
_GENERALINFO = DESCRIPTOR.message_types_by_name['GeneralInfo']
_STATUSREQUEST = DESCRIPTOR.message_types_by_name['StatusRequest']
_SESSIONCRYPTO = DESCRIPTOR.message_types_by_name['SessionCrypto']
_SESSIONINFO = DESCRIPTOR.message_types_by_name['SessionInfo']
_CONNINFO = DESCRIPTOR.message_types_by_name['ConnInfo']
_STATUSINFO = DESCRIPTOR.message_types_by_name['StatusInfo']
_CLOSESHAREREQUEST = DESCRIPTOR.message_types_by_name['CloseShareRequest']
_CLOSESHAREINFO = DESCRIPTOR.message_types_by_name['CloseShareInfo']
_KILLCLIENTREQUEST = DESCRIPTOR.message_types_by_name['KillClientRequest']
_KILLCLIENTINFO = DESCRIPTOR.message_types_by_name['KillClientInfo']
InfoRequest = _reflection.GeneratedProtocolMessageType('InfoRequest', (_message.Message,), {
  'DESCRIPTOR' : _INFOREQUEST,
  '__module__' : 'control_pb2'
  # @@protoc_insertion_point(class_scope:InfoRequest)
  })
_sym_db.RegisterMessage(InfoRequest)

SambaInfo = _reflection.GeneratedProtocolMessageType('SambaInfo', (_message.Message,), {
  'DESCRIPTOR' : _SAMBAINFO,
  '__module__' : 'control_pb2'
  # @@protoc_insertion_point(class_scope:SambaInfo)
  })
_sym_db.RegisterMessage(SambaInfo)

SambaContainerInfo = _reflection.GeneratedProtocolMessageType('SambaContainerInfo', (_message.Message,), {
  'DESCRIPTOR' : _SAMBACONTAINERINFO,
  '__module__' : 'control_pb2'
  # @@protoc_insertion_point(class_scope:SambaContainerInfo)
  })
_sym_db.RegisterMessage(SambaContainerInfo)

GeneralInfo = _reflection.GeneratedProtocolMessageType('GeneralInfo', (_message.Message,), {
  'DESCRIPTOR' : _GENERALINFO,
  '__module__' : 'control_pb2'
  # @@protoc_insertion_point(class_scope:GeneralInfo)
  })
_sym_db.RegisterMessage(GeneralInfo)

StatusRequest = _reflection.GeneratedProtocolMessageType('StatusRequest', (_message.Message,), {
  'DESCRIPTOR' : _STATUSREQUEST,
  '__module__' : 'control_pb2'
  # @@protoc_insertion_point(class_scope:StatusRequest)
  })
_sym_db.RegisterMessage(StatusRequest)

SessionCrypto = _reflection.GeneratedProtocolMessageType('SessionCrypto', (_message.Message,), {
  'DESCRIPTOR' : _SESSIONCRYPTO,
  '__module__' : 'control_pb2'
  # @@protoc_insertion_point(class_scope:SessionCrypto)
  })
_sym_db.RegisterMessage(SessionCrypto)

SessionInfo = _reflection.GeneratedProtocolMessageType('SessionInfo', (_message.Message,), {
  'DESCRIPTOR' : _SESSIONINFO,
  '__module__' : 'control_pb2'
  # @@protoc_insertion_point(class_scope:SessionInfo)
  })
_sym_db.RegisterMessage(SessionInfo)

ConnInfo = _reflection.GeneratedProtocolMessageType('ConnInfo', (_message.Message,), {
  'DESCRIPTOR' : _CONNINFO,
  '__module__' : 'control_pb2'
  # @@protoc_insertion_point(class_scope:ConnInfo)
  })
_sym_db.RegisterMessage(ConnInfo)

StatusInfo = _reflection.GeneratedProtocolMessageType('StatusInfo', (_message.Message,), {
  'DESCRIPTOR' : _STATUSINFO,
  '__module__' : 'control_pb2'
  # @@protoc_insertion_point(class_scope:StatusInfo)
  })
_sym_db.RegisterMessage(StatusInfo)

CloseShareRequest = _reflection.GeneratedProtocolMessageType('CloseShareRequest', (_message.Message,), {
  'DESCRIPTOR' : _CLOSESHAREREQUEST,
  '__module__' : 'control_pb2'
  # @@protoc_insertion_point(class_scope:CloseShareRequest)
  })
_sym_db.RegisterMessage(CloseShareRequest)

CloseShareInfo = _reflection.GeneratedProtocolMessageType('CloseShareInfo', (_message.Message,), {
  'DESCRIPTOR' : _CLOSESHAREINFO,
  '__module__' : 'control_pb2'
  # @@protoc_insertion_point(class_scope:CloseShareInfo)
  })
_sym_db.RegisterMessage(CloseShareInfo)

KillClientRequest = _reflection.GeneratedProtocolMessageType('KillClientRequest', (_message.Message,), {
  'DESCRIPTOR' : _KILLCLIENTREQUEST,
  '__module__' : 'control_pb2'
  # @@protoc_insertion_point(class_scope:KillClientRequest)
  })
_sym_db.RegisterMessage(KillClientRequest)

KillClientInfo = _reflection.GeneratedProtocolMessageType('KillClientInfo', (_message.Message,), {
  'DESCRIPTOR' : _KILLCLIENTINFO,
  '__module__' : 'control_pb2'
  # @@protoc_insertion_point(class_scope:KillClientInfo)
  })
_sym_db.RegisterMessage(KillClientInfo)

_SAMBACONTROL = DESCRIPTOR.services_by_name['SambaControl']
if _descriptor._USE_C_DESCRIPTORS == False:

  DESCRIPTOR._options = None
  _INFOREQUEST._serialized_start=17
  _INFOREQUEST._serialized_end=30
  _SAMBAINFO._serialized_start=32
  _SAMBAINFO._serialized_end=79
  _SAMBACONTAINERINFO._serialized_start=81
  _SAMBACONTAINERINFO._serialized_end=153
  _GENERALINFO._serialized_start=155
  _GENERALINFO._serialized_end=245
  _STATUSREQUEST._serialized_start=247
  _STATUSREQUEST._serialized_end=262
  _SESSIONCRYPTO._serialized_start=264
  _SESSIONCRYPTO._serialized_end=311
  _SESSIONINFO._serialized_start=314
  _SESSIONINFO._serialized_end=546
  _CONNINFO._serialized_start=548
  _CONNINFO._serialized_end=617
  _STATUSINFO._serialized_start=619
  _STATUSINFO._serialized_end=726
  _CLOSESHAREREQUEST._serialized_start=728
  _CLOSESHAREREQUEST._serialized_end=789
  _CLOSESHAREINFO._serialized_start=791
  _CLOSESHAREINFO._serialized_end=807
  _KILLCLIENTREQUEST._serialized_start=809
  _KILLCLIENTREQUEST._serialized_end=848
  _KILLCLIENTINFO._serialized_start=850
  _KILLCLIENTINFO._serialized_end=866
  _SAMBACONTROL._serialized_start=869
  _SAMBACONTROL._serialized_end=1070
# @@protoc_insertion_point(module_scope)
07070100000039000081A4000000000000000000000001684BE19C000022C8000000000000000000000000000000000000004300000000sambacc-v0.6+git.60.2f89a38/sambacc/grpc/generated/control_pb2.pyi"""
@generated by mypy-protobuf.  Do not edit manually!
isort:skip_file
Use proto3 as the older protobuf we need for centos doesn't support
2023 edition.
"""
import builtins
import collections.abc
import google.protobuf.descriptor
import google.protobuf.internal.containers
import google.protobuf.message
import sys

if sys.version_info >= (3, 8):
    import typing as typing_extensions
else:
    import typing_extensions

DESCRIPTOR: google.protobuf.descriptor.FileDescriptor

class InfoRequest(google.protobuf.message.Message):
    """--- Info ---
    Provide version numbers and basic information about the samba
    container instance. Mainly for debugging.
    """

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    def __init__(
        self,
    ) -> None: ...

global___InfoRequest = InfoRequest

class SambaInfo(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    VERSION_FIELD_NUMBER: builtins.int
    CLUSTERED_FIELD_NUMBER: builtins.int
    version: builtins.str
    clustered: builtins.bool
    def __init__(
        self,
        *,
        version: builtins.str = ...,
        clustered: builtins.bool = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["clustered", b"clustered", "version", b"version"]) -> None: ...

global___SambaInfo = SambaInfo

class SambaContainerInfo(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    SAMBACC_VERSION_FIELD_NUMBER: builtins.int
    CONTAINER_VERSION_FIELD_NUMBER: builtins.int
    sambacc_version: builtins.str
    container_version: builtins.str
    def __init__(
        self,
        *,
        sambacc_version: builtins.str = ...,
        container_version: builtins.str = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["container_version", b"container_version", "sambacc_version", b"sambacc_version"]) -> None: ...

global___SambaContainerInfo = SambaContainerInfo

class GeneralInfo(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    SAMBA_INFO_FIELD_NUMBER: builtins.int
    CONTAINER_INFO_FIELD_NUMBER: builtins.int
    @property
    def samba_info(self) -> global___SambaInfo: ...
    @property
    def container_info(self) -> global___SambaContainerInfo: ...
    def __init__(
        self,
        *,
        samba_info: global___SambaInfo | None = ...,
        container_info: global___SambaContainerInfo | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["container_info", b"container_info", "samba_info", b"samba_info"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["container_info", b"container_info", "samba_info", b"samba_info"]) -> None: ...

global___GeneralInfo = GeneralInfo

class StatusRequest(google.protobuf.message.Message):
    """--- Status ---
    Fetch status information from the samba instance. Includes basic
    information about connected clients.
    """

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    def __init__(
        self,
    ) -> None: ...

global___StatusRequest = StatusRequest

class SessionCrypto(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    CIPHER_FIELD_NUMBER: builtins.int
    DEGREE_FIELD_NUMBER: builtins.int
    cipher: builtins.str
    degree: builtins.str
    def __init__(
        self,
        *,
        cipher: builtins.str = ...,
        degree: builtins.str = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["cipher", b"cipher", "degree", b"degree"]) -> None: ...

global___SessionCrypto = SessionCrypto

class SessionInfo(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    SESSION_ID_FIELD_NUMBER: builtins.int
    USERNAME_FIELD_NUMBER: builtins.int
    GROUPNAME_FIELD_NUMBER: builtins.int
    REMOTE_MACHINE_FIELD_NUMBER: builtins.int
    HOSTNAME_FIELD_NUMBER: builtins.int
    SESSION_DIALECT_FIELD_NUMBER: builtins.int
    UID_FIELD_NUMBER: builtins.int
    GID_FIELD_NUMBER: builtins.int
    ENCRYPTION_FIELD_NUMBER: builtins.int
    SIGNING_FIELD_NUMBER: builtins.int
    session_id: builtins.str
    username: builtins.str
    groupname: builtins.str
    remote_machine: builtins.str
    hostname: builtins.str
    session_dialect: builtins.str
    uid: builtins.int
    gid: builtins.int
    @property
    def encryption(self) -> global___SessionCrypto: ...
    @property
    def signing(self) -> global___SessionCrypto: ...
    def __init__(
        self,
        *,
        session_id: builtins.str = ...,
        username: builtins.str = ...,
        groupname: builtins.str = ...,
        remote_machine: builtins.str = ...,
        hostname: builtins.str = ...,
        session_dialect: builtins.str = ...,
        uid: builtins.int = ...,
        gid: builtins.int = ...,
        encryption: global___SessionCrypto | None = ...,
        signing: global___SessionCrypto | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["encryption", b"encryption", "signing", b"signing"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["encryption", b"encryption", "gid", b"gid", "groupname", b"groupname", "hostname", b"hostname", "remote_machine", b"remote_machine", "session_dialect", b"session_dialect", "session_id", b"session_id", "signing", b"signing", "uid", b"uid", "username", b"username"]) -> None: ...

global___SessionInfo = SessionInfo

class ConnInfo(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    TCON_ID_FIELD_NUMBER: builtins.int
    SESSION_ID_FIELD_NUMBER: builtins.int
    SERVICE_NAME_FIELD_NUMBER: builtins.int
    tcon_id: builtins.str
    session_id: builtins.str
    service_name: builtins.str
    def __init__(
        self,
        *,
        tcon_id: builtins.str = ...,
        session_id: builtins.str = ...,
        service_name: builtins.str = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["service_name", b"service_name", "session_id", b"session_id", "tcon_id", b"tcon_id"]) -> None: ...

global___ConnInfo = ConnInfo

class StatusInfo(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    SERVER_TIMESTAMP_FIELD_NUMBER: builtins.int
    SESSIONS_FIELD_NUMBER: builtins.int
    TREE_CONNECTIONS_FIELD_NUMBER: builtins.int
    server_timestamp: builtins.str
    @property
    def sessions(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___SessionInfo]: ...
    @property
    def tree_connections(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___ConnInfo]: ...
    def __init__(
        self,
        *,
        server_timestamp: builtins.str = ...,
        sessions: collections.abc.Iterable[global___SessionInfo] | None = ...,
        tree_connections: collections.abc.Iterable[global___ConnInfo] | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["server_timestamp", b"server_timestamp", "sessions", b"sessions", "tree_connections", b"tree_connections"]) -> None: ...

global___StatusInfo = StatusInfo

class CloseShareRequest(google.protobuf.message.Message):
    """--- CloseShare ---
    Close shares to clients.
    """

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    SHARE_NAME_FIELD_NUMBER: builtins.int
    DENIED_USERS_FIELD_NUMBER: builtins.int
    share_name: builtins.str
    denied_users: builtins.bool
    def __init__(
        self,
        *,
        share_name: builtins.str = ...,
        denied_users: builtins.bool = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["denied_users", b"denied_users", "share_name", b"share_name"]) -> None: ...

global___CloseShareRequest = CloseShareRequest

class CloseShareInfo(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    def __init__(
        self,
    ) -> None: ...

global___CloseShareInfo = CloseShareInfo

class KillClientRequest(google.protobuf.message.Message):
    """--- KillClientConnection ---
    Forcibly disconnect a client.
    """

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    IP_ADDRESS_FIELD_NUMBER: builtins.int
    ip_address: builtins.str
    def __init__(
        self,
        *,
        ip_address: builtins.str = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["ip_address", b"ip_address"]) -> None: ...

global___KillClientRequest = KillClientRequest

class KillClientInfo(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    def __init__(
        self,
    ) -> None: ...

global___KillClientInfo = KillClientInfo
0707010000003A000081A4000000000000000000000001684BE19C00001A67000000000000000000000000000000000000004700000000sambacc-v0.6+git.60.2f89a38/sambacc/grpc/generated/control_pb2_grpc.py# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc

from . import control_pb2 as control__pb2


class SambaControlStub(object):
    """--- define rpcs ---

    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        self.Info = channel.unary_unary(
                '/SambaControl/Info',
                request_serializer=control__pb2.InfoRequest.SerializeToString,
                response_deserializer=control__pb2.GeneralInfo.FromString,
                )
        self.Status = channel.unary_unary(
                '/SambaControl/Status',
                request_serializer=control__pb2.StatusRequest.SerializeToString,
                response_deserializer=control__pb2.StatusInfo.FromString,
                )
        self.CloseShare = channel.unary_unary(
                '/SambaControl/CloseShare',
                request_serializer=control__pb2.CloseShareRequest.SerializeToString,
                response_deserializer=control__pb2.CloseShareInfo.FromString,
                )
        self.KillClientConnection = channel.unary_unary(
                '/SambaControl/KillClientConnection',
                request_serializer=control__pb2.KillClientRequest.SerializeToString,
                response_deserializer=control__pb2.KillClientInfo.FromString,
                )


class SambaControlServicer(object):
    """--- define rpcs ---

    """

    def Info(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Status(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def CloseShare(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def KillClientConnection(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')


def add_SambaControlServicer_to_server(servicer, server):
    rpc_method_handlers = {
            'Info': grpc.unary_unary_rpc_method_handler(
                    servicer.Info,
                    request_deserializer=control__pb2.InfoRequest.FromString,
                    response_serializer=control__pb2.GeneralInfo.SerializeToString,
            ),
            'Status': grpc.unary_unary_rpc_method_handler(
                    servicer.Status,
                    request_deserializer=control__pb2.StatusRequest.FromString,
                    response_serializer=control__pb2.StatusInfo.SerializeToString,
            ),
            'CloseShare': grpc.unary_unary_rpc_method_handler(
                    servicer.CloseShare,
                    request_deserializer=control__pb2.CloseShareRequest.FromString,
                    response_serializer=control__pb2.CloseShareInfo.SerializeToString,
            ),
            'KillClientConnection': grpc.unary_unary_rpc_method_handler(
                    servicer.KillClientConnection,
                    request_deserializer=control__pb2.KillClientRequest.FromString,
                    response_serializer=control__pb2.KillClientInfo.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'SambaControl', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))


 # This class is part of an EXPERIMENTAL API.
class SambaControl(object):
    """--- define rpcs ---

    """

    @staticmethod
    def Info(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/SambaControl/Info',
            control__pb2.InfoRequest.SerializeToString,
            control__pb2.GeneralInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def Status(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/SambaControl/Status',
            control__pb2.StatusRequest.SerializeToString,
            control__pb2.StatusInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def CloseShare(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/SambaControl/CloseShare',
            control__pb2.CloseShareRequest.SerializeToString,
            control__pb2.CloseShareInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def KillClientConnection(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/SambaControl/KillClientConnection',
            control__pb2.KillClientRequest.SerializeToString,
            control__pb2.KillClientInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
0707010000003B000041ED000000000000000000000002684BE19C00000000000000000000000000000000000000000000003300000000sambacc-v0.6+git.60.2f89a38/sambacc/grpc/protobufs0707010000003C000081A4000000000000000000000001684BE19C000007F3000000000000000000000000000000000000004100000000sambacc-v0.6+git.60.2f89a38/sambacc/grpc/protobufs/control.proto// Use proto3 as the older protobuf we need for centos doesn't support
// 2023 edition.
syntax = "proto3";

// Some requests and response types are currently empty. However, we don't use
// Empty, in case we want to extend them in the future.

// --- Info ---
// Provide version numbers and basic information about the samba
// container instance. Mainly for debugging.

message InfoRequest {}

message SambaInfo {
    string version = 1;
    bool clustered = 2;
}

message SambaContainerInfo {
    string sambacc_version = 1;
    string container_version = 2;
}

message GeneralInfo {
    SambaInfo samba_info = 1;
    SambaContainerInfo container_info = 2;
}

// --- Status ---
// Fetch status information from the samba instance. Includes basic
// information about connected clients.

message StatusRequest {}

message SessionCrypto {
    string cipher = 1;
    string degree = 2;
}

message SessionInfo {
    string session_id = 1;
    string username = 2;
    string groupname = 3;
    string remote_machine = 4;
    string hostname = 5;
    string session_dialect = 6;
    uint32 uid = 7;
    uint32 gid = 8;
    SessionCrypto encryption = 9;
    SessionCrypto signing = 10;
}

message ConnInfo {
    string tcon_id = 1;
    string session_id = 2;
    string service_name = 3;
}

message StatusInfo {
    string server_timestamp = 1;
    repeated SessionInfo sessions = 2;
    repeated ConnInfo tree_connections = 3;
}

// --- CloseShare ---
// Close shares to clients.

message CloseShareRequest {
    string share_name = 1;
    bool denied_users = 2;
}

message CloseShareInfo {}

// --- KillClientConnection ---
// Forcibly disconnect a client.

message KillClientRequest {
    string ip_address = 1;
}

message KillClientInfo {}

// --- define rpcs ---

service SambaControl {
    rpc Info (InfoRequest) returns (GeneralInfo);
    rpc Status (StatusRequest) returns (StatusInfo);
    rpc CloseShare (CloseShareRequest) returns (CloseShareInfo);
    rpc KillClientConnection (KillClientRequest) returns (KillClientInfo);
}
0707010000003D000081A4000000000000000000000001684BE19C00001A79000000000000000000000000000000000000003300000000sambacc-v0.6+git.60.2f89a38/sambacc/grpc/server.py#
# sambacc: a samba container configuration tool (and more)
# Copyright (C) 2025  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

from typing import Iterator, Protocol, Optional

import concurrent.futures
import contextlib
import logging

import grpc

import sambacc.grpc.backend as rbe
import sambacc.grpc.generated.control_pb2 as pb
import sambacc.grpc.generated.control_pb2_grpc as control_rpc

_logger = logging.getLogger(__name__)


class Backend(Protocol):
    """Protocol describing the operations the gRPC control service
    needs from a samba backend implementation (see ControlService)."""

    def get_versions(self) -> rbe.Versions: ...

    def is_clustered(self) -> bool: ...

    def get_status(self) -> rbe.Status: ...

    def close_share(self, share_name: str, denied_users: bool) -> None: ...

    def kill_client(self, ip_address: str) -> None: ...


@contextlib.contextmanager
def _in_rpc(context: grpc.ServicerContext, allowed: bool) -> Iterator[None]:
    """Guard the body of an RPC handler.

    When `allowed` is false the RPC is aborted with PERMISSION_DENIED
    (grpc's ``context.abort`` terminates the call by raising, so the
    body is never entered). Any exception escaping the body is logged
    and converted into an UNKNOWN abort.

    NOTE(review): the ``except Exception`` below would also catch the
    exception raised by a ``context.abort`` issued inside the body,
    re-aborting with UNKNOWN and masking the original status code; no
    current caller aborts from within the body, but worth confirming
    if one ever does.
    """
    if not allowed:
        _logger.error("Blocking operation")
        context.abort(
            grpc.StatusCode.PERMISSION_DENIED, "Operation not permitted"
        )
    try:
        yield
    except Exception:
        _logger.exception("exception in rpc call")
        context.abort(grpc.StatusCode.UNKNOWN, "Unexpected server error")


def _get_info(backend: Backend) -> pb.GeneralInfo:
    """Build a GeneralInfo message from the backend's version data
    and cluster state."""
    versions = backend.get_versions()
    samba_info = pb.SambaInfo(
        version=versions.samba_version,
        clustered=backend.is_clustered(),
    )
    container_info = pb.SambaContainerInfo(
        sambacc_version=versions.sambacc_version,
        container_version=versions.container_version,
    )
    return pb.GeneralInfo(
        samba_info=samba_info,
        container_info=container_info,
    )


def _convert_crypto(
    crypto: Optional[rbe.SessionCrypto],
) -> Optional[pb.SessionCrypto]:
    """Map a backend SessionCrypto to its protobuf form; a missing
    (falsy) value maps to None."""
    return (
        pb.SessionCrypto(cipher=crypto.cipher, degree=crypto.degree)
        if crypto
        else None
    )


def _convert_session(session: rbe.Session) -> pb.SessionInfo:
    """Convert a backend Session into a SessionInfo protobuf message."""
    out = pb.SessionInfo(
        session_id=session.session_id,
        username=session.username,
        groupname=session.groupname,
        remote_machine=session.remote_machine,
        hostname=session.hostname,
        session_dialect=session.session_dialect,
        encryption=_convert_crypto(session.encryption),
        signing=_convert_crypto(session.signing),
    )
    # The python side uses -1 for an unknown uid/gid; only positive ids
    # are copied so unknown values leave the protobuf fields unset.
    if session.uid > 0:
        out.uid = session.uid
    if session.gid > 0:
        out.gid = session.gid
    return out


def _convert_tcon(tcon: rbe.TreeConnection) -> pb.ConnInfo:
    """Convert a backend TreeConnection into a ConnInfo message."""
    info = pb.ConnInfo()
    info.tcon_id = tcon.tcon_id
    info.session_id = tcon.session_id
    info.service_name = tcon.service_name
    return info


def _convert_status(status: rbe.Status) -> pb.StatusInfo:
    """Convert a backend Status (timestamp, sessions and tree
    connections) into a StatusInfo message."""
    info = pb.StatusInfo(server_timestamp=status.timestamp)
    info.sessions.extend(_convert_session(s) for s in status.sessions)
    info.tree_connections.extend(_convert_tcon(t) for t in status.tcons)
    return info


class ControlService(control_rpc.SambaControlServicer):
    """Servicer translating SambaControl RPCs into Backend calls.

    Read RPCs (Info, Status) are always permitted; mutating RPCs
    (CloseShare, KillClientConnection) are rejected when the service
    was constructed with read_only=True.
    """

    def __init__(self, backend: Backend, *, read_only: bool = False):
        self._backend = backend
        self._read_only = read_only
        # reads are always allowed; writes only when not read-only
        self._ok_to_read = True
        self._ok_to_modify = not read_only

    def Info(
        self, request: pb.InfoRequest, context: grpc.ServicerContext
    ) -> pb.GeneralInfo:
        _logger.debug("RPC Called: Info")
        with _in_rpc(context, self._ok_to_read):
            return _get_info(self._backend)

    def Status(
        self, request: pb.StatusRequest, context: grpc.ServicerContext
    ) -> pb.StatusInfo:
        _logger.debug("RPC Called: Status")
        with _in_rpc(context, self._ok_to_read):
            return _convert_status(self._backend.get_status())

    def CloseShare(
        self, request: pb.CloseShareRequest, context: grpc.ServicerContext
    ) -> pb.CloseShareInfo:
        _logger.debug("RPC Called: CloseShare")
        with _in_rpc(context, self._ok_to_modify):
            self._backend.close_share(request.share_name, request.denied_users)
            return pb.CloseShareInfo()

    def KillClientConnection(
        self, request: pb.KillClientRequest, context: grpc.ServicerContext
    ) -> pb.KillClientInfo:
        _logger.debug("RPC Called: KillClientConnection")
        with _in_rpc(context, self._ok_to_modify):
            self._backend.kill_client(request.ip_address)
            return pb.KillClientInfo()


class ServerConfig:
    """Tunable settings for the gRPC control server (see serve())."""

    # maximum number of worker threads servicing RPCs
    max_workers: int = 8
    # host:port the server binds to
    address: str = "localhost:54445"
    # when true, mutating RPCs are blocked (see ControlService)
    read_only: bool = False
    # when true, serve without TLS; otherwise key/cert below are required
    insecure: bool = True
    # server TLS private key bytes (required when insecure is false)
    server_key: Optional[bytes] = None
    # server TLS certificate bytes (required when insecure is false)
    server_cert: Optional[bytes] = None
    # CA certificate; when set, client certs are required (mutual TLS)
    ca_cert: Optional[bytes] = None


def serve(config: ServerConfig, backend: Backend) -> None:
    """Run the SambaControl gRPC server until it terminates.

    Binds either an insecure or a TLS port according to `config`;
    raises ValueError if TLS is requested without a key or cert.
    """
    mode = "insecure" if config.insecure else "tls"
    access = "read-only" if config.read_only else "read-modify"
    _logger.info(
        "Starting gRPC server on %s (%s, %s)", config.address, mode, access
    )
    server = grpc.server(
        concurrent.futures.ThreadPoolExecutor(max_workers=config.max_workers)
    )
    control_rpc.add_SambaControlServicer_to_server(
        ControlService(backend, read_only=config.read_only), server
    )
    if config.insecure:
        server.add_insecure_port(config.address)
    else:
        if not config.server_key:
            raise ValueError("missing server TLS key")
        if not config.server_cert:
            raise ValueError("missing server TLS cert")
        key_pairs = [(config.server_key, config.server_cert)]
        if config.ca_cert:
            # a CA cert implies mutual TLS: clients must present certs
            creds = grpc.ssl_server_credentials(
                key_pairs,
                root_certificates=config.ca_cert,
                require_client_auth=True,
            )
        else:
            creds = grpc.ssl_server_credentials(key_pairs)
        server.add_secure_port(config.address, creds)
    server.start()
    # hack for testing: a config may carry a custom "wait" callable
    wait_fn = getattr(config, "wait", None)
    if wait_fn:
        wait_fn(server)
    else:
        server.wait_for_termination()
0707010000003E000081A4000000000000000000000001684BE19C00000C52000000000000000000000000000000000000003600000000sambacc-v0.6+git.60.2f89a38/sambacc/inotify_waiter.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import os
import typing

import inotify_simple as _inotify  # type: ignore

DEFAULT_TIMEOUT = 300


class INotify:
    """A waiter that monitors a file path for changes, based on inotify.

    Inotify is used to monitor the specified path for changes (writes).
    It stops waiting when the file is changed or the timeout is reached.

    A `print_func` can be specified as a simple logging method.
    """

    # default number of seconds to wait before reporting a timeout
    timeout: int = DEFAULT_TIMEOUT
    # optional logging callable; None disables logging
    print_func = None

    def __init__(
        self,
        path: str,
        print_func: typing.Optional[typing.Callable] = None,
        timeout: typing.Optional[int] = None,
    ) -> None:
        if timeout is not None:
            self.timeout = timeout
        self.print_func = print_func
        self._inotify = _inotify.INotify()
        # watch the containing directory (not the file itself) so events
        # are still seen if the file is deleted and recreated
        dirpath, fpath = os.path.split(path)
        if not dirpath:
            dirpath = "."
        if not fpath:
            raise ValueError("a file path is required")
        self._dir = dirpath
        self._name = fpath
        self._mask = _inotify.flags.DELETE | _inotify.flags.CLOSE_WRITE
        self._inotify.add_watch(self._dir, self._mask)

    def close(self) -> None:
        # release the underlying inotify file descriptor
        self._inotify.close()

    def _print(self, msg: str) -> None:
        # log through print_func when one was provided
        if self.print_func:
            self.print_func("[inotify waiter] {}".format(msg))

    def acted(self) -> None:
        return  # noop for inotify waiter

    def wait(self) -> None:
        # block until the file is modified or the timeout elapses
        next(self._wait())

    def _get_events(self) -> list[typing.Any]:
        """Read pending inotify events, keeping only relevant ones.

        Returns [None] on a read timeout so callers can distinguish a
        true timeout from a batch of events that were all filtered out.
        """
        timeout = 1000 * self.timeout
        self._print("waiting {}ms for activity...".format(timeout))
        events = self._inotify.read(timeout=timeout)
        if not events:
            # use "None" as a sentinel for a timeout, otherwise we can not
            # tell if its all events that didn't match or a true timeout
            return [None]
        # filter out events we don't care about
        # NOTE(review): the watch mask includes DELETE but only
        # CLOSE_WRITE events for the target name pass this filter; a
        # batch of only DELETE events produces an empty list and _wait
        # loops again (restarting the timeout window) — confirm intended.
        return [
            event
            for event in events
            if (event.name == self._name)
            and ((event.mask & _inotify.flags.CLOSE_WRITE) != 0)
        ]

    def _wait(self) -> typing.Iterator[None]:
        # yields once per timeout or per relevant modification event
        while True:
            for event in self._get_events():
                if event is None:
                    self._print("timed out")
                    yield None
                else:
                    self._print(f"{self._name} modified")
                    yield None
0707010000003F000081A4000000000000000000000001684BE19C00000C2C000000000000000000000000000000000000002D00000000sambacc-v0.6+git.60.2f89a38/sambacc/jfile.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#
"""Utilities for working with JSON data stored in a file system file.
"""

import fcntl
import json
import os
import typing

from sambacc.typelets import ExcType, ExcValue, ExcTraceback, Self

OPEN_RO = os.O_RDONLY
OPEN_RW = os.O_CREAT | os.O_RDWR


def open(path: str, flags: int, mode: int = 0o644) -> typing.IO:
    """A wrapper around open to open JSON files for read or read/write.
    `flags` must be os.open type flags. Use `OPEN_RO` and `OPEN_RW` for
    convenience.

    The stdio mode of the returned file object matches the requested
    flags: a read-only request yields a read-only file object rather
    than one that claims to be writable but fails at flush time.
    """
    # previously the fd was always wrapped with "r+", even when it was
    # opened O_RDONLY; writes then failed late (EBADF at flush) instead
    # of immediately
    fmode = "r+" if flags & os.O_RDWR else "r"
    return os.fdopen(os.open(path, flags, mode), fmode)


def load(
    fh: typing.IO, default: typing.Optional[dict[str, typing.Any]] = None
) -> typing.Any:
    """Similar to json.load, but returns the `default` value if fh
    refers to an empty file. fh must be seekable."""
    # probe a few bytes to detect an empty file
    probe = fh.read(4)
    if probe == "":
        return default
    fh.seek(0)
    return json.load(fh)


def dump(data: typing.Any, fh: typing.IO) -> None:
    """Serialize `data` as JSON into fh, truncating the file first so
    that no stale trailing content survives the write. fh must be
    seekable.
    """
    fh.seek(0)
    fh.truncate(0)
    fh.write(json.dumps(data))


def flock(fh: typing.IO) -> None:
    """A simple wrapper around flock.

    Blocks until an exclusive (LOCK_EX) advisory lock is acquired on
    the file underlying `fh`.
    """
    fcntl.flock(fh.fileno(), fcntl.LOCK_EX)


class ClusterMetaJSONHandle:
    """Context-managed handle pairing an open JSON file object with the
    module's empty-file-tolerant load/dump helpers."""

    def __init__(self, fh: typing.IO) -> None:
        self._fh = fh

    def load(self) -> typing.Any:
        # an empty file reads back as an empty dict
        return load(self._fh, {})

    def dump(self, data: typing.Any) -> None:
        dump(data, self._fh)
        self._fh.flush()
        # push the written data all the way to stable storage
        os.fsync(self._fh.fileno())

    def __enter__(self) -> Self:
        return self

    def __exit__(
        self, exc_type: ExcType, exc_val: ExcValue, exc_tb: ExcTraceback
    ) -> None:
        self._fh.close()


class ClusterMetaJSONFile:
    """Factory producing ClusterMetaJSONHandle objects for a path.

    Supports read-only and read-write access; write-only access is
    rejected. An exclusive flock may be taken before the handle is
    returned.
    """

    def __init__(self, path: str) -> None:
        self.path = path

    def open(
        self, *, read: bool = True, write: bool = False, locked: bool = False
    ) -> ClusterMetaJSONHandle:
        if not read:
            # neither write-only nor no-access handles are supported
            raise ValueError("write-only not supported")
        flags = OPEN_RW if write else OPEN_RO
        fh = open(self.path, flags)
        try:
            if locked:
                flock(fh)
        except Exception:
            # don't leak the fd if locking fails
            fh.close()
            raise
        return ClusterMetaJSONHandle(fh)
07070100000040000081A4000000000000000000000001684BE19C00001FD1000000000000000000000000000000000000002C00000000sambacc-v0.6+git.60.2f89a38/sambacc/join.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import enum
import errno
import json
import logging
import subprocess
import typing

from .opener import Opener, FileOpener
from sambacc import samba_cmds
from sambacc.simple_waiter import Waiter

_logger = logging.getLogger(__name__)


class JoinError(Exception):
    """Raised when a domain join can not be performed.

    `errors` may be populated by callers that attempt multiple join
    sources, collecting the error from each failed attempt.
    """

    def __init__(self, v: typing.Any) -> None:
        super().__init__(v)
        # individual errors accumulated across join attempts
        self.errors: list[typing.Any] = []


# sentinel password value — presumably requests interactive prompting
# instead of a literal password; TODO confirm against join()'s handling
_PROMPT = object()
_PT = typing.TypeVar("_PT")
# a password: either a literal string or a sentinel such as _PROMPT
_PW = typing.Union[str, _PT]


class JoinBy(enum.Enum):
    """Ways of supplying authentication data for a domain join."""

    PASSWORD = "password"
    FILE = "file"
    INTERACTIVE = "interactive"
    ODJ_FILE = "odj_file"


class UserPass:
    """Encapsulate a username/password pair."""

    # Class-level defaults, used when no explicit values are supplied.
    username: str = "Administrator"
    password: typing.Optional[_PW] = None

    def __init__(
        self,
        username: typing.Optional[str] = None,
        password: typing.Optional[_PW] = None,
    ) -> None:
        # Keep the class defaults unless explicit values were given.
        if username is not None:
            self.username = username
        if password is not None:
            self.password = password


class _JoinSource(typing.NamedTuple):
    """One candidate source of join auth data (see Joiner.add_*)."""

    method: JoinBy
    # Credentials; None for the file-based methods.
    upass: typing.Optional[UserPass]
    # Path or URI of the auth file; empty for credential-based methods.
    path: str


class Joiner:
    """Utility class for joining to AD domain.

    Use the `add_source` method to add one or more sources of join auth
    data. Call `join` to commit and join the "host" to AD.
    """

    # Base samba commands for the online and offline join variants.
    _net_ads_join = samba_cmds.net["ads", "join"]
    _requestodj = samba_cmds.net["offlinejoin", "requestodj"]

    def __init__(
        self,
        marker: typing.Optional[str] = None,
        *,
        opener: typing.Optional[Opener] = None,
    ) -> None:
        # marker: optional path of a JSON file used to record a
        # successful join (see _set_marker / did_join).
        self._source_paths: list[str] = []
        self._sources: list[_JoinSource] = []
        self.marker = marker
        self._opener = opener or FileOpener()

    def add_file_source(self, path_or_uri: str) -> None:
        """Add a JSON file (path or URI) holding username/password."""
        self._sources.append(_JoinSource(JoinBy.FILE, None, path_or_uri))

    def add_pw_source(self, value: UserPass) -> None:
        """Add an explicit username/password pair."""
        assert isinstance(value, UserPass)
        self._sources.append(_JoinSource(JoinBy.PASSWORD, value, ""))

    def add_interactive_source(self, value: UserPass) -> None:
        """Add a username whose password will be prompted for."""
        assert isinstance(value, UserPass)
        self._sources.append(_JoinSource(JoinBy.INTERACTIVE, value, ""))

    def add_odj_file_source(self, path_or_uri: str) -> None:
        """Add an offline-domain-join (ODJ) data file (path or URI)."""
        self._sources.append(_JoinSource(JoinBy.ODJ_FILE, None, path_or_uri))

    def join(self, dns_updates: bool = False) -> None:
        """Try each configured source in order until one succeeds.

        On success the join marker (if configured) is written and the
        method returns. If every source fails, a JoinError is raised;
        when more than one source was tried, the raised error carries
        the individual failures in its `errors` attribute.
        """
        if not self._sources:
            raise JoinError("no sources for join data")
        errors = []
        for src in self._sources:
            try:
                if src.method is JoinBy.PASSWORD:
                    assert src.upass
                    upass = src.upass
                    self._join(upass, dns_updates=dns_updates)
                elif src.method is JoinBy.FILE:
                    assert src.path
                    upass = self._read_from(src.path)
                    self._join(upass, dns_updates=dns_updates)
                elif src.method is JoinBy.INTERACTIVE:
                    assert src.upass
                    # Swap the password for the prompt sentinel so the
                    # join command prompts on the inherited stdin.
                    upass = UserPass(src.upass.username, _PROMPT)
                    self._join(upass, dns_updates=dns_updates)
                elif src.method is JoinBy.ODJ_FILE:
                    self._offline_join(src.path)
                else:
                    raise ValueError(f"invalid method: {src.method}")
                self._set_marker()
                return
            except JoinError as join_err:
                # Remember the failure and fall through to the next source.
                errors.append(join_err)
        if errors:
            if len(errors) == 1:
                raise errors[0]
            err = JoinError("failed {} join attempts".format(len(errors)))
            err.errors = errors
            raise err

    def _read_from(self, path: str) -> UserPass:
        """Read and validate a username/password JSON resource.

        Raises JoinError if the resource is missing or malformed.
        """
        try:
            with self._opener.open(path) as fh:
                data = json.load(fh)
        except FileNotFoundError:
            raise JoinError(f"source file not found: {path}")
        except OSError as err:
            # Non-file openers may report a missing resource as a plain
            # OSError carrying ENOENT; anything else is unexpected.
            if getattr(err, "errno", 0) != errno.ENOENT:
                raise
            raise JoinError(f"resource not found: {path}")
        upass = UserPass()
        try:
            upass.username = data["username"]
            upass.password = data["password"]
        except KeyError as err:
            raise JoinError(f"invalid file content: {err}")
        if not isinstance(upass.username, str):
            raise JoinError("invalid file content: invalid username")
        if not isinstance(upass.password, str):
            raise JoinError("invalid file content: invalid password")
        return upass

    def _interactive_input(self) -> typing.Optional[typing.IO]:
        # None makes the subprocess inherit this process's stdin so the
        # join command can prompt the user directly.
        return None

    def _join(self, upass: UserPass, dns_updates: bool = False) -> None:
        """Run the `net ads join` command, supplying the password on stdin."""
        args = []
        if not dns_updates:
            args.append("--no-dns-updates")
        args.extend(["-U", upass.username])

        if upass.password is _PROMPT:
            # Interactive: the command prompts via the inherited stdin.
            cmd = list(self._net_ads_join[args])
            proc = subprocess.Popen(cmd, stdin=self._interactive_input())
        else:
            # Feed the password (plus newline) through a pipe.
            cmd = list(self._net_ads_join[args])
            proc = subprocess.Popen(cmd, stdin=subprocess.PIPE)
            pw_data = samba_cmds.encode(upass.password)
            # mypy can't seem to handle the following lines, and none of my web
            # searches turned up a clear answer. ignore for now
            proc.stdin.write(pw_data)  # type: ignore
            proc.stdin.write(b"\n")  # type: ignore
            proc.stdin.close()  # type: ignore
        ret = proc.wait()
        if ret != 0:
            raise JoinError("failed to run {}".format(cmd))

    def _offline_join(self, path: str) -> None:
        """Run `net offlinejoin requestodj`, piping the ODJ data file in."""
        cmd = list(self._requestodj["-i"])
        try:
            with self._opener.open(path) as fh:
                proc = subprocess.Popen(cmd, stdin=subprocess.PIPE)
                assert proc.stdin  # mypy appeasment
                proc.stdin.write(fh.read())
                proc.stdin.close()
                ret = proc.wait()
                if ret != 0:
                    raise JoinError(f"failed running {cmd}")
        except FileNotFoundError:
            raise JoinError(f"source file not found: {path}")

    def _set_marker(self) -> None:
        # Record a successful join so later runs can skip re-joining.
        if self.marker is not None:
            with open(self.marker, "w") as fh:
                json.dump({"joined": True}, fh)

    def did_join(self) -> bool:
        """Return true if the join marker exists and contains a true
        value in the joined key.
        """
        if self.marker is None:
            return False
        try:
            with open(self.marker) as fh:
                data = json.load(fh)
        except (ValueError, OSError):
            # A missing or unparsable marker file means "not joined".
            return False
        try:
            return data["joined"]
        except (TypeError, KeyError):
            return False


def join_when_possible(
    joiner: Joiner,
    waiter: Waiter,
    error_handler: typing.Optional[typing.Callable] = None,
) -> None:
    """Loop until the host is joined to the domain.

    Returns immediately if a valid join marker exists. Otherwise a join
    is attempted; on JoinError, the error is passed to error_handler
    when one is supplied (then the waiter is used to pause before the
    next attempt), or re-raised when no handler is given.
    """
    while True:
        if joiner.did_join():
            _logger.info("found valid join marker")
            return
        try:
            joiner.join()
            _logger.info("successful join")
            return
        except JoinError as err:
            if error_handler is None:
                raise
            error_handler(err)
        waiter.wait()
07070100000041000081A4000000000000000000000001684BE19C0000061C000000000000000000000000000000000000002E00000000sambacc-v0.6+git.60.2f89a38/sambacc/leader.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import typing

from sambacc.typelets import ExcType, ExcValue, ExcTraceback


class LeaderStatus(typing.Protocol):
    """Fetches information about the current cluster leader."""

    # Protocol only: concrete implementations are provided elsewhere.
    def is_leader(self) -> bool:
        """Return true if the current node is the leader."""
        ...  # pragma: no cover


class LeaderLocator(typing.Protocol):
    """Acquire state needed to determine or fix a cluster leader.
    Can be used for purely informational types or types that
    actually acquire cluster leadership if needed.
    """

    def __enter__(self) -> LeaderStatus:
        """Enter context manager. Returns LeaderStatus."""
        ...  # pragma: no cover

    def __exit__(
        self, exc_type: ExcType, exc_val: ExcValue, exc_tb: ExcTraceback
    ) -> bool:
        """Exit context manager, releasing any acquired state."""
        ...  # pragma: no cover
07070100000042000081A4000000000000000000000001684BE19C00000C30000000000000000000000000000000000000003500000000sambacc-v0.6+git.60.2f89a38/sambacc/netcmd_loader.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import subprocess
import typing

from sambacc import config
from sambacc import samba_cmds


class LoaderError(Exception):
    """Raised when loading or applying samba configuration fails."""

    pass


def template_config(
    fh: typing.IO, iconfig: config.SambaConfig, enc: typing.Callable = str
) -> None:
    """Write iconfig to fh as smb.conf-style text.

    `enc` converts each output string before writing (e.g. to bytes
    for a binary stream); it defaults to str, a no-op for text IO.
    """
    write = fh.write
    write(enc("[global]\n"))
    for key, value in iconfig.global_options():
        write(enc(f"\t{key} = {value}\n"))

    for share in iconfig.shares():
        write(enc("\n[{}]\n".format(share.name)))
        for key, value in share.share_options():
            write(enc(f"\t{key} = {value}\n"))


class NetCmdLoader:
    """Loads and stores samba configuration via the `net conf` command."""

    _net_conf = samba_cmds.net["conf"]

    def _cmd(
        self, *args: str, **kwargs: typing.Any
    ) -> tuple[list[str], typing.Any]:
        # Build a full `net conf ...` argv and launch the subprocess.
        # Returns the argv (for error reporting) plus the Popen object.
        cmd = list(self._net_conf[args])
        return cmd, subprocess.Popen(cmd, **kwargs)

    def _check(self, cli: typing.Any, proc: subprocess.Popen) -> None:
        # Wait for the subprocess; raise LoaderError on nonzero exit.
        ret = proc.wait()
        if ret != 0:
            raise LoaderError("failed to run {}".format(cli))

    def import_config(self, iconfig: config.InstanceConfig) -> None:
        """Import to entire instance config to samba config."""
        # `net conf import /dev/stdin` reads smb.conf-style text we
        # render directly into the child's stdin pipe.
        cli, proc = self._cmd("import", "/dev/stdin", stdin=subprocess.PIPE)
        template_config(proc.stdin, iconfig, enc=samba_cmds.encode)
        proc.stdin.close()
        self._check(cli, proc)

    def dump(self, out: typing.IO) -> None:
        """Dump the current smb config in an smb.conf format.
        Writes the dump to `out`.
        """
        cli, proc = self._cmd("list", stdout=out)
        self._check(cli, proc)

    def _parse_shares(self, fh: typing.IO) -> typing.Iterable[str]:
        # Parse `net conf listshares` output: one share name per line.
        # The special "global" section is not a share and is skipped.
        out = []
        for line in fh.readlines():
            line = line.strip().decode("utf8")
            if line == "global":
                continue
            out.append(line)
        return out

    def current_shares(self) -> typing.Iterable[str]:
        """Returns a list of current shares."""
        cli, proc = self._cmd("listshares", stdout=subprocess.PIPE)
        # read and parse shares list
        try:
            shares = self._parse_shares(proc.stdout)
        finally:
            self._check(cli, proc)
        return shares

    def set(self, section: str, param: str, value: str) -> None:
        """Set an individual config parameter."""
        cli, proc = self._cmd("setparm", section, param, value)
        self._check(cli, proc)
07070100000043000081A4000000000000000000000001684BE19C0000092B000000000000000000000000000000000000003700000000sambacc-v0.6+git.60.2f89a38/sambacc/nsswitch_loader.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import typing

from .textfile import TextFileLoader


class NameServiceSwitchLoader(TextFileLoader):
    """Loader/updater for nsswitch.conf-style files.

    Tracks the positions of the passwd and group entries so they can
    be checked for, and updated to include, the winbind service.
    """

    def __init__(self, path: str) -> None:
        super().__init__(path)
        # Retained (non-blank, non-comment) lines, in original order.
        self.lines: list[str] = []
        # Maps "passwd"/"group" to that line's index within self.lines.
        self.idx: dict[str, int] = {}

    def loadlines(self, lines: typing.Iterable[str]) -> None:
        """Load in the lines from the text source."""
        # Keep only meaningful lines; drop blanks and '#' comments.
        self.lines.extend(
            ln for ln in lines if ln.strip() and not ln.startswith("#")
        )
        # (Re)index the passwd and group entries over all retained lines.
        for pos, ln in enumerate(self.lines):
            if ln.startswith("passwd:"):
                self.idx["passwd"] = pos
            if ln.startswith("group:"):
                self.idx["group"] = pos

    def dumplines(self) -> typing.Iterable[str]:
        """Dump the file content as lines of text."""
        yield "# Generated by sambacc -- DO NOT EDIT\n"
        last = None
        for ln in self.lines:
            # Keep entries separated if a stored line lacks a newline.
            if last and not last.endswith("\n"):
                yield "\n"
            yield ln
            last = ln

    def winbind_enabled(self) -> bool:
        """Return true if both passwd and group entries use winbind."""
        entries = [self.lines[self.idx[key]] for key in ("passwd", "group")]
        return all("winbind" in entry for entry in entries)

    def ensure_winbind_enabled(self) -> None:
        """Rewrite the passwd/group entries to include winbind if absent."""
        replacements = {
            "passwd": "passwd:    files winbind\n",
            "group": "group:    files winbind\n",
        }
        for key, newline in replacements.items():
            pos = self.idx[key]
            if "winbind" not in self.lines[pos]:
                self.lines[pos] = newline
07070100000044000081A4000000000000000000000001684BE19C000007A3000000000000000000000000000000000000002E00000000sambacc-v0.6+git.60.2f89a38/sambacc/opener.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2023  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import typing


class SchemeNotSupported(Exception):
    """Raised by an opener that does not handle the given URI scheme."""

    pass


class Opener(typing.Protocol):
    """Protocol for a basic opener type that takes a path-ish or uri-ish
    string and tries to open it.

    Implementations may raise SchemeNotSupported for strings whose
    scheme they do not handle (see FallbackOpener).
    """

    def open(self, path_or_uri: str) -> typing.IO:
        """Open a specified resource by path or (pseudo) URI."""
        ...  # pragma: no cover


class FallbackOpener:
    """Opener that tries a sequence of URI-style openers and falls back
    to opening the string as a plain local path.
    """

    def __init__(
        self,
        openers: list[Opener],
        open_fn: typing.Optional[typing.Callable[..., typing.IO]] = None,
    ) -> None:
        # open_fn is the final fallback; defaults to local file open.
        self._openers = openers
        self._open_fn = open_fn or FileOpener.open

    def open(self, path_or_uri: str) -> typing.IO:
        """Open path_or_uri with the first opener that accepts it."""
        for candidate in self._openers:
            try:
                return candidate.open(path_or_uri)
            except SchemeNotSupported:
                # This opener doesn't handle the scheme; try the next.
                continue
        return self._open(path_or_uri)

    def _open(self, path: str) -> typing.IO:
        # Last resort: treat the string as a local filesystem path.
        return self._open_fn(path)


class FileOpener:
    """Minimal opener supporting only plain local filesystem paths."""

    @staticmethod
    def open(path: str) -> typing.IO:
        """Open path for reading and return the binary file object."""
        # Binary mode: callers uniformly expect bytes from openers.
        mode = "rb"
        return open(path, mode)
07070100000045000081A4000000000000000000000001684BE19C00000B51000000000000000000000000000000000000003500000000sambacc-v0.6+git.60.2f89a38/sambacc/passdb_loader.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import typing

from sambacc import config

# Do the samba python bindings not export any useful constants?
# Account-control bits (ACB) as used by samba's passdb backend:
ACB_DISABLED = 0x00000001  # account is disabled
ACB_NORMAL = 0x00000010  # normal user account
ACB_PWNOEXP = 0x00000200  # password does not expire


def _samba_modules() -> tuple[typing.Any, typing.Any]:
    """Import and return samba's (param, passdb) modules.

    Imported lazily so this module can be loaded even when samba's
    python bindings are not installed.
    """
    from samba.samba3 import param  # type: ignore
    from samba.samba3 import passdb  # type: ignore

    return param, passdb


class PassDBLoader:
    """Creates or updates user entries in samba's passdb backend."""

    def __init__(self, smbconf: typing.Any = None) -> None:
        # Load samba's loadparm context from smbconf, or from the
        # default configuration when smbconf is None.
        param, passdb = _samba_modules()
        lp = param.get_context()
        if smbconf is None:
            lp.load_default()
        else:
            lp.load(smbconf)
        passdb.set_secrets_dir(lp.get("private dir"))
        self._pdb = passdb.PDB(lp.get("passdb backend"))
        self._passdb = passdb

    def add_user(self, user_entry: config.UserEntry) -> None:
        """Add or update a passdb user from user_entry.

        The entry must carry an NT password hash or a plaintext
        password; raises ValueError otherwise.
        """
        if not (user_entry.nt_passwd or user_entry.plaintext_passwd):
            raise ValueError(
                f"user entry {user_entry.username} lacks password value"
            )
        # probe for an existing user, by name
        try:
            samu = self._pdb.getsampwnam(user_entry.username)
        except self._passdb.error:
            samu = None
        # if it doesn't exist, create it
        if samu is None:
            # FIXME, research if there are better flag values to use
            acb = ACB_NORMAL | ACB_PWNOEXP
            self._pdb.create_user(user_entry.username, acb)
            samu = self._pdb.getsampwnam(user_entry.username)
        acb = samu.acct_ctrl
        # update password/metadata
        if user_entry.nt_passwd:
            samu.nt_passwd = user_entry.nt_passwd
        elif user_entry.plaintext_passwd:
            samu.plaintext_passwd = user_entry.plaintext_passwd
        # Try to mimic the behavior of smbpasswd and clear the account disabled
        # flag when adding or updating the user.
        # We don't expect granular, on the fly, user management in the
        # container, so it seems pointless to have a user that can't log in.
        if acb & ACB_DISABLED:
            samu.acct_ctrl = acb & ~ACB_DISABLED
        # update the db
        self._pdb.update_sam_account(samu)
07070100000046000081A4000000000000000000000001684BE19C00000B70000000000000000000000000000000000000003500000000sambacc-v0.6+git.60.2f89a38/sambacc/passwd_loader.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import typing

from .textfile import TextFileLoader
from sambacc import config


class LineFileLoader(TextFileLoader):
    """Text file loader that simply retains raw lines of text."""

    def __init__(self, path: str) -> None:
        super().__init__(path)
        # All lines read from the source, in original order.
        self.lines: list[str] = []

    def loadlines(self, lines: typing.Iterable[str]) -> None:
        """Load in the lines from the text source."""
        self.lines.extend(lines)

    def dumplines(self) -> typing.Iterable[str]:
        """Dump the file content as lines of text."""
        previous = None
        for current in self.lines:
            # If a stored line lacks a trailing newline, emit one so
            # consecutive entries never run together.
            if previous and not previous.endswith("\n"):
                yield "\n"
            yield current
            previous = current


class PasswdFileLoader(LineFileLoader):
    """Loader/updater for passwd(5)-style files."""

    def __init__(self, path: str = "/etc/passwd") -> None:
        super().__init__(path)
        # Usernames already present, used to avoid duplicate entries.
        self._usernames: set[str] = set()

    def readfp(self, fp: typing.IO) -> None:
        super().readfp(fp)
        self._update_usernames_cache()

    def _update_usernames_cache(self) -> None:
        # The username is the first colon-separated field of each entry.
        self._usernames.update(
            entry.split(":", 1)[0] for entry in self.lines if ":" in entry
        )

    def add_user(self, user_entry: config.UserEntry) -> None:
        """Append user_entry unless a user of that name is present."""
        if user_entry.username in self._usernames:
            return
        self.lines.append(":".join(user_entry.passwd_fields()) + "\n")
        self._usernames.add(user_entry.username)


class GroupFileLoader(LineFileLoader):
    """Loader/updater for group(5)-style files."""

    def __init__(self, path: str = "/etc/group") -> None:
        super().__init__(path)
        # Group names already present, used to avoid duplicate entries.
        self._groupnames: set[str] = set()

    def readfp(self, fp: typing.IO) -> None:
        super().readfp(fp)
        self._update_groupnames_cache()

    def _update_groupnames_cache(self) -> None:
        # The group name is the first colon-separated field of each entry.
        self._groupnames.update(
            entry.split(":", 1)[0] for entry in self.lines if ":" in entry
        )

    def add_group(self, group_entry: config.GroupEntry) -> None:
        """Append group_entry unless a group of that name is present."""
        if group_entry.groupname in self._groupnames:
            return
        self.lines.append(":".join(group_entry.group_fields()) + "\n")
        self._groupnames.add(group_entry.groupname)
07070100000047000081A4000000000000000000000001684BE19C0000070B000000000000000000000000000000000000002D00000000sambacc-v0.6+git.60.2f89a38/sambacc/paths.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import errno
import os


def ensure_samba_dirs(root: str = "/") -> None:
    """Ensure that certain directories that samba servers expect will
    exist. This is useful when mapping initially empty dirs into
    the container.
    """
    state_dir = os.path.join(root, "var/lib/samba")
    run_dir = os.path.join(root, "run/samba")
    winbindd_dir = os.path.join(run_dir, "winbindd")

    # Order matters: parents are created before their children.
    for path in (
        state_dir,
        os.path.join(state_dir, "private"),
        run_dir,
        winbindd_dir,
    ):
        _mkdir(path)
    # winbind clients must be able to traverse into the sockets dir.
    os.chmod(winbindd_dir, 0o755)


def _mkdir(path: str) -> None:
    try:
        os.mkdir(path)
    except OSError as err:
        if getattr(err, "errno", 0) != errno.EEXIST:
            raise


def ensure_share_dirs(path: str, root: str = "/") -> None:
    """Ensure that the given path exists.
    The optional root argument allows "reparenting" the path
    into a virtual root dir.
    """
    # Strip leading slashes so the path joins under root.
    relative = path.lstrip("/")
    os.makedirs(os.path.join(root, relative), exist_ok=True)
07070100000048000081A4000000000000000000000001684BE19C0000141B000000000000000000000000000000000000003300000000sambacc-v0.6+git.60.2f89a38/sambacc/permissions.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2022  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

from __future__ import annotations

import contextlib
import datetime
import errno
import logging
import os
import typing

from sambacc import _xattr as xattr


_logger = logging.getLogger(__name__)


class PermissionsHandler(typing.Protocol):
    """Protocol for types that manage permissions on a share path."""

    def has_status(self) -> bool:
        """Return true if the path has status metadata."""
        ...  # pragma: no cover

    def status_ok(self) -> bool:
        """Return true if status is OK (no changes are needed)."""
        ...  # pragma: no cover

    def update(self) -> None:
        """Update the permissions as needed."""
        ...  # pragma: no cover

    def path(self) -> str:
        """Return the path under consideration."""
        ...  # pragma: no cover


@contextlib.contextmanager
def _opendir(path: str) -> typing.Iterator[int]:
    """Yield an O_DIRECTORY fd for path, fsyncing it on clean exit.

    If the body or the fsync raises OSError, fall back to a global
    sync() so pending changes still reach stable storage. The fd is
    always closed.
    """
    dfd: int = os.open(path, os.O_DIRECTORY)
    try:
        yield dfd
        os.fsync(dfd)
    except OSError:
        # Best-effort durability when the targeted fsync fails.
        os.sync()
        raise
    finally:
        os.close(dfd)


class NoopPermsHandler:
    """Permissions handler that never inspects or changes anything.

    Satisfies the PermissionsHandler protocol for configurations
    where permissions management is disabled.
    """

    def __init__(
        self,
        path: str,
        status_xattr: str,
        options: typing.Dict[str, str],
        root: str = "/",
    ) -> None:
        # Only the path is retained; the other arguments are ignored.
        self._path = path

    def path(self) -> str:
        """Return the path under consideration."""
        return self._path

    def has_status(self) -> bool:
        """No-op handler never has status metadata."""
        return False

    def status_ok(self) -> bool:
        """No-op handler always reports an OK status."""
        return True

    def update(self) -> None:
        """No-op: nothing is ever updated."""
        pass


class InitPosixPermsHandler:
    """Initialize posix permissions on a share (directory).

    This handler sets posix permissions only.

    It will only set the permissions when the status xattr does not
    match the expected prefix value. This prevents it from overwriting
    permissions that may have been changed intentionally after
    share initialization.
    """

    # Defaults used when the options mapping does not override them.
    _default_mode = 0o777
    _default_status_prefix = "v1"

    def __init__(
        self,
        path: str,
        status_xattr: str,
        options: typing.Dict[str, str],
        root: str = "/",
    ) -> None:
        self._path = path
        self._root = root
        self._xattr = status_xattr
        try:
            # The "mode" option is an octal string (e.g. "0755").
            self._mode = int(options["mode"], 8)
        except KeyError:
            self._mode = self._default_mode
        try:
            self._prefix = options["status_prefix"]
        except KeyError:
            self._prefix = self._default_status_prefix

    def path(self) -> str:
        """Return the path under consideration."""
        return self._path

    def _full_path(self) -> str:
        # Re-root the share path under the (possibly virtual) root dir.
        return os.path.join(self._root, self._path.lstrip("/"))

    def has_status(self) -> bool:
        """Return true if the path has the status xattr set."""
        try:
            self._get_status()
            return True
        except KeyError:
            return False

    def status_ok(self) -> bool:
        """Return true if the stored status prefix matches the
        expected prefix (meaning no permission changes are needed)."""
        try:
            sval = self._get_status()
        except KeyError:
            return False
        curr_prefix = sval.split("/")[0]
        return curr_prefix == self._prefix

    def update(self) -> None:
        """Set permissions and record status, only if status is not OK."""
        if self.status_ok():
            return
        self._set_perms()
        self._set_status()

    def _get_status(self) -> str:
        """Read the status xattr value; raise KeyError when unset."""
        path = self._full_path()
        _logger.debug("reading xattr %r: %r", self._xattr, path)
        try:
            value = xattr.get(path, self._xattr, nofollow=True)
        except OSError as err:
            # ENODATA means the attribute is absent: map to KeyError.
            if err.errno == errno.ENODATA:
                raise KeyError(self._xattr)
            raise
        return value.decode("utf8")

    def _set_perms(self) -> None:
        # yeah, this is really simple compared to all the state management
        # stuff.
        path = self._full_path()
        with _opendir(path) as dfd:
            os.fchmod(dfd, self._mode)

    def _timestamp(self) -> str:
        # Seconds-since-epoch as a decimal string.
        return datetime.datetime.now().strftime("%s")

    def _set_status(self) -> None:
        # we save the marker prefix followed by a timestamp as a debugging hint
        ts = self._timestamp()
        val = f"{self._prefix}/{ts}"
        path = self._full_path()
        _logger.debug("setting xattr %r=%r: %r", self._xattr, val, self._path)
        with _opendir(path) as dfd:
            xattr.set(dfd, self._xattr, val, nofollow=True)


class AlwaysPosixPermsHandler(InitPosixPermsHandler):
    """Works like the init handler, but always sets the permissions,
    even if the status xattr exists and is valid.
    May be useful for testing and debugging.
    """

    def update(self) -> None:
        """Set permissions and refresh the status xattr unconditionally."""
        self._set_perms()
        self._set_status()
07070100000049000081A4000000000000000000000001684BE19C00003133000000000000000000000000000000000000003400000000sambacc-v0.6+git.60.2f89a38/sambacc/rados_opener.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2023  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

from __future__ import annotations

import io
import json
import logging
import time
import typing
import urllib.request
import uuid

from . import url_opener
from .typelets import ExcType, ExcValue, ExcTraceback, Self

# Aliases for the (optional) rados module and its connection objects;
# typed as Any because the ceph bindings may not be installed.
_RADOSModule = typing.Any
_RADOSObject = typing.Any

# Chunk size (bytes) used when streaming rados object data.
_CHUNK_SIZE = 4 * 1024

_logger = logging.getLogger(__name__)


class RADOSUnsupported(Exception):
    """Raised when rados support is requested but not available."""

    pass


class _RADOSInterface:
    """Holds the rados API module and the client identity used to
    create new connections.
    """

    api: _RADOSModule
    client_name: str
    full_name: bool

    def Rados(self) -> _RADOSObject:
        """Create and return a new Rados connection object."""
        # The api distinguishes a full name from a bare rados id;
        # exactly one of the two is passed as a non-empty string.
        name = ""
        rados_id = ""
        if self.full_name:
            name = self.client_name
        else:
            rados_id = self.client_name
        _logger.debug("Creating RADOS connection")
        return self.api.Rados(
            name=name,
            rados_id=rados_id,
            conffile=self.api.Rados.DEFAULT_CONF_FILES,
        )


class _RADOSHandler(urllib.request.BaseHandler):
    """urllib handler implementing the "rados:" pseudo-protocol.

    The class-level _interface is populated by enable_rados(); while it
    is None all rados operations raise RADOSUnsupported.
    """

    _interface: typing.Optional[_RADOSInterface] = None

    def rados_open(self, req: urllib.request.Request) -> typing.IO:
        """Open a rados-style url. Called from urllib."""
        interface = self._interface
        if interface is None:
            raise RADOSUnsupported()
        rinfo = parse_rados_uri(req)
        if rinfo.get("subtype") == "mon-config-key":
            # mon config-key values are fetched via a mon command rather
            # than an object read
            return _get_mon_config_key(interface, rinfo["path"])
        return RADOSObjectRef(
            interface, rinfo["pool"], rinfo["ns"], rinfo["key"]
        )

    def get_object(
        self, uri: str, *, must_exist: bool = False
    ) -> RADOSObjectRef:
        """Return a rados object reference for the given rados uri. The uri
        must refer to a rados object only as the RADOSObjectRef can do various
        rados-y things, more than an IO requires.
        """
        interface = self._interface
        if interface is None:
            raise RADOSUnsupported()
        rinfo = parse_rados_uri(urllib.request.Request(uri))
        if rinfo.get("type") != "rados":
            raise ValueError("only rados URI values supported")
        if rinfo.get("subtype") == "mon-config-key":
            raise ValueError("only rados object URI values supported")
        return RADOSObjectRef(
            interface,
            rinfo["pool"],
            rinfo["ns"],
            rinfo["key"],
            must_exist=must_exist,
        )


# it's quite annoying to have a read-only typing.IO we're forced to
# have so many stub methods. Go's much more granular io interfaces for
# readers/writers is much nicer for this.
class RADOSObjectRef(typing.IO):
    """A read-only, file-like reference to a single RADOS object.

    Only the read side of the typing.IO interface is truly implemented;
    the write/seek style methods exist to satisfy the interface and
    raise NotImplementedError. A few rados-specific extras are provided
    for internal use: write_full and the exclusive-lock helpers.
    """

    def __init__(
        self,
        interface: _RADOSInterface,
        pool: str,
        ns: str,
        key: str,
        *,
        must_exist: bool = True,
    ) -> None:
        """Connect to the cluster and bind to pool/namespace/key.

        When must_exist is true the object is stat'd immediately so a
        missing object raises early rather than on first read.
        """
        self._pool = pool
        self._ns = ns
        self._key = key
        self._lock_description = "sambacc RADOS library"
        self._lock_duration = None

        self._open(interface)
        if must_exist:
            self._test()

    def _open(self, interface: _RADOSInterface) -> None:
        # TODO: connection caching
        self._api = interface.api
        self._conn = interface.Rados()
        self._conn.connect()
        self._connected = True
        self._ioctx = self._conn.open_ioctx(self._pool)
        self._ioctx.set_namespace(self._ns)
        self._closed = False
        self._offset = 0

    def _test(self) -> None:
        # raises if the object does not exist
        self._ioctx.stat(self._key)

    def read(self, size: typing.Optional[int] = None) -> bytes:
        """Read size bytes from the current offset; read to the end of
        the object when size is None.
        """
        if self._closed:
            raise ValueError("can not read from closed response")
        return self._read_all() if size is None else self._read(size)

    def _read_all(self) -> bytes:
        # accumulate fixed-size chunks until a short read signals EOF
        ba = bytearray()
        while True:
            chunk = self._read(_CHUNK_SIZE)
            ba += chunk
            if len(chunk) < _CHUNK_SIZE:
                break
        return bytes(ba)

    def _read(self, size: int) -> bytes:
        result = self._ioctx.read(self._key, size, self._offset)
        self._offset += len(result)
        return result

    def close(self) -> None:
        """Release the ioctx and shut down the cluster connection.

        Safe to call more than once: both the ioctx close and the
        connection shutdown happen at most once.
        """
        if not self._closed:
            self._ioctx.close()
            self._closed = True
        if self._connected:
            self._conn.shutdown()
            # BUGFIX: clear the flag so a repeated close() does not call
            # shutdown() again on an already shut down connection
            self._connected = False

    @property
    def closed(self) -> bool:
        """True once close() has been called."""
        return self._closed

    @property
    def mode(self) -> str:
        """Always read-only binary."""
        return "rb"

    @property
    def name(self) -> str:
        """The rados object key."""
        return self._key

    def __enter__(self) -> Self:
        return self

    def __exit__(
        self, exc_type: ExcType, exc_val: ExcValue, exc_tb: ExcTraceback
    ) -> None:
        self.close()

    def __iter__(self) -> Self:
        return self

    def __next__(self) -> bytes:
        # iteration yields chunks (not lines); stop on EOF
        res = self.read(_CHUNK_SIZE)
        if not res:
            raise StopIteration()
        return res

    def seekable(self) -> bool:
        return False

    def readable(self) -> bool:
        return True

    def writable(self) -> bool:
        return False

    def flush(self) -> None:
        pass

    def isatty(self) -> bool:
        return False

    def tell(self) -> int:
        return self._offset

    def seek(self, offset: int, whence: int = 0) -> int:
        raise NotImplementedError()

    def fileno(self) -> int:
        raise NotImplementedError()

    def readline(self, limit: int = -1) -> bytes:
        raise NotImplementedError()

    def readlines(self, hint: int = -1) -> list[bytes]:
        raise NotImplementedError()

    def truncate(self, size: typing.Optional[int] = None) -> int:
        raise NotImplementedError()

    def write(self, s: typing.Any) -> int:
        raise NotImplementedError()

    def writelines(self, ls: typing.Iterable[typing.Any]) -> None:
        raise NotImplementedError()

    def write_full(self, data: bytes) -> None:
        """Write the object such that its contents are exactly `data`."""
        self._ioctx.write_full(self._key, data)

    def _lock_exclusive(self, name: str, cookie: str) -> None:
        # single non-blocking lock attempt; raises rados.ObjectBusy when
        # the lock is already held
        self._ioctx.lock_exclusive(
            self._key,
            name,
            cookie,
            desc=self._lock_description,
            duration=self._lock_duration,
        )

    def _acquire_lock_exclusive(
        self, name: str, cookie: str, *, delay: int = 1
    ) -> None:
        """Block until the exclusive lock is acquired, polling every
        `delay` seconds while the object is busy.
        """
        while True:
            try:
                self._lock_exclusive(name, cookie)
                return
            except self._api.ObjectBusy:
                _logger.debug(
                    "lock failed: %r, %r, %r: object busy",
                    self._key,
                    name,
                    cookie,
                )
                time.sleep(delay)

    def _unlock(self, name: str, cookie: str) -> None:
        self._ioctx.unlock(self._key, name, cookie)


def _get_mon_config_key(interface: _RADOSInterface, key: str) -> io.BytesIO:
    mcmd = json.dumps(
        {
            "prefix": "config-key get",
            "key": str(key),
        }
    )
    with interface.Rados() as rc:
        ret, out, err = rc.mon_command(mcmd, b"")
        if ret == 0:
            # We need to return a file like object. Since we are handed just
            # bytes from this api, use BytesIO to adapt it to something valid.
            return io.BytesIO(out)
        # ensure ceph didn't send us a negative errno
        ret = ret if ret > 0 else -ret
        msg = f"failed to get mon config key: {key!r}: {err}"
        raise OSError(ret, msg)


class ClusterMetaRADOSHandle:
    "A Cluster Meta Object can load or dump persistent cluster descriptions."

    def __init__(
        self,
        rados_obj: RADOSObjectRef,
        uri: str,
        *,
        read: bool,
        write: bool,
        locked: bool,
    ):
        """Wrap a rados object reference with read/write permission flags
        and optional exclusive locking for use as a context manager.
        """
        self._rados_obj = rados_obj
        self._uri = uri
        self._read = read
        self._write = write
        self._locked = locked
        if self._locked:
            self._lock_name = "cluster_meta"
            # unique cookie identifying this particular lock holder
            self._cookie = f"sambacc:{uuid.uuid4()}"

    def load(self) -> typing.Any:
        """Read and JSON-decode the cluster description; an empty object
        decodes to an empty dict.
        Raises ValueError if the handle was not opened for reading.
        """
        if not self._read:
            raise ValueError("not readable")
        buf = self._rados_obj.read()
        if not buf:
            return {}
        return json.loads(buf)

    def dump(self, data: typing.Any) -> None:
        """JSON-encode `data` and write it as the object's full contents.
        Raises ValueError if the handle was not opened for writing.
        """
        # BUGFIX: this previously checked self._read, which allowed dumps
        # on read-only handles and rejected them on write-only handles.
        if not self._write:
            raise ValueError("not writable")
        buf = json.dumps(data).encode("utf8")
        self._rados_obj.write_full(buf)

    def __enter__(self) -> Self:
        # acquire the exclusive object lock (blocking) when requested
        if self._locked:
            self._rados_obj._acquire_lock_exclusive(
                self._lock_name, self._cookie
            )
        return self

    def __exit__(
        self, exc_type: ExcType, exc_val: ExcValue, exc_tb: ExcTraceback
    ) -> None:
        if self._locked:
            self._rados_obj._unlock(self._lock_name, self._cookie)
        return


class ClusterMetaRADOSObject:
    """Factory for cluster-meta handles backed by a single rados object."""

    def __init__(self, rados_handler: _RADOSHandler, uri: str) -> None:
        self._handler = rados_handler
        self._uri = uri

    def open(
        self, *, read: bool = True, write: bool = False, locked: bool = False
    ) -> ClusterMetaRADOSHandle:
        """Return a handle over the underlying object with the requested
        read/write/lock behavior.
        """
        obj = self._handler.get_object(self._uri)
        return ClusterMetaRADOSHandle(
            obj, self._uri, read=read, write=write, locked=locked
        )

    @classmethod
    def create_from_uri(cls, uri: str) -> Self:
        """Return a new ClusterMetaRADOSObject given a rados uri string.
        If rados module is unavailable RADOSUnsupported will be raised.
        """
        handler = _RADOSHandler()
        if not handler._interface:
            raise RADOSUnsupported()
        return cls(handler, uri)


def is_rados_uri(uri: str) -> bool:
    """Return true if the string can be used as a rados (pseudo) URI.
    This function does not require the rados libraries to be available.
    NB: It does not validate the structure of the URI.
    """
    scheme_prefix = "rados:"
    return uri[: len(scheme_prefix)] == scheme_prefix


def parse_rados_uri(
    uri: typing.Union[str, urllib.request.Request]
) -> dict[str, str]:
    """Given a rados uri-like value return a dict containing a breakdown of the
    components of the uri.
    """
    if isinstance(uri, str):
        req = urllib.request.Request(uri)
    else:
        req = uri
    mon_subtype = "mon-config-key"
    if req.selector.startswith(mon_subtype + ":"):
        # mon config-key uris carry a plain path after the subtype marker
        _, _, path = req.selector.partition(":")
        return {
            "type": req.type,
            "subtype": mon_subtype,
            "path": path,
        }
    sel = req.selector.lstrip("/")
    if req.host:
        # "rados://pool/..." parses the pool as the host portion
        pool = req.host
        ns, key = sel.split("/", 1)
    else:
        # "rados:pool/ns/key" keeps everything in the selector
        pool, ns, key = sel.split("/", 2)
    return {
        "type": req.type,
        "subtype": "object",
        "pool": pool,
        "ns": ns,
        "key": key,
    }


def enable_rados(
    cls: typing.Type[url_opener.URLOpener],
    *,
    client_name: str = "",
    full_name: bool = False,
) -> None:
    """Enable Ceph RADOS support in sambacc.
    As as side-effect it will extend the URLOpener type to support pseudo-URLs
    for rados object storage. It will also enable the
    ClusterMetaRADOSObject.create_from_uri constructor. If rados libraries are
    not found the function does nothing.

    If rados libraries are found than URLOpener can be used like:
    >>> uo = url_opener.URLOpener()
    >>> res = uo.open("rados://my_pool/namepace/obj_key")
    >>> res.read()
    """
    try:
        import rados  # type: ignore[import]
    except ImportError:
        _logger.debug("Failed to import ceph 'rados' module")
        return

    _logger.debug(
        "Enabling ceph rados support with"
        f" client_name={client_name!r}, full_name={full_name}"
    )
    # record the module and client identity for later connection building
    interface = _RADOSInterface()
    interface.api = rados
    interface.client_name = client_name
    interface.full_name = full_name

    # wire the handler into urllib-based opener machinery
    _RADOSHandler._interface = interface
    cls._handlers.append(_RADOSHandler)
0707010000004A000081A4000000000000000000000001684BE19C00001603000000000000000000000000000000000000003200000000sambacc-v0.6+git.60.2f89a38/sambacc/samba_cmds.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

from __future__ import annotations

import os
import typing

# Aliases for optional debug-level strings and optional argument lists.
DebugLevel = typing.Optional[str]
ArgList = typing.Optional[list[str]]

# Process-wide command prefix and debug level applied to commands built in
# this module; set via set_global_prefix() / set_global_debug().
_GLOBAL_PREFIX: list[str] = []
_GLOBAL_DEBUG: str = ""


# Known flags for SAMBA_SPECIFICS env variable
_DAEMON_CLI_STDOUT_OPT: str = "daemon_cli_debug_output"
_CTDB_LEADER_ADMIN_CMD: str = "ctdb_leader_admin_command"
_CTDB_RADOS_MUTEX_SKIP_REG: str = "ctdb_rados_mutex_skip_reg"


def get_samba_specifics() -> typing.Set[str]:
    """Return the set of flag names found in the comma-separated
    SAMBA_SPECIFICS environment variable (empty set when unset/empty).
    """
    raw = os.environ.get("SAMBA_SPECIFICS", "")
    return set(raw.split(",")) if raw else set()


def _daemon_stdout_opt(daemon: str) -> str:
    """Return the CLI option that makes the given samba daemon log to
    stdout; honors the daemon_cli_debug_output samba-specifics flag.
    """
    if _DAEMON_CLI_STDOUT_OPT in get_samba_specifics():
        return "--debug-stdout"
    # historic spelling differs between smbd and the other daemons
    return "--log-stdout" if daemon == "smbd" else "--stdout"


def ctdb_leader_admin_cmd() -> str:
    """Return the ctdb admin sub-command naming the cluster leader;
    honors the ctdb_leader_admin_command samba-specifics flag.
    """
    if _CTDB_LEADER_ADMIN_CMD in get_samba_specifics():
        return "leader"
    return "recmaster"


def ctdb_rados_mutex_skip_registration_opt() -> str:
    """Return the "-R" (skip registration) option when the corresponding
    samba-specifics flag is set; otherwise an empty string.
    """
    flags = get_samba_specifics()
    return "-R" if _CTDB_RADOS_MUTEX_SKIP_REG in flags else ""


def set_global_prefix(lst: list[str]) -> None:
    """Replace the process-wide argument prefix that is prepended to the
    argv of every command built by this module (see prefix_args)."""
    _GLOBAL_PREFIX[:] = lst


def set_global_debug(level: str) -> None:
    """Set the process-wide debug level used by SambaCommand when a
    command does not carry its own debug level."""
    global _GLOBAL_DEBUG
    _GLOBAL_DEBUG = level


def _to_args(value: typing.Any) -> list[str]:
    if isinstance(value, str):
        return [value]
    return [str(v) for v in value]


class CommandArgs:
    """A utility class for building command line commands."""

    _name: str
    args: list[str]
    cmd_prefix: list[str]

    def __init__(self, name: str, args: ArgList = None):
        self._name = name
        self.args = args if args else []
        self.cmd_prefix = []

    def __getitem__(self, new_value: typing.Any) -> CommandArgs:
        """Return a new command with additional arguments appended."""
        extended = self.args + _to_args(new_value)
        return self.__class__(self._name, args=extended)

    def raw_args(self) -> list[str]:
        """The command name followed by its arguments (no prefix)."""
        return [self._name, *self.args]

    def prefix_args(self) -> list[str]:
        """The global prefix followed by this command's own prefix."""
        return [*_GLOBAL_PREFIX, *self.cmd_prefix]

    def argv(self) -> list[str]:
        """The full argument vector: prefixes plus the raw command."""
        return self.prefix_args() + self.raw_args()

    def __iter__(self) -> typing.Iterator[str]:
        yield from self.argv()

    def __repr__(self) -> str:
        return f"CommandArgs({self._name!r}, {self.args!r})"

    @property
    def name(self) -> str:
        """Return the command to be executed. This may differ from
        the underlying command.
        """
        return self.argv()[0]


class SambaCommand(CommandArgs):
    """A utility class for building samba (or any) command line command."""

    debug: DebugLevel

    def __init__(
        self, name: str, args: ArgList = None, debug: DebugLevel = None
    ):
        super().__init__(name, args)
        self.debug = debug

    def __getitem__(self, new_value: typing.Any) -> SambaCommand:
        """Return a new command with extra args, keeping the debug level."""
        combined = self.args + _to_args(new_value)
        return self.__class__(self._name, args=combined, debug=self.debug)

    def _debug_args(self, dlvl: str = "--debuglevel={}") -> list[str]:
        # per-command debug level wins over the module-wide default
        if self.debug:
            return [dlvl.format(self.debug)]
        if _GLOBAL_DEBUG:
            return [dlvl.format(_GLOBAL_DEBUG)]
        return []

    def raw_args(self) -> list[str]:
        """Name and args, with any applicable debug option appended."""
        return [self._name, *self.args, *self._debug_args()]

    def __repr__(self) -> str:
        return f"SambaCommand({self._name!r}, {self.args!r}, {self.debug!r})"


# Pre-built command objects for the common samba/winbind tools.
net = SambaCommand("net")

wbinfo = CommandArgs("wbinfo")

smbd = SambaCommand("/usr/sbin/smbd")

winbindd = SambaCommand("/usr/sbin/winbindd")

samba_dc = SambaCommand("/usr/sbin/samba")


def smbd_foreground() -> SambaCommand:
    """Return an smbd command set up to run in the foreground, logging
    to stdout and without a separate process group.
    """
    flags = [
        "--foreground",
        _daemon_stdout_opt("smbd"),
        "--no-process-group",
    ]
    return smbd[flags]


def winbindd_foreground() -> SambaCommand:
    """Return a winbindd command set up to run in the foreground,
    logging to stdout and without a separate process group.
    """
    flags = [
        "--foreground",
        _daemon_stdout_opt("winbindd"),
        "--no-process-group",
    ]
    return winbindd[flags]


def samba_dc_foreground() -> SambaCommand:
    """Return a samba AD DC command set up to run in the foreground,
    logging to stdout.
    """
    flags = ["--foreground", _daemon_stdout_opt("samba")]
    return samba_dc[flags]


# Pre-built command objects for ctdb and related utilities.
ctdbd = SambaCommand("/usr/sbin/ctdbd")

# ctdbd variant that stays attached to the terminal
ctdbd_foreground = ctdbd["--interactive"]

ltdbtool = CommandArgs("ltdbtool")

ctdb = CommandArgs("ctdb")

sambatool = SambaCommand("samba-tool")

smbcontrol = SambaCommand("smbcontrol")

smbstatus = SambaCommand("smbstatus")

# helper binary backing the ctdb cluster mutex stored in a rados object
ctdb_mutex_ceph_rados_helper = SambaCommand(
    "/usr/libexec/ctdb/ctdb_mutex_ceph_rados_helper"
)


def encode(value: typing.Union[str, bytes, None]) -> bytes:
    """Coerce an optional str/bytes value to bytes: None becomes b"",
    a str is utf8-encoded, and bytes pass through unchanged.
    """
    if value is None:
        return b""
    if isinstance(value, str):
        return value.encode("utf8")
    return value


def execute(cmd: CommandArgs) -> None:
    """Exec into the command specified (without forking).

    Replaces the current process image via os.execvp using the command's
    full argv; on success this call never returns.
    """
    os.execvp(cmd.name, cmd.argv())  # pragma: no cover
0707010000004B000041ED000000000000000000000002684BE19C00000000000000000000000000000000000000000000002B00000000sambacc-v0.6+git.60.2f89a38/sambacc/schema0707010000004C000081A4000000000000000000000001684BE19C00000000000000000000000000000000000000000000003700000000sambacc-v0.6+git.60.2f89a38/sambacc/schema/__init__.py0707010000004D000081A4000000000000000000000001684BE19C00002D44000000000000000000000000000000000000003F00000000sambacc-v0.6+git.60.2f89a38/sambacc/schema/conf-v0.schema.json{
  "$schema": "http://json-schema.org/draft-07/schema#",
  "$id": "mailto:phlogistonjohn+sambacc-v0@asynchrono.us",
  "title": "sambacc configuration",
  "description": "The configuration for the sambacc tool. sambacc configures Samba and the container\nenvironment to fit Samba's unique needs. This configuration can hold configuration\nfor more than one server \"instance\". The \"configs\" section contains one or more\nconfigurations with a name that can be selected at runtime. Share definitions\nand samba global configuration blocks can be mixed and matched.\n",
  "type": "object",
  "$defs": {
    "section_choices": {
      "description": "Selects sub-sections from elsewhere in the configuration.\n",
      "type": "array",
      "items": {
        "type": "string"
      }
    },
    "feature_flags": {
      "description": "Feature flags are used to enable specific, wide-ranging, features of\nsambacc. For example, it is used to enable clustered mode with ctdb.\n",
      "type": "array",
      "items": {
        "enum": [
          "addc",
          "ctdb"
        ]
      }
    },
    "samba_options": {
      "description": "A mapping of values that will be passed into the smb.conf (or equivalent)\nto directly configure Samba.\n",
      "type": "object",
      "additionalProperties": {
        "type": "string"
      }
    },
    "permissions_config": {
      "description": "Settings that enable and manage sambacc's permissions management support.\n",
      "type": "object",
      "properties": {
        "method": {
          "description": "Backend method for controlling permissions on shares",
          "type": "string"
        },
        "status_xattr": {
          "description": "xattr name used to store permissions state",
          "type": "string"
        }
      },
      "additionalProperties": {
        "type": "string"
      }
    },
    "user_entry": {
      "description": "A user that will be instantiated in the local container environment\nin order to provide access to smb shares.\n",
      "type": "object",
      "properties": {
        "name": {
          "description": "The user's name",
          "type": "string"
        },
        "uid": {
          "description": "The Unix UID the user should have",
          "type": "integer"
        },
        "gid": {
          "description": "The Unix GID the user should have",
          "type": "integer"
        },
        "nt_hash": {
          "description": "An NT-Hashed password",
          "type": "string"
        },
        "password": {
          "description": "A plain-text password",
          "type": "string"
        }
      },
      "required": [
        "name"
      ],
      "additionalProperties": false
    },
    "group_entry": {
      "description": "A group that will be instantiated in the local container environment\nin order to provide access to smb shares.\n",
      "type": "object",
      "properties": {
        "name": {
          "description": "The group name",
          "type": "string"
        },
        "gid": {
          "description": "The Unix GID the group should have",
          "type": "integer"
        }
      },
      "required": [
        "name"
      ],
      "additionalProperties": false
    },
    "domain_user_entry": {
      "description": "A user that will be created in the specified AD domain. These\nusers are populated in the directory after the domain is provisioned.\n",
      "type": "object",
      "properties": {
        "name": {
          "description": "The user's name",
          "type": "string"
        },
        "surname": {
          "description": "A surname for the user",
          "type": "string"
        },
        "given_name": {
          "description": "A given name for the user",
          "type": "string"
        },
        "uid": {
          "type": "integer"
        },
        "gid": {
          "type": "integer"
        },
        "nt_hash": {
          "type": "string"
        },
        "password": {
          "description": "A plain-text password",
          "type": "string"
        },
        "ou": {
          "description": "An organizational unit that the user should belong to",
          "type": "string"
        },
        "member_of": {
          "description": "A list of group names that the user should belong to",
          "type": "array",
          "items": {
            "type": "string"
          }
        }
      },
      "required": [
        "name"
      ],
      "additionalProperties": false
    },
    "domain_group_entry": {
      "description": "A group that will be created in the specified AD domain. These\ngroups are populated in the directory after the domain is provisioned.\n",
      "type": "object",
      "properties": {
        "name": {
          "description": "The group name",
          "type": "string"
        },
        "gid": {
          "type": "integer"
        },
        "ou": {
          "description": "An organizational unit that the user should belong to",
          "type": "string"
        }
      },
      "required": [
        "name"
      ],
      "additionalProperties": false
    },
    "organizational_unit_entry": {
      "description": "An organizational unit that will be created in the specified AD domain. These\nunits are populated in the directory after the domain is provisioned.\n",
      "type": "object",
      "properties": {
        "name": {
          "description": "The organizational unit name",
          "type": "string"
        }
      },
      "required": [
        "name"
      ],
      "additionalProperties": false
    }
  },
  "properties": {
    "samba-container-config": {
      "type": "string",
      "title": "Configuration Format Version",
      "description": "A short version string that assists in allowing the configuration\nformat to (some day) support incompatible version changes.\n(It is unique to the configuration and is not the version of sambacc)\n"
    },
    "configs": {
      "title": "Container Configurations",
      "description": "A mapping of named configurations (instances) to top-level configuration\nblocks. A usable configuration file must have at least one configuration,\nbut more than one is supported.\n",
      "type": "object",
      "additionalProperties": {
        "type": "object",
        "properties": {
          "shares": {
            "$ref": "#/$defs/section_choices"
          },
          "globals": {
            "$ref": "#/$defs/section_choices"
          },
          "instance_features": {
            "$ref": "#/$defs/feature_flags"
          },
          "permissions": {
            "$ref": "#/$defs/permissions_config"
          },
          "instance_name": {
            "description": "A name that will be set for the server instance.\n",
            "type": "string"
          },
          "domain_settings": {
            "description": "The name of the domain settings. Only used with 'ADDC' feature flag.\n",
            "type": "string"
          }
        },
        "additionalProperties": false
      }
    },
    "shares": {
      "description": "A mapping of share name to share specific configuration. A share can\nhave \"options\" that are passed to Samba. Shares can have an optional\n\"permissions\" section for managing permissions/acls in sambacc.\n",
      "type": "object",
      "additionalProperties": {
        "type": "object",
        "properties": {
          "options": {
            "$ref": "#/$defs/samba_options"
          },
          "permissions": {
            "$ref": "#/$defs/permissions_config"
          }
        },
        "additionalProperties": false
      }
    },
    "globals": {
      "description": "A mapping of samba global configuration blocks. The global section names\nare not passed to Samba. All sections selected by a configuration are\nmerged together before passing to Samba.\n",
      "type": "object",
      "additionalProperties": {
        "type": "object",
        "properties": {
          "options": {
            "$ref": "#/$defs/samba_options"
          }
        },
        "additionalProperties": false
      }
    },
    "domain_settings": {
      "description": "A mapping of AD DC domain configuration keys to domain configurations.\nThese parameters are used when provisioning an AD DC instance.\n",
      "type": "object",
      "additionalProperties": {
        "type": "object",
        "properties": {
          "realm": {
            "type": "string"
          },
          "short_domain": {
            "type": "string"
          },
          "admin_password": {
            "type": "string"
          },
          "interfaces": {
            "type": "object",
            "properties": {
              "include_pattern": {
                "type": "string",
                "description": "A regular expression that must match for a network interface\nto be included in the AD DC interfaces list.\n"
              },
              "exclude_pattern": {
                "type": "string",
                "description": "A regular expression that must not match for a network interface\nto be included in the AD DC interfaces list.\n"
              }
            }
          }
        },
        "required": [
          "realm"
        ],
        "additionalProperties": false
      }
    },
    "users": {
      "description": "Users to add to the container environment in order to provide\nShare access-control without becoming a domain member server.\n",
      "type": "object",
      "properties": {
        "all_entries": {
          "type": "array",
          "items": {
            "$ref": "#/$defs/user_entry"
          }
        }
      }
    },
    "groups": {
      "description": "Groups to add to the container environment in order to provide\nShare access-control without becoming a domain member server.\n",
      "type": "object",
      "properties": {
        "all_entries": {
          "type": "array",
          "items": {
            "$ref": "#/$defs/group_entry"
          }
        }
      }
    },
    "domain_users": {
      "description": "The domain_users section defines initial users that will be automatically\nadded to a newly provisioned domain. This section is a mapping of the\ndomain settings name to a list of domain user entries.\n",
      "type": "object",
      "additionalProperties": {
        "type": "array",
        "items": {
          "$ref": "#/$defs/domain_user_entry"
        }
      }
    },
    "domain_groups": {
      "description": "The domain_groups section defines initial groups that will be\nautomatically added to a newly provisioned domain. This section is\na mapping of the domain settings name to a list of domain group entries.\n",
      "type": "object",
      "additionalProperties": {
        "type": "array",
        "items": {
          "$ref": "#/$defs/domain_group_entry"
        }
      }
    },
    "organizational_units": {
      "description": "The organizational_units section defines initial organizational units that will be\nautomatically added to a newly provisioned domain. This section is\na mapping of the domain settings name to a list of organizational unit entries.\n",
      "type": "object",
      "additionalProperties": {
        "type": "array",
        "items": {
          "$ref": "#/$defs/organizational_unit_entry"
        }
      }
    },
    "ctdb": {
      "type": "object",
      "additionalProperties": {
        "type": "string"
      }
    }
  },
  "additionalProperties": false,
  "required": [
    "samba-container-config"
  ],
  "patternProperties": {
    "^_": true
  }
}0707010000004E000081A4000000000000000000000001684BE19C00002CBC000000000000000000000000000000000000003F00000000sambacc-v0.6+git.60.2f89a38/sambacc/schema/conf-v0.schema.yaml---
# EDIT THIS FILE
# When you are done editing this YAML representation, convert it into
# a matching <name>.json file in the same directory. That file exists
# for jsonschema implementations that can't read directly from YAML.
#
# After editing this file, generated files need to be updated.
# Run: python -m sambacc.schema.tool --update
#
$schema:  "http://json-schema.org/draft-07/schema#"
$id: "mailto:phlogistonjohn+sambacc-v0@asynchrono.us"
title: "sambacc configuration"
description: |
  The configuration for the sambacc tool. sambacc configures Samba and the container
  environment to fit Samba's unique needs. This configuration can hold configuration
  for more than one server "instance". The "configs" section contains one or more
  configurations with a name that can be selected at runtime. Share definitions
  and samba global configuration blocks can be mixed and matched.
type: "object"
$defs:
  # indirections from the configuration to named sections
  # under globals, shares, etc.
  section_choices:
    description: |
      Selects sub-sections from elsewhere in the configuration.
    type: array
    items:
      type: string
  # feature flags are a known set of values
  feature_flags:
    description: |
      Feature flags are used to enable specific, wide-ranging, features of
      sambacc. For example, it is used to enable clustered mode with ctdb.
    type: array
    items:
      enum:
        - addc
        - ctdb
  # options that are passed directly into smb.conf
  samba_options:
    description: |
      A mapping of values that will be passed into the smb.conf (or equivalent)
      to directly configure Samba.
    type: object
    additionalProperties:
      type: string
  # permissions backend configurations
  # each backend may have its own set of additional properties
  permissions_config:
    description: |
      Settings that enable and manage sambacc's permissions management support.
    type: object
    properties:
      method:
        description: Backend method for controlling permissions on shares
        type: string
      status_xattr:
        description: xattr name used to store permissions state
        type: string
    additionalProperties:
      type: string
  # file server user entries
  user_entry:
    description: |
      A user that will be instantiated in the local container environment
      in order to provide access to smb shares.
    type: object
    properties:
      name:
        description: The user's name
        type: string
      uid:
        description: The Unix UID the user should have
        type: integer
      gid:
        description: The Unix GID the user should have
        type: integer
      nt_hash:
        description: An NT-Hashed password
        type: string
      password:
        description: A plain-text password
        type: string
    required:
      - name
    additionalProperties: false
  # file server group entries
  group_entry:
    description: |
      A group that will be instantiated in the local container environment to
      in order to provide access to smb shares.
    type: object
    properties:
      name:
        description: The group name
        type: string
      gid:
        description: The Unix GID the group should have
        type: integer
    required:
      - name
    additionalProperties: false
  # domain controller user entries
  domain_user_entry:
    description: |
      A user that will be created in the specified AD domain. These
      users are populated in the directory after the domain is provisioned.
    type: object
    properties:
      name:
        description: The user's name
        type: string
      surname:
        description: A surname for the user
        type: string
      given_name:
        description: A given name for the user
        type: string
      uid:
        type: integer
      gid:
        type: integer
      nt_hash:
        type: string
      password:
        description: A plain-text password
        type: string
      ou:
        description: A organizational unit that the user should belong to
        type: string
      member_of:
        description: A list of group names that the user should belong to
        type: array
        items:
          type: string
    required:
      - name
    additionalProperties: false
  # domain controller group entries
  domain_group_entry:
    description: |
      A group that will be created in the specified AD domain. These
      groups are populated in the directory after the domain is provisioned.
    type: object
    properties:
      name:
        description: The group name
        type: string
      gid:
        type: integer
      ou:
        description: A organizational unit that the user should belong to
        type: string
    required:
      - name
    additionalProperties: false
  # domain controller organizational unit entries
  organizational_unit_entry:
    description: |
      A organizational unit that will be created in the specified AD domain. These
      groups are populated in the directory after the domain is provisioned.
    type: object
    properties:
      name:
        description: The organizational unit name
        type: string
    required:
      - name
    additionalProperties: false
properties:
  samba-container-config:
    type: "string"
    title: "Configuration Format Version"
    description: |
      A short version string that assists in allowing the configuration
      format to (some day) support incompatible version changes.
      (It is unique to the configuration and is not the version of sambacc)
  # top-level configuration section. each subsection is an "instance" -
  # a single server or a group of servers acting as one unit.
  # You can store multiple instance configurations in a single config and
  # use the sambacc --identity/SAMBA_CONTAINER_ID to select between them.
  configs:
    title: "Container Configurations"
    description: |
      A mapping of named configurations (instances) to top-level configuration
      blocks. A usable configuration file must have at least one configuration,
      but more than one is supported.
    type: object
    additionalProperties:
      type: object
      properties:
        shares:
          $ref: "#/$defs/section_choices"
        globals:
          $ref: "#/$defs/section_choices"
        instance_features:
          $ref: "#/$defs/feature_flags"
        permissions:
          $ref: "#/$defs/permissions_config"
        instance_name:
          description: |
            A name that will be set for the server instance.
          type: string
        domain_settings:
          description: |
            The name of the domain settings. Only used with 'ADDC' feature flag.
          type: string
      additionalProperties: false
  # share definitions.
  shares:
    description: |
      A mapping of share name to share specific configuration. A share can
      have "options" that are passed to Samba. Shares can have an optional
      "permissions" section for managing permissions/acls in sambacc.
    type: object
    additionalProperties:
      type: object
      properties:
        options:
          $ref: "#/$defs/samba_options"
        permissions:
          $ref: "#/$defs/permissions_config"
      additionalProperties: false
  # globals definitions.
  globals:
    description: |
      A mapping of samba global configuration blocks. The global section names
      are not passed to Samba. All sections selected by a configuration are
      merged together before passing to Samba.
    type: object
    additionalProperties:
      type: object
      properties:
        options:
          $ref: "#/$defs/samba_options"
      additionalProperties: false
  # domain_settings configures an AD DC based instance
  domain_settings:
    description: |
      A mapping of AD DC domain configuration keys to domain configurations.
      These parameters are used when provisioning an AD DC instance.
    type: object
    additionalProperties:
      type: object
      properties:
        realm:
          type: string
        short_domain:
          type: string
        admin_password:
          type: string
        interfaces:
          type: object
          properties:
            include_pattern:
              type: string
              description: |
                A regular expression that must match for a network interface
                to be included in the AD DC interfaces list.
            exclude_pattern:
              type: string
              description: |
                A regular expression that must not match for a network interface
                to be included in the AD DC interfaces list.
      required:
        - realm
      additionalProperties: false
  # users to be set up in the container environment prior to starting
  # a samba fileserver
  users:
    description: |
      Users to add to the container environment in order to provide
      Share access-control without becoming a domain member server.
    type: object
    properties:
      all_entries:
        type: array
        items:
          $ref: "#/$defs/user_entry"
  # groups to be set up in the container environment prior to starting
  # a samba fileserver
  groups:
    description: |
      Groups to add to the container environment in order to provide
      Share access-control without becoming a domain member server.
    type: object
    properties:
      all_entries:
        type: array
        items:
          $ref: "#/$defs/group_entry"
  # domain_users are users that will be initialized for a new AD DC instance
  domain_users:
    description: |
      The domain_users section defines initial users that will be automatically
      added to a newly provisioned domain. This section is a mapping of the
      domain settings name to a list of domain user entries.
    type: object
    additionalProperties:
      type: array
      items:
        $ref: "#/$defs/domain_user_entry"
  # domain_groups are groups that will be initialized for a new AD DC instance
  domain_groups:
    description: |
      The domain_groups section defines initial groups that will be
      automatically added to a newly provisioned domain. This section is
      a mapping of the domain settings name to a list of domain group entries.
    type: object
    additionalProperties:
      type: array
      items:
        $ref: "#/$defs/domain_group_entry"
  # organizational_units are organizational units that will be initialized for a new AD DC instance
  organizational_units:
    description: |
      The organizational_unit section defines initial organizational unit that will be
      automatically added to a newly provisioned domain. This section is
      a mapping of the domain settings name to a list of domain group entries.
    type: object
    additionalProperties:
      type: array
      items:
        $ref: "#/$defs/organizational_unit_entry"
  # ctdb customization settings
  # generally for developers/expert users only. these ctdb specific overrides
  # live outside the smb.conf and have their own section
  ctdb:
    type: object
    additionalProperties:
      type: string
additionalProperties: false
required:
  - samba-container-config
# we use the following patternProperties to allow any key starting
# with underscores so that the writer of the config can add extra
# metadata or comments freely.
patternProperties:
  "^_": true
0707010000004F000081A4000000000000000000000001684BE19C00003A72000000000000000000000000000000000000003D00000000sambacc-v0.6+git.60.2f89a38/sambacc/schema/conf_v0_schema.py#!/usr/bin/python3
# --- GENERATED FILE --- DO NOT EDIT --- #
# --- generated from: conf-v0.schema.yaml

SCHEMA = {
    "$schema": "http://json-schema.org/draft-07/schema#",
    "$id": "mailto:phlogistonjohn+sambacc-v0@asynchrono.us",
    "title": "sambacc configuration",
    "description": (
        "The configuration for the sambacc tool. sambacc configures Samba and"
        " the container\nenvironment to fit Samba's unique needs. This"
        " configuration can hold configuration\nfor more than one server"
        ' "instance". The "configs" section contains one or'
        " more\nconfiguration with a name that can be selected at runtime."
        " Share definitions\nand samba global configuration blocks can be"
        " mixed and matched.\n"
    ),
    "type": "object",
    "$defs": {
        "section_choices": {
            "description": (
                "Selects sub-sections from elsewhere in the configuration.\n"
            ),
            "type": "array",
            "items": {"type": "string"},
        },
        "feature_flags": {
            "description": (
                "Feature flags are used to enable specific, wide-ranging,"
                " features of\nsambacc. For example, it is used to enable"
                " clustered mode with ctdb.\n"
            ),
            "type": "array",
            "items": {"enum": ["addc", "ctdb"]},
        },
        "samba_options": {
            "description": (
                "A mapping of values that will be passed into the smb.conf"
                " (or equivalent)\nto directly configure Samba.\n"
            ),
            "type": "object",
            "additionalProperties": {"type": "string"},
        },
        "permissions_config": {
            "description": (
                "Settings that enable and manage sambacc's permissions"
                " management support.\n"
            ),
            "type": "object",
            "properties": {
                "method": {
                    "description": (
                        "Backend method for controlling permissions on shares"
                    ),
                    "type": "string",
                },
                "status_xattr": {
                    "description": (
                        "xattr name used to store permissions state"
                    ),
                    "type": "string",
                },
            },
            "additionalProperties": {"type": "string"},
        },
        "user_entry": {
            "description": (
                "A user that will be instantiated in the local container"
                " environment to\nin order to provide access to smb shares.\n"
            ),
            "type": "object",
            "properties": {
                "name": {"description": "The user's name", "type": "string"},
                "uid": {
                    "description": "The Unix UID the user should have",
                    "type": "integer",
                },
                "gid": {
                    "description": "The Unix GID the user should have",
                    "type": "integer",
                },
                "nt_hash": {
                    "description": "An NT-Hashed password",
                    "type": "string",
                },
                "password": {
                    "description": "A plain-text password",
                    "type": "string",
                },
            },
            "required": ["name"],
            "additionalProperties": False,
        },
        "group_entry": {
            "description": (
                "A group that will be instantiated in the local container"
                " environment to\nin order to provide access to smb shares.\n"
            ),
            "type": "object",
            "properties": {
                "name": {"description": "The group name", "type": "string"},
                "gid": {
                    "description": "The Unix GID the group should have",
                    "type": "integer",
                },
            },
            "required": ["name"],
            "additionalProperties": False,
        },
        "domain_user_entry": {
            "description": (
                "A user that will be created in the specified AD domain."
                " These\nusers are populated in the directory after the"
                " domain is provisioned.\n"
            ),
            "type": "object",
            "properties": {
                "name": {"description": "The user's name", "type": "string"},
                "surname": {
                    "description": "A surname for the user",
                    "type": "string",
                },
                "given_name": {
                    "description": "A given name for the user",
                    "type": "string",
                },
                "uid": {"type": "integer"},
                "gid": {"type": "integer"},
                "nt_hash": {"type": "string"},
                "password": {
                    "description": "A plain-text password",
                    "type": "string",
                },
                "ou": {
                    "description": (
                        "A organizational unit that the user should belong to"
                    ),
                    "type": "string",
                },
                "member_of": {
                    "description": (
                        "A list of group names that the user should belong to"
                    ),
                    "type": "array",
                    "items": {"type": "string"},
                },
            },
            "required": ["name"],
            "additionalProperties": False,
        },
        "domain_group_entry": {
            "description": (
                "A group that will be created in the specified AD domain."
                " These\ngroups are populated in the directory after the"
                " domain is provisioned.\n"
            ),
            "type": "object",
            "properties": {
                "name": {"description": "The group name", "type": "string"},
                "gid": {"type": "integer"},
                "ou": {
                    "description": (
                        "A organizational unit that the user should belong to"
                    ),
                    "type": "string",
                },
            },
            "required": ["name"],
            "additionalProperties": False,
        },
        "organizational_unit_entry": {
            "description": (
                "A organizational unit that will be created in the specified"
                " AD domain. These\ngroups are populated in the directory"
                " after the domain is provisioned.\n"
            ),
            "type": "object",
            "properties": {
                "name": {
                    "description": "The organizational unit name",
                    "type": "string",
                }
            },
            "required": ["name"],
            "additionalProperties": False,
        },
    },
    "properties": {
        "samba-container-config": {
            "type": "string",
            "title": "Configuration Format Version",
            "description": (
                "A short version string that assists in allowing the"
                " configuration\nformat to (some day) support incompatible"
                " version changes.\n(It is unique to the configuration and is"
                " not the version of sambacc)\n"
            ),
        },
        "configs": {
            "title": "Container Configurations",
            "description": (
                "A mapping of named configurations (instances) to top-level"
                " configuration\nblocks. A usable configuration file must"
                " have at least one configuration,\nbut more than one is"
                " supported.\n"
            ),
            "type": "object",
            "additionalProperties": {
                "type": "object",
                "properties": {
                    "shares": {"$ref": "#/$defs/section_choices"},
                    "globals": {"$ref": "#/$defs/section_choices"},
                    "instance_features": {"$ref": "#/$defs/feature_flags"},
                    "permissions": {"$ref": "#/$defs/permissions_config"},
                    "instance_name": {
                        "description": (
                            "A name that will be set for the server"
                            " instance.\n"
                        ),
                        "type": "string",
                    },
                    "domain_settings": {
                        "description": (
                            "The name of the domain settings. Only used with"
                            " 'ADDC' feature flag.\n"
                        ),
                        "type": "string",
                    },
                },
                "additionalProperties": False,
            },
        },
        "shares": {
            "description": (
                "A mapping of share name to share specific configuration. A"
                ' share can\nhave "options" that are passed to Samba. Shares'
                ' can have an optional\n"permissions" section for managing'
                " permissions/acls in sambacc.\n"
            ),
            "type": "object",
            "additionalProperties": {
                "type": "object",
                "properties": {
                    "options": {"$ref": "#/$defs/samba_options"},
                    "permissions": {"$ref": "#/$defs/permissions_config"},
                },
                "additionalProperties": False,
            },
        },
        "globals": {
            "description": (
                "A mapping of samba global configuration blocks. The global"
                " section names\nare not passed to Samba. All sections"
                " selected by a configuration are\nmerged together before"
                " passing to Samba.\n"
            ),
            "type": "object",
            "additionalProperties": {
                "type": "object",
                "properties": {"options": {"$ref": "#/$defs/samba_options"}},
                "additionalProperties": False,
            },
        },
        "domain_settings": {
            "description": (
                "A mapping of AD DC domain configuration keys to domain"
                " configurations.\nThese parameters are used when"
                " provisioning an AD DC instance.\n"
            ),
            "type": "object",
            "additionalProperties": {
                "type": "object",
                "properties": {
                    "realm": {"type": "string"},
                    "short_domain": {"type": "string"},
                    "admin_password": {"type": "string"},
                    "interfaces": {
                        "type": "object",
                        "properties": {
                            "include_pattern": {
                                "type": "string",
                                "description": (
                                    "A regular expression that must match for"
                                    " a network interface\nto be included in"
                                    " the AD DC interfaces list.\n"
                                ),
                            },
                            "exclude_pattern": {
                                "type": "string",
                                "description": (
                                    "A regular expression that must not match"
                                    " for a network interface\nto be included"
                                    " in the AD DC interfaces list.\n"
                                ),
                            },
                        },
                    },
                },
                "required": ["realm"],
                "additionalProperties": False,
            },
        },
        "users": {
            "description": (
                "Users to add to the container environment in order to"
                " provide\nShare access-control without becoming a domain"
                " member server.\n"
            ),
            "type": "object",
            "properties": {
                "all_entries": {
                    "type": "array",
                    "items": {"$ref": "#/$defs/user_entry"},
                }
            },
        },
        "groups": {
            "description": (
                "Groups to add to the container environment in order to"
                " provide\nShare access-control without becoming a domain"
                " member server.\n"
            ),
            "type": "object",
            "properties": {
                "all_entries": {
                    "type": "array",
                    "items": {"$ref": "#/$defs/group_entry"},
                }
            },
        },
        "domain_users": {
            "description": (
                "The domain_users section defines initial users that will be"
                " automatically\nadded to a newly provisioned domain. This"
                " section is a mapping of the\ndomain settings name to a list"
                " of domain user entries.\n"
            ),
            "type": "object",
            "additionalProperties": {
                "type": "array",
                "items": {"$ref": "#/$defs/domain_user_entry"},
            },
        },
        "domain_groups": {
            "description": (
                "The domain_groups section defines initial groups that will"
                " be\nautomatically added to a newly provisioned domain. This"
                " section is\na mapping of the domain settings name to a list"
                " of domain group entries.\n"
            ),
            "type": "object",
            "additionalProperties": {
                "type": "array",
                "items": {"$ref": "#/$defs/domain_group_entry"},
            },
        },
        "organizational_units": {
            "description": (
                "The organizational_unit section defines initial"
                " organizational unit that will be\nautomatically added to a"
                " newly provisioned domain. This section is\na mapping of the"
                " domain settings name to a list of domain group entries.\n"
            ),
            "type": "object",
            "additionalProperties": {
                "type": "array",
                "items": {"$ref": "#/$defs/organizational_unit_entry"},
            },
        },
        "ctdb": {
            "type": "object",
            "additionalProperties": {"type": "string"},
        },
    },
    "additionalProperties": False,
    "required": ["samba-container-config"],
    "patternProperties": {"^_": True},
}
07070100000050000081A4000000000000000000000001684BE19C0000132A000000000000000000000000000000000000003300000000sambacc-v0.6+git.60.2f89a38/sambacc/schema/tool.py#!/usr/bin/python3
"""Convert or compare schema files written in YAML to the corresponding
files stored in JSON.
"""

import argparse
import collections
import json
import os
import pprint
import subprocess
import sys

import yaml


nameparts = collections.namedtuple("nameparts", "full head ext")
filepair = collections.namedtuple("filepair", "origin dest format")


def _namesplit(name):
    head, ext = name.split(".", 1)
    return nameparts(name, head, ext)


def _pyname(np):
    head = np.head.replace("-", "_")
    if np.ext.startswith("schema."):
        head += "_schema"
    ext = "py"
    return nameparts(f"{head}.{ext}", head, ext)


def _format_black(path):
    """Run the `black` formatter on the file at `path`.

    Raises subprocess.CalledProcessError if black exits non-zero
    (check=True below).
    """
    # black does not formally have an api. Safest way to use it is via
    # the cli
    path = os.path.abspath(path)
    # the --preview arg allows black to break up long strings that
    # the general check would discover and complain about. Otherwise
    # we'd be forced to ignore the formatting on these .py files.
    subprocess.run(["black", "-l78", "--preview", path], check=True)


def match(files):
    """Pair each .schema.yaml source in `files` with its generated outputs.

    For every YAML schema file, two filepairs are produced: one for the
    JSON output and one for the Python output. A pair's dest is None
    when no matching generated file exists. Names without a dot are
    ignored.
    """
    buckets = {"schema.yaml": [], "schema.json": [], "py": []}
    for fname in files:
        try:
            np = _namesplit(fname)
        except ValueError:
            # no dot in the name: not a file we care about
            continue
        if np.ext in buckets:
            buckets[np.ext].append(np)
    pairs = []
    for yname in buckets["schema.yaml"]:
        # first json file sharing the yaml's head, if any
        jdest = next(
            (j for j in buckets["schema.json"] if j.head == yname.head),
            None,
        )
        pairs.append(filepair(yname, jdest, "JSON"))
        # first python file matching the derived module name, if any
        pyhead = _pyname(yname).head
        pydest = next(
            (p for p in buckets["py"] if p.head == pyhead), None
        )
        pairs.append(filepair(yname, pydest, "PYTHON"))
    return pairs


def report(func, path, yaml_file, json_file, fmt):
    """Run `func` for one yaml/output pair and print a status line.

    Returns True when the pair needs an update (MISMATCH), otherwise
    None, so callers can collect results and any() them.
    """
    mismatch = func(path, yaml_file, json_file, fmt)
    target = json_file.full if json_file else "---"
    status = "MISMATCH" if mismatch else "OK"
    print(f"{yaml_file.full} -> {fmt.lower()}:{target}   {status}")
    return True if mismatch else None


def update_json(path, yaml_file, json_file):
    """Regenerate the .schema.json file from its YAML source.

    `json_file` is unused; it exists so the signature matches
    update_py for dispatch.
    """
    src = os.path.join(path, yaml_file.full)
    dest = os.path.join(path, f"{yaml_file.head}.schema.json")
    with open(src) as fh:
        data = yaml.safe_load(fh)
    with open(dest, "w") as fh:
        json.dump(data, fh, indent=2)


def compare_json(path, yaml_file, json_file):
    """Return True when the YAML source and JSON output disagree.

    A missing JSON file (None) always counts as a mismatch.
    """
    if json_file is None:
        return True
    with open(os.path.join(path, yaml_file.full)) as fh:
        from_yaml = yaml.safe_load(fh)
    with open(os.path.join(path, json_file.full)) as fh:
        from_json = json.load(fh)
    return from_yaml != from_json


def update_py(path, yaml_file, py_file):
    """Regenerate the *_schema.py module from its YAML source.

    Writes a small generated-file header plus a SCHEMA constant, then
    runs black on the result. `py_file` is unused; it exists so the
    signature matches update_json for dispatch.
    """
    src = os.path.join(path, yaml_file.full)
    dest = os.path.join(path, _pyname(yaml_file).full)
    with open(src) as fh:
        data = yaml.safe_load(fh)
    lines = [
        "#!/usr/bin/python3",
        "# --- GENERATED FILE --- DO NOT EDIT --- #",
        f"# --- generated from: {yaml_file.full}",
        "",
        # wide pprint keeps each dict on one line; black reflows it
        "SCHEMA = " + pprint.pformat(data, width=800, sort_dicts=False),
    ]
    with open(dest, "w") as fh:
        fh.write("\n".join(lines))
    _format_black(dest)


def compare_py(path, yaml_file, py_file):
    """Return True when the YAML source and generated .py disagree.

    A missing python file (None) always counts as a mismatch.
    """
    if py_file is None:
        return True
    with open(os.path.join(path, yaml_file.full)) as fh:
        from_yaml = yaml.safe_load(fh)
    scope = {}
    with open(os.path.join(path, py_file.full)) as fh:
        # exec of a generated sibling file; dev tool only, input is trusted
        exec(fh.read(), None, scope)
    return from_yaml != (scope.get("SCHEMA") or {})


def update(path, yaml_data, other_file, fmt):
    """Dispatch to the update function for the given output format."""
    fn = update_py if fmt == "PYTHON" else update_json
    return fn(path, yaml_data, other_file)


def compare(path, yaml_data, other_file, fmt):
    """Dispatch to the compare function for the given output format."""
    fn = compare_py if fmt == "PYTHON" else compare_json
    return fn(path, yaml_data, other_file)


def main():
    """CLI entry point: verify (default) or --update the generated files.

    Exits non-zero when any yaml/output pair needs updating.
    """
    parser = argparse.ArgumentParser()
    # default to the directory containing this script
    parser.add_argument("DIR", default=os.path.dirname(__file__), nargs="?")
    parser.add_argument("--update", action="store_true")
    cli = parser.parse_args()

    os.chdir(cli.DIR)
    action = update if cli.update else compare
    results = [report(action, ".", *pair) for pair in match(os.listdir("."))]
    if any(results):
        sys.exit(1)


if __name__ == "__main__":
    main()
07070100000051000081A4000000000000000000000001684BE19C00000BF2000000000000000000000000000000000000003500000000sambacc-v0.6+git.60.2f89a38/sambacc/simple_waiter.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import time
import typing


def generate_sleeps() -> typing.Iterator[int]:
    """Generate sleep times starting with short sleeps and then
    getting longer. This assumes that resources may take a bit of
    time to settle, but eventually reach a steadier state and don't
    require being checked as often.

    Yields 1 until roughly 10s have accumulated, then 5 until roughly
    120s, then 60 forever.
    """
    total = 0
    while True:
        if total > 120:
            val = 60
        elif total > 10:
            val = 5
        else:
            val = 1
        yield val
        total += val


# It's a bit overkill to have a class for this but I didn't like messing
# around with functools.partial or functions returning functions for this.
# It's also nice to replace the sleep function for unit tests.
class Sleeper:
    """It waits only by sleeping. Nothing fancy."""

    def __init__(
        self, times: typing.Optional[typing.Iterator[int]] = None
    ) -> None:
        # times: iterator of sleep durations; defaults to the
        # escalating backoff produced by generate_sleeps()
        if times is None:
            times = generate_sleeps()
        self._times = times
        # indirection so unit tests can substitute a fake sleep
        self._sleep = time.sleep

    def wait(self) -> None:
        """Sleep for the next duration in the sequence."""
        self._sleep(next(self._times))

    def acted(self) -> None:
        """Inform the sleeper the caller reacted to a change and
        the sleeps should be reset.
        """
        # bug fix: this previously assigned to `self.times` (a new,
        # unused attribute), so the backoff was never actually reset
        # and the real iterator kept advancing.
        self._times = generate_sleeps()


class Waiter(typing.Protocol):
    """Waiter protocol - interfaces common to all waiters."""

    def wait(self) -> None:
        """Pause execution for a time."""
        ...  # pragma: no cover

    def acted(self) -> None:
        """Inform the waiter that changes were made."""
        ...  # pragma: no cover


def watch(
    waiter: Waiter,
    initial_value: typing.Any,
    fetch_func: typing.Callable[..., typing.Any],
    compare_func: typing.Callable[..., typing.Tuple[typing.Any, bool]],
) -> None:
    """A very simple "event loop" that fetches current data with
    `fetch_func`, compares and updates state with `compare_func`, and
    then pauses via `waiter.wait()` before the next iteration.

    `compare_func` is called with (current, previous) and must return a
    (new_previous, updated) tuple. When `updated` is true the waiter is
    informed via acted() so it can reset its backoff. Loops until a
    KeyboardInterrupt, which returns cleanly.
    """
    previous = initial_value
    while True:
        try:
            previous, updated = compare_func(fetch_func(), previous)
        except FileNotFoundError:
            # the watched resource vanished: treat as "no change" and
            # forget the previous state
            updated = False
            previous = None
        try:
            if updated:
                waiter.acted()
            waiter.wait()
        except KeyboardInterrupt:
            return
07070100000052000081A4000000000000000000000001684BE19C00000A59000000000000000000000000000000000000003300000000sambacc-v0.6+git.60.2f89a38/sambacc/smbconf_api.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2023  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import typing


class ConfigStore(typing.Protocol):
    """Protocol for configuration stores: mapping-like objects keyed by
    section (share) name whose values are lists of (key, value) pairs.
    """

    def __getitem__(self, name: str) -> list[tuple[str, str]]:
        """Get an item, returning a config section."""
        ...  # pragma: no cover

    def __setitem__(self, name: str, value: list[tuple[str, str]]) -> None:
        """Set a new config section."""
        ...  # pragma: no cover

    def __iter__(self) -> typing.Iterator[str]:
        """Iterate over config sections in the store."""
        ...  # pragma: no cover


class SimpleConfigStore:
    """In-memory configuration store backed by a plain dict."""

    def __init__(self) -> None:
        # maps section name -> list of (key, value) option pairs
        self._data: dict[str, list[tuple[str, str]]] = {}

    @property
    def writeable(self) -> bool:
        """True if using a read-write backend."""
        return True

    def __getitem__(self, name: str) -> list[tuple[str, str]]:
        return self._data[name]

    def __setitem__(self, name: str, value: list[tuple[str, str]]) -> None:
        self._data[name] = value

    def __iter__(self) -> typing.Iterator[str]:
        return iter(self._data)

    def import_smbconf(
        self, src: ConfigStore, batch_size: typing.Optional[int] = None
    ) -> None:
        """Import content from one SMBConf configuration object into the
        current SMBConf configuration object.

        batch_size is ignored.
        """
        for section in src:
            self._data[section] = src[section]


def write_store_as_smb_conf(out: typing.IO, conf: ConfigStore) -> None:
    """Write the configuration store in smb.conf format to `out`.

    The "global" section, when present, is always emitted first; the
    remaining sections keep their original relative order.
    """
    # unfortunately, AFAIK, there's no way for an smbconf to write
    # into an smb.conf/ini style file. We have to do it on our own.
    # ---
    # Make sure global section comes first. sorted() is stable, so all
    # non-global sections retain their original relative order.
    sections = sorted(conf, key=lambda v: 0 if v == "global" else 1)
    for sname in sections:
        # format results are already str; no extra conversion needed
        out.write("\n[{}]\n".format(sname))
        for skey, sval in conf[sname]:
            out.write(f"\t{skey} = {sval}\n")
07070100000053000081A4000000000000000000000001684BE19C00001470000000000000000000000000000000000000003500000000sambacc-v0.6+git.60.2f89a38/sambacc/smbconf_samba.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2023  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import sys
import types
import importlib
import typing
import itertools

from sambacc.smbconf_api import ConfigStore


def _smbconf() -> types.ModuleType:
    # deferred import of the samba smbconf bindings; NOTE(review):
    # presumably deferred so importing this module does not itself
    # require the samba python packages - confirm
    return importlib.import_module("samba.smbconf")


def _s3smbconf() -> types.ModuleType:
    # deferred import of the samba3 smbconf bindings (see _smbconf)
    return importlib.import_module("samba.samba3.smbconf")


def _s3param() -> types.ModuleType:
    # deferred import of the samba3 param (loadparm) bindings (see _smbconf)
    return importlib.import_module("samba.samba3.param")


if sys.version_info >= (3, 11):
    from typing import Self as _Self
else:
    # typing.Self does not exist before 3.11; a bound TypeVar provides
    # roughly equivalent checking for the classmethod constructors below
    _Self = typing.TypeVar("_Self", bound="SMBConf")


class SMBConf:
    """SMBConf wraps the samba smbconf library, supporting reading from and,
    when possible, writing to samba configuration backends.  The SMBConf type
    supports transactions using the context manager interface.  The SMBConf
    type can read and write configuration based on dictionary-like access,
    using shares as the keys. The global configuration is treated like a
    special "share".
    """

    def __init__(self, smbconf: typing.Any) -> None:
        # smbconf is an object from samba's bindings; typed Any because
        # the samba modules are imported lazily
        self._smbconf = smbconf

    @classmethod
    def from_file(cls: typing.Type[_Self], path: str) -> _Self:
        """Open a smb.conf style configuration from the specified path."""
        return cls(_smbconf().init_txt(path))

    @classmethod
    def from_registry(
        cls: typing.Type[_Self],
        configfile: str = "/etc/samba/smb.conf",
        key: typing.Optional[str] = None,
    ) -> _Self:
        """Open samba's registry backend for configuration parameters."""
        # NOTE(review): loading configfile appears to prime samba's
        # loadparm state needed before init_reg - confirm
        s3_lp = _s3param().get_context()
        s3_lp.load(configfile)
        return cls(_s3smbconf().init_reg(key))

    @property
    def writeable(self) -> bool:
        """True if using a read-write backend."""
        return self._smbconf.is_writeable()

    # the extraneous `self: _Self` type makes mypy on python <3.11 happy.
    # otherwise it complains: `A function returning TypeVar should receive at
    # least one argument containing the same TypeVar`
    def __enter__(self: _Self) -> _Self:
        """Start a transaction on the underlying backend."""
        self._smbconf.transaction_start()
        return self

    def __exit__(
        self, exc_type: typing.Any, exc_value: typing.Any, tb: typing.Any
    ) -> None:
        """Commit the transaction on success; cancel it on error."""
        if exc_type is None:
            self._smbconf.transaction_commit()
            return
        return self._smbconf.transaction_cancel()

    def __getitem__(self, name: str) -> list[tuple[str, str]]:
        """Return the (key, value) pairs of section `name`.

        Raises KeyError if the section does not exist.
        """
        try:
            n2, values = self._smbconf.get_share(name)
        except _smbconf().SMBConfError as err:
            # translate samba's "no such service" into a mapping KeyError
            if err.error_code == _smbconf().SBC_ERR_NO_SUCH_SERVICE:
                raise KeyError(name)
            raise
        if name != n2:
            raise ValueError(f"section name invalid: {name!r} != {n2!r}")
        return values

    def __setitem__(self, name: str, value: list[tuple[str, str]]) -> None:
        """Replace section `name` with the given (key, value) pairs."""
        # delete first, tolerating a missing section; NOTE(review):
        # presumably create_set_share requires the share to be absent -
        # confirm against samba's smbconf API
        try:
            self._smbconf.delete_share(name)
        except _smbconf().SMBConfError as err:
            if err.error_code != _smbconf().SBC_ERR_NO_SUCH_SERVICE:
                raise
        self._smbconf.create_set_share(name, value)

    def __iter__(self) -> typing.Iterator[str]:
        """Iterate over the section (share) names."""
        return iter(self._smbconf.share_names())

    def import_smbconf(
        self, src: ConfigStore, batch_size: typing.Optional[int] = 100
    ) -> None:
        """Import content from one SMBConf configuration object into the
        current SMBConf configuration object.

        Set batch_size to the maximum number of "shares" to import in one
        transaction. Set batch_size to None to use only one transaction.

        Raises ValueError if this store is not writeable.
        """
        if not self.writeable:
            raise ValueError("SMBConf is not writable")
        if batch_size is None:
            return self._import_smbconf_all(src)
        return self._import_smbconf_batched(src, batch_size)

    def _import_smbconf_all(self, src: ConfigStore) -> None:
        # copy every section within one transaction
        with self:
            for sname in src:
                self[sname] = src[sname]

    def _import_smbconf_batched(
        self, src: ConfigStore, batch_size: int
    ) -> None:
        # based on a comment in samba's source code for the net command
        # only import N 'shares' at a time so that the transaction does
        # not exceed talloc memory limits
        def _batch_keyfunc(item: tuple[int, str]) -> int:
            # consecutive (index, name) pairs share a key per batch_size chunk
            return item[0] // batch_size

        for _, snames in itertools.groupby(enumerate(src), _batch_keyfunc):
            # one transaction per batch
            with self:
                for _, sname in snames:
                    self[sname] = src[sname]
07070100000054000081A4000000000000000000000001684BE19C000006DC000000000000000000000000000000000000003000000000sambacc-v0.6+git.60.2f89a38/sambacc/textfile.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import os
import typing


class TextFileLoader:
    """Base class for loading and saving line-oriented text files.

    Subclasses provide parsing/serialization by overriding `loadlines`
    and `dumplines`; this class handles the file access.
    """

    def __init__(self, path: str):
        self.path = path

    def read(self) -> None:
        """Read and parse the file at `self.path`."""
        with open(self.path) as fh:
            self.readfp(fh)

    def write(self, alternate_path: typing.Optional[str] = None) -> None:
        """Serialize to `alternate_path` (default `self.path`), writing
        a temporary file first and renaming it into place.
        """
        dest = alternate_path if alternate_path else self.path
        tmp = self._tmp_path(dest)
        with open(tmp, "w") as fh:
            self.writefp(fh)
        os.rename(tmp, dest)

    def _tmp_path(self, path: str) -> str:
        # for later: make this smarter
        return f"{path}.tmp"

    def readfp(self, fp: typing.IO) -> None:
        """Parse the contents of an open file object."""
        self.loadlines(fp.readlines())

    def writefp(self, fp: typing.IO) -> None:
        """Serialize to an open file object."""
        fp.writelines(self.dumplines())
        fp.flush()

    def dumplines(self) -> typing.Iterable[str]:
        """Must be overridden."""
        return []

    def loadlines(self, lines: typing.Iterable[str]) -> None:
        """Must be overridden."""
        pass
07070100000055000081A4000000000000000000000001684BE19C00000495000000000000000000000000000000000000003000000000sambacc-v0.6+git.60.2f89a38/sambacc/typelets.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2022  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#
"""typelets defines common-ish type hinting types that are tedious to
remember/redefine.
"""

from types import TracebackType
import sys
import typing

# Aliases for the exception triple passed to __exit__ methods.
ExcType = typing.Optional[typing.Type[BaseException]]
ExcValue = typing.Optional[BaseException]
ExcTraceback = typing.Optional[TracebackType]


if sys.version_info >= (3, 11):
    from typing import Self
elif typing.TYPE_CHECKING:
    # typing_extensions is only required when running a static type checker
    from typing_extensions import Self
else:
    # at runtime before 3.11 any placeholder suffices for annotations
    Self = typing.Any
07070100000056000081A4000000000000000000000001684BE19C00000B23000000000000000000000000000000000000003200000000sambacc-v0.6+git.60.2f89a38/sambacc/url_opener.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2023  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import errno
import http
import typing
import urllib.error
import urllib.request

from .opener import SchemeNotSupported


class _UnknownHandler(urllib.request.BaseHandler):
    """Handler that rejects any URL scheme no other handler claimed.

    urllib invokes `unknown_open` when no registered handler supports the
    request's scheme; raising here surfaces that as SchemeNotSupported.
    """

    def unknown_open(self, req: urllib.request.Request) -> None:
        raise SchemeNotSupported(req.full_url)


class URLOpener:
    """An Opener type used for fetching remote resources named in
    a pseudo-URL (URI-like) style.
    By default works like urllib.urlopen but only for HTTP(S).

    Example:
    >>> uo = URLOpener()
    >>> res = uo.open("http://abc.example.org/foo/x.json")
    >>> res.read()
    """

    # this list is similar to the defaults found in build_opener
    # but only for http/https handlers. No FTP, etc.
    _handlers = [
        urllib.request.ProxyHandler,
        urllib.request.HTTPHandler,
        urllib.request.HTTPDefaultErrorHandler,
        urllib.request.HTTPRedirectHandler,
        urllib.request.HTTPErrorProcessor,
        urllib.request.HTTPSHandler,
        _UnknownHandler,
    ]

    def __init__(self) -> None:
        self._opener = urllib.request.OpenerDirector()
        for handler_cls in self._handlers:
            self._opener.add_handler(handler_cls())

    def open(self, url: str) -> typing.IO:
        """Open `url`, returning a readable response object.

        Raises SchemeNotSupported for non-HTTP(S) URLs; HTTP errors are
        re-raised after a best-effort errno mapping.
        """
        try:
            return self._opener.open(url)
        except urllib.error.HTTPError as err:
            _map_errno(err)
            raise
        except ValueError as err:
            # too bad urllib doesn't use a specific subclass of ValueError here
            if "unknown url type" not in str(err):
                raise
            raise SchemeNotSupported(url) from err
            raise


_EMAP = {
    http.HTTPStatus.NOT_FOUND.value: errno.ENOENT,
    http.HTTPStatus.UNAUTHORIZED.value: errno.EPERM,
}


def _map_errno(err: urllib.error.HTTPError) -> None:
    """While HTTPError is an OSError, it often doesn't have an errno set.
    Since our callers care about the errno, do a best effort mapping of
    some HTTP statuses to errnos.
    """
    if getattr(err, "errno", None) is not None:
        return
    status = int(getattr(err, "status", -1))
    setattr(err, "errno", _EMAP.get(status, None))
07070100000057000081A4000000000000000000000001684BE19C000004D5000000000000000000000000000000000000002600000000sambacc-v0.6+git.60.2f89a38/setup.cfg# Note: I'd prefer that everything here be removed in favor of
# pyproject.toml, but the timing isn't quite right yet for PEP 621 support in
# setuptools so we need to put the values here for now.

[metadata]
name = sambacc
version = 0.1
description = Samba Container Configurator
author = John Mulligan
author_email = phlogistonjohn@asynchrono.us
readme = file: README.md
url = https://github.com/samba-in-kubernetes/sambacc
license = GPL3
long_description = file: README.md
long_description_content_type = text/markdown

[options]
packages =
    sambacc
    sambacc.commands
    sambacc.schema
    sambacc.grpc
    sambacc.grpc.generated
    sambacc.commands.remotecontrol
include_package_data = True

[options.entry_points]
console_scripts =
    samba-container = sambacc.commands.main:main
    samba-dc-container = sambacc.commands.dcmain:main
    samba-remote-control = sambacc.commands.remotecontrol.main:main

[options.data_files]
share/sambacc/examples =
  examples/ctdb.json
  examples/example1.json
  examples/minimal.json
  examples/addc.json

[options.extras_require]
validation =
    jsonschema>=4.10
yaml =
    PyYAML>=5.4
toml =
    tomli;python_version<"3.11"
rados =
    rados
grpc =
    grpcio>=1.48
    protobuf>=3.19
07070100000058000041ED000000000000000000000002684BE19C00000000000000000000000000000000000000000000002200000000sambacc-v0.6+git.60.2f89a38/tests07070100000059000081A4000000000000000000000001684BE19C00000000000000000000000000000000000000000000002E00000000sambacc-v0.6+git.60.2f89a38/tests/__init__.py0707010000005A000041ED000000000000000000000002684BE19C00000000000000000000000000000000000000000000002C00000000sambacc-v0.6+git.60.2f89a38/tests/container0707010000005B000081A4000000000000000000000001684BE19C00000272000000000000000000000000000000000000003A00000000sambacc-v0.6+git.60.2f89a38/tests/container/ContainerfileARG SAMBACC_BASE_IMAGE='registry.fedoraproject.org/fedora:41'
FROM $SAMBACC_BASE_IMAGE


COPY build.sh /usr/local/bin/build.sh

# Set SAMBACC_MINIMAL to yes to build a container that only contains the
# build.sh script on top of the base image. When called, build.sh will
# automatically install the dependency packages. Installing packages on every
# run can be slow, especially if you are hacking on the code or tests, so we
# install those dependencies proactively by default.
ARG SAMBACC_MINIMAL=no
RUN if [ "$SAMBACC_MINIMAL" != "yes" ]; then /usr/local/bin/build.sh --install ; fi
ENTRYPOINT ["/usr/local/bin/build.sh"]
0707010000005C000081ED000000000000000000000001684BE19C00001D0B000000000000000000000000000000000000003500000000sambacc-v0.6+git.60.2f89a38/tests/container/build.sh#!/bin/bash

set -e

python=python3
url="${SAMBACC_REPO_URL:-https://github.com/samba-in-kubernetes/sambacc}"
bdir="${SAMBACC_BUILD_DIR:-/var/tmp/build/sambacc}"
distname="${SAMBACC_DISTNAME}"
# use SAMBACC_BUILD_TASKS to limit build tasks if needed
tasks="${SAMBACC_BUILD_TASKS:-task_test_tox task_py_build task_rpm_build task_gen_sums}"
dist_prefix="${SAMBACC_DIST_PREFIX:-/srv/dist}"
dnf_cmd=dnf

info() {
    # log helper: prefix every build message so output is easy to attribute
    echo "[[sambacc/build]] $*"
}

checked_out() {
    # true when the directory exists and holds a git or hg checkout
    # (manual clones with either tool are allowed)
    local repo="$1"
    if [[ ! -d "$repo" ]]; then
        return 1
    fi
    [[ -d "$repo/.git" ]] || [[ -d "$repo/.hg" ]]
}

clone() {
    # if the script is doing the cloning we default to git
    # as obnoxxx has peer-pressured me into it
    git clone "$1" "$2"
}

update() {
    # check out tag/revision ($2); $1 is only used to detect an hg repo.
    # NOTE(review): hg/git act on the current directory - the main
    # sequence below cd's into the repo before calling setup_update
    local d="$1"
    local node="$2"
    if [[ -d "$d/.hg" ]]; then
        hg update --check "${node}"
    else
        git checkout "${node}"
    fi
}

chk() {
    # run the named task only when it appears in the $tasks list,
    # otherwise log that it was skipped
    for x in $tasks; do
        case "$1" in
            "$x")
                # execute the named task if it is in $tasks
                "$1"
                return $?
            ;;
        esac
    done
    info "skipping task: $1"
}

get_distdir() {
    # print the dist output dir for the named dist ($1), creating it as
    # needed; an empty name selects a throwaway scratch directory
    dname="$1"
    case "${dname}" in
        "")
            ddir="/var/tmp/scratch_dist"
        ;;
        *)
            ddir="${dist_prefix}/${dname}"
        ;;
    esac
    mkdir -p "$ddir" >/dev/null
    echo "$ddir"
}

setup_fetch() {
    # ensure a sambacc checkout exists at $bdir, cloning it if needed
    # allow customizing the repo on the cli or environment
    if [ "$1" ]; then
        url="$1"
    elif [ "${SAMBACC_REPO_URL}" ]; then
        url="${SAMBACC_REPO_URL}"
    fi

    mkdir -p /var/tmp/build/ || true
    if checked_out "${bdir}" ; then
        info "repo already checked out"
    else
        info "cloning sambacc repo"
        clone "$url" "${bdir}"
    fi
}

setup_update() {
    # switch the checkout to a requested revision, if one was given
    if [ "$1" ]; then
        # a tag or revision id was specified on the cli
        update "${bdir}" "$1"
    fi
}

task_sys_deps() {
    # install the system packages needed to build and test sambacc
    info "installing system packages"
    OS_VER=$(source /etc/os-release && echo "${ID}-${VERSION_ID}")
    case "${OS_VER}" in
        centos*)
            info "detected centos (stream): ${OS_VER}"
            use_centos=true
        ;;
        rhel*)
            info "detected rhel: ${OS_VER}"
            use_centos=
            use_rhel=true
        ;;
        fedora*)
            info "detected fedora: ${OS_VER}"
            use_centos=
        ;;
        *)
            info "unknown platform: ${OS_VER}"
            return 1
        ;;
    esac

    yum_args=("--setopt=install_weak_deps=False")
    pkgs=(\
        git \
        mercurial \
        python-pip \
        python-pip-wheel \
        python-setuptools \
        python-setuptools-wheel \
        python-tox \
        python3-samba \
        python3-wheel \
        python3-pyxattr \
        python3-devel \
        python3.9 \
        samba-common-tools \
        rpm-build \
        'python3dist(flake8)' \
        'python3dist(inotify-simple)' \
        'python3dist(mypy)' \
        'python3dist(pytest)' \
        'python3dist(pytest-cov)' \
        'python3dist(setuptools-scm)' \
        'python3dist(tox-current-env)' \
        'python3dist(wheel)' \
    )

    if [ "$use_centos" ]; then
        "${dnf_cmd}" install -y epel-release
        # NOTE(review): this assignment replaces (not appends to) the
        # --setopt=install_weak_deps=False flag set above - confirm
        # that dropping it on centos is intentional
        yum_args=(--enablerepo=crb)
        pkgs+=(pyproject-rpm-macros)
    fi
    if [ "$use_rhel" ]; then
        pkgs+=(pyproject-rpm-macros)
    fi
    "${dnf_cmd}" "${yum_args[@]}" install -y "${pkgs[@]}"
    "${dnf_cmd}" clean all
}

task_test_tox() {
    # Run the test suite with tox. Access to the system installed samba
    # modules is needed; NOTE(review): no sitepackages flag is passed
    # here despite the original comment claiming so - presumably tox.ini
    # or the container env enables it; confirm.
    info "running test suite with tox"
    tox
}

task_py_build() {
    # build sdist/wheel artifacts with the PEP 517 `build` frontend
    info "building python package(s)"
    pip -qq install build
    # if distname is set, then we are building for external consumption
    # if distname is not set then we're building for internal consumption
    # only
    distdir="$(get_distdir "$distname")"
    info "using dist dir: $distdir"

    # setuptools_scm calls into git, newer git versions have stricter ownership
    # rules that can break our builds when mounted into a container. Tell our
    # in-container git, that it's all ok and the monsters aren't real.
    # This config will vanish once the container exits anyway.
    git config --global --add safe.directory "${bdir}"
    $python -m build --outdir "$distdir"
}

task_rpm_build() {
    # build (s)rpms from the sdist tarball(s) produced by task_py_build.
    # >/dev/null keeps `command -v` from printing the rpmbuild path,
    # matching the dnf_cmd availability check elsewhere in this script
    if ! command -v rpmbuild >/dev/null ; then
        info "rpmbuild not found ... skipping"
        return
    fi

    distdir="$(get_distdir "$distname")"
    local rpmbuild_stage="-ba"
    if [ "${SAMBACC_SRPM_ONLY}" ]; then
        # -bs builds only the source rpm
        rpmbuild_stage="-bs"
    fi
    info "using dist dir: $distdir; using stage: ${rpmbuild_stage}"
    for spkg in "$distdir/sambacc"-*.tar.gz; do
        info "RPM build for: ${spkg}"
        # derive the python package version from the tarball name
        ver="$(basename  "${spkg}" | sed -e 's/^sambacc-//' -e 's/.tar.gz$//')"
        if echo "$ver" | grep -q "+" ; then
            # convert a dev/local python version into an rpm-safe version
            rversion="$(echo "${ver}" | sed -e 's/\.dev/~/' -e 's/+/./')"
        else
            rversion="$ver"
        fi
        info "Using rpm-version=${rversion} pkg-version=${ver}"
        tdir="$(mktemp -d)"
        # prepend version macros to the spec file extracted from the sdist
        (
            echo "%define pversion ${ver}"
            echo "%define rversion ${rversion}"
            tar -xf "$spkg" -O \
                "sambacc-${ver}/extras/python-sambacc.spec"
        ) > "${tdir}/python-sambacc.spec"
        rpmbuild_cmd=(rpmbuild "${rpmbuild_stage}" \
            -D "_rpmdir ${distdir}/RPMS" \
            -D "_srcrpmdir ${distdir}/SRPMS" \
            -D "_sourcedir $(dirname "${spkg}")" \
        )
        if [ "$VENDOR_DIST_SUFFIX" ]; then
            rpmbuild_cmd+=(-D "vendordist ${VENDOR_DIST_SUFFIX}")
        fi
        "${rpmbuild_cmd[@]}" "${tdir}/python-sambacc.spec"
        rm -rf "${tdir}"
    done
}

task_gen_sums() {
    # checksums are only produced for named (externally published) dists
    if [ -z "$distname" ]; then
        return 0
    fi
    info "generating checksums"
    distdir="$(get_distdir "$distname")"
    info "using dist dir: $distdir"
    (cd "$distdir" && \
        find . -type f -not -name 'sha*sums' -print0 | \
        xargs -0 sha512sum  > "$distdir/sha512sums")
}

cleanup() {
    # with no dist name the build used a scratch dist dir: remove it.
    # named dist dirs are preserved for external consumption.
    if [ -z "${distname}" ]; then
        info "cleaning scratch dist dir"
        rm -rf "$(get_distdir "$distname")"
    fi
}

if ! command -v "${dnf_cmd}" >/dev/null ; then
    # fall back to yum on platforms that do not ship dnf
    dnf_cmd=yum
fi

# Allow the tests to use customized passwd file contents in order
# to test samba passdb support. It's a bit strange, but should work.
# The test suite tries to restore the passwd file after changing it,
# but you probably don't want to enable these on your desktop.
# TODO: actually use nss-wrapper
export WRITABLE_PASSWD=yes
export NSS_WRAPPER_PASSWD=/etc/passwd
export NSS_WRAPPER_GROUP=/etc/group

# when called with --install as the first argument, go into a special mode
# typically used to just install the container's dependency packages
if [[ "$1" = "--install" ]]; then
    task_sys_deps
    exit $?
fi

# if critical packages (currently just git) are missing we assume that
# we need to automatically enable the task_sys_deps step.
# this step is not enabled by default due to the overhead that updating
# the dnf repos creates on the overall build time.
if ! command -v git &>/dev/null ; then
    tasks="$tasks task_sys_deps"
fi

# the scratch dist dir (if any) is removed even when the build fails
trap cleanup EXIT

# main build sequence: chk runs each task only when listed in $tasks
chk task_sys_deps
setup_fetch "$2"
cd "${bdir}"
setup_update "$1"

chk task_test_tox
chk task_py_build
chk task_rpm_build
chk task_gen_sums
0707010000005D000081A4000000000000000000000001684BE19C00001A32000000000000000000000000000000000000002F00000000sambacc-v0.6+git.60.2f89a38/tests/test_addc.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2022  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import os

import pytest

import sambacc.addc


def _fake_samba_tool(path):
    fake_samba_tool = path / "fake_samba_tool.sh"
    with open(fake_samba_tool, "w") as fh:
        fh.write("#!/bin/sh\n")
        fh.write(f"[ -e {path}/fail ] && exit 1\n")
        fh.write(f'echo "$@" > {path}/args.out\n')
        fh.write("exit 0")
    os.chmod(fake_samba_tool, 0o700)
    return fake_samba_tool


def test_provision(tmp_path, monkeypatch):
    """Exercise sambacc.addc.provision with and without extra options."""
    # route samba-tool invocations through the fake recording script
    monkeypatch.setattr(
        sambacc.samba_cmds, "_GLOBAL_PREFIX", [_fake_samba_tool(tmp_path)]
    )

    sambacc.addc.provision("FOOBAR.TEST", "quux", "h4ckm3")
    with open(tmp_path / "args.out") as fh:
        result = fh.read()
    assert "--realm=FOOBAR.TEST" in result
    assert "--option=netbios name=quux" in result
    assert "--dns-backend=SAMBA_INTERNAL" in result

    sambacc.addc.provision(
        "BARFOO.TEST",
        "quux",
        "h4ckm3",
        options=[
            ("ldap server require strong auth", "no"),
            ("dns zone scavenging", "yes"),
            ("ldap machine suffix", "ou=Machines"),
            ("netbios name", "flipper"),
        ],
    )
    with open(tmp_path / "args.out") as fh:
        result = fh.read()
    assert "--realm=BARFOO.TEST" in result
    assert "--option=netbios name=quux" in result
    assert "--dns-backend=SAMBA_INTERNAL" in result
    assert "--option=ldap server require strong auth=no" in result
    assert "--option=dns zone scavenging=yes" in result
    assert "--option=ldap machine suffix=ou=Machines" in result
    # a caller-supplied "netbios name" must not override the explicit one
    assert "--option=netbios name=flipper" not in result

    # with the fail marker present the fake tool exits non-zero,
    # so provision must raise
    open(tmp_path / "fail", "w").close()
    with pytest.raises(Exception):
        sambacc.addc.provision("FOOBAR.TEST", "quux", "h4ckm3")


def test_join(tmp_path, monkeypatch):
    """Exercise sambacc.addc.join with and without extra options."""
    # route samba-tool invocations through the fake recording script
    monkeypatch.setattr(
        sambacc.samba_cmds, "_GLOBAL_PREFIX", [_fake_samba_tool(tmp_path)]
    )

    sambacc.addc.join("FOOBAR.TEST", "quux", "h4ckm3")
    with open(tmp_path / "args.out") as fh:
        result = fh.read()
    assert "FOOBAR.TEST" in result
    assert "--option=netbios name=quux" in result
    assert "--dns-backend=SAMBA_INTERNAL" in result

    sambacc.addc.join(
        "BARFOO.TEST",
        "quux",
        "h4ckm3",
        options=[
            ("ldap server require strong auth", "no"),
            ("dns zone scavenging", "yes"),
            ("ldap machine suffix", "ou=Machines"),
            ("netbios name", "flipper"),
        ],
    )
    # (the original accidentally read args.out twice here; once suffices)
    with open(tmp_path / "args.out") as fh:
        result = fh.read()
    assert "BARFOO.TEST" in result
    assert "--option=netbios name=quux" in result
    assert "--dns-backend=SAMBA_INTERNAL" in result
    assert "--option=ldap server require strong auth=no" in result
    assert "--option=dns zone scavenging=yes" in result
    assert "--option=ldap machine suffix=ou=Machines" in result
    # a caller-supplied "netbios name" must not override the explicit one
    assert "--option=netbios name=flipper" not in result


def test_create_user(tmp_path, monkeypatch):
    """create_user must pass the user name, surname and given name."""
    monkeypatch.setattr(
        sambacc.samba_cmds, "_GLOBAL_PREFIX", [_fake_samba_tool(tmp_path)]
    )

    sambacc.addc.create_user("fflintstone", "b3dr0ck", "Flintstone", "Fred")
    with open(tmp_path / "args.out") as fh:
        result = fh.read()
    assert "user create fflintstone" in result
    assert "--surname=Flintstone" in result
    assert "--given-name=Fred" in result


def test_create_ou(tmp_path, monkeypatch):
    """create_ou must issue an `ou add` with an OU= name."""
    monkeypatch.setattr(
        sambacc.samba_cmds, "_GLOBAL_PREFIX", [_fake_samba_tool(tmp_path)]
    )

    sambacc.addc.create_ou("quarry_workers")
    with open(tmp_path / "args.out") as fh:
        result = fh.read()
    assert "ou add OU=quarry_workers" in result


def test_create_group(tmp_path, monkeypatch):
    """create_group must issue a `group add` for the group name."""
    monkeypatch.setattr(
        sambacc.samba_cmds, "_GLOBAL_PREFIX", [_fake_samba_tool(tmp_path)]
    )

    sambacc.addc.create_group("quarry_workers")
    with open(tmp_path / "args.out") as fh:
        result = fh.read()
    assert "group add quarry_workers" in result


def test_add_group_members(tmp_path, monkeypatch):
    """add_group_members must pass a comma separated member list."""
    monkeypatch.setattr(
        sambacc.samba_cmds, "_GLOBAL_PREFIX", [_fake_samba_tool(tmp_path)]
    )

    sambacc.addc.add_group_members(
        "quarry_workers", ["fflintstone", "brubble"]
    )
    with open(tmp_path / "args.out") as fh:
        result = fh.read()
    assert "group addmembers quarry_workers" in result
    assert "fflintstone,brubble" in result


@pytest.mark.parametrize(
    "cfg,ifaces,expected",
    [
        # empty cfg: filtering is not configured; expected is unused
        ({}, ["foo", "bar"], None),
        (
            {"include_pattern": "^eth.*$"},
            ["lo", "eth0", "eth1", "biff1"],
            ["lo", "eth0", "eth1"],
        ),
        (
            {"include_pattern": "^nope$"},
            ["lo", "eth0", "eth1", "biff1"],
            ["lo"],
        ),
        (
            {"include_pattern": "^biff[0-9]+$"},
            ["lo", "eth0", "eth1", "biff1"],
            ["lo", "biff1"],
        ),
        (
            {"exclude_pattern": "^docker[0-9]+$"},
            ["lo", "eno1", "eno2", "docker0"],
            ["lo", "eno1", "eno2"],
        ),
        (
            {"exclude_pattern": "^.*$"},
            ["lo", "eno1", "eno2", "docker0"],
            ["lo"],
        ),
        (
            {
                "include_pattern": "^[ed].*$",
                "exclude_pattern": "^docker[0-9]+$",
            },
            ["lo", "eno1", "eno2", "docker0"],
            ["lo", "eno1", "eno2"],
        ),
        (
            {
                "include_pattern": "^[ed].*$",
                "exclude_pattern": "^.*0$",
            },
            ["lo", "dx1f2", "docker0"],
            ["lo", "dx1f2"],
        ),
    ],
)
def test_filtered_interfaces(cfg, ifaces, expected):
    """Include/exclude patterns select interface names.

    NOTE(review): every expected list retains "lo" even when the
    patterns would drop it - the filter apparently always keeps the
    loopback interface; confirm against sambacc.addc.filtered_interfaces.
    """
    ic = sambacc.config.DCInterfaceConfig(cfg)
    if cfg:
        assert ic.configured
        assert sambacc.addc.filtered_interfaces(ic, ifaces) == expected
    else:
        assert not ic.configured
0707010000005E000081A4000000000000000000000001684BE19C00001F94000000000000000000000000000000000000003A00000000sambacc-v0.6+git.60.2f89a38/tests/test_commands_config.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2022  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import argparse
import functools
import os

import sambacc.config
import sambacc.opener
import sambacc.paths
import sambacc.samba_cmds

import sambacc.commands.config


# config1: initial configuration — instance "updateme" exports a single
# share ("uno") with one globals section. Written to disk first by
# FakeContext.defaults().
config1 = """
{
    "samba-container-config": "v0",
    "configs": {
        "updateme":{
          "shares": ["uno"],
          "globals": ["global0"]
        }
    },
    "shares": {
       "uno": {
          "options": {
              "path": "/srv/data/uno"
          }
       }
    },
    "globals": {
        "global0": {
           "options": {
             "server min protocol": "SMB2"
           }
        }
    }
}
"""

# config2: updated configuration — adds a second share ("dos") and an
# explicit permissions method; overwriting config1 with this is how the
# tests trigger a configuration change.
config2 = """
{
    "samba-container-config": "v0",
    "configs": {
        "updateme":{
          "shares": ["uno", "dos"],
          "globals": ["global0"],
          "permissions": {"method": "none"}
        }
    },
    "shares": {
       "uno": {
          "options": {
              "path": "/srv/data/uno"
          }
       },
       "dos": {
          "options": {
              "path": "/srv/data/dos"
          }
       }
    },
    "globals": {
        "global0": {
           "options": {
             "server min protocol": "SMB2"
           }
        }
    }
}
"""


class FakeContext:
    """Minimal stand-in for the command context passed to update_config."""

    cli: argparse.Namespace
    instance_config: sambacc.config.InstanceConfig

    def __init__(self, opts, instance_config):
        # Expose the option mapping through attribute access, mimicking
        # the result of argparse parsing.
        self.cli = argparse.Namespace(**opts)
        self.instance_config = instance_config
        self.require_validation = False

    @classmethod
    def defaults(cls, cfg_path, watch=False):
        """Write config1 to cfg_path and build a context reading it."""
        with open(cfg_path, "w") as fh:
            fh.write(config1)

        cfg_paths = [cfg_path]
        name = "updateme"
        opts = {
            "watch": watch,
            "config": cfg_paths,
            "identity": name,
        }
        iconfig = sambacc.config.read_config_files(cfg_paths).get(name)
        return cls(opts, iconfig)

    @property
    def opener(self) -> sambacc.opener.Opener:
        # Plain filesystem opener; no URL/remote handling in tests.
        return sambacc.opener.FileOpener()


class FakeWaiter:
    """Waiter test double.

    Counts wait() calls, fires an optional callback registered in
    ``on_count`` at a given call number, and raises KeyboardInterrupt
    once ``attempts`` waits have been consumed (to break watch loops).
    """

    def __init__(self, attempts=None):
        self.count = 0
        self.on_count = {}
        self.attempts = attempts

    def acted(self):
        # Intentionally a no-op in this fake.
        pass

    def wait(self):
        # Simulate the user interrupting the watch loop once the
        # allotted number of attempts is used up.
        if self.attempts is not None and self.count >= self.attempts:
            raise KeyboardInterrupt()
        callback = self.on_count.get(self.count)
        if callback is not None:
            callback()
        self.count += 1


def _gen_fake_cmd(fake_path, chkpath, pnn="0"):
    with open(fake_path, "w") as fh:
        fh.write("#!/bin/sh\n")
        fh.write(f'echo "$@" >> {chkpath}\n')
        fh.write(f'[ "$1" = ctdb ] && echo {pnn}" " ; \n')
        fh.write("exit 0\n")
    os.chmod(fake_path, 0o755)


def test_update_config_changed(tmp_path, monkeypatch):
    """update_config applies a changed config: the fake samba command
    log must show both a 'net' and an 'smbcontrol' invocation."""
    cfg_path = str(tmp_path / "config")
    fake = tmp_path / "fake.sh"
    chkpath = tmp_path / ".executed"
    _gen_fake_cmd(fake, str(chkpath))
    # Route all samba command executions through the fake script.
    monkeypatch.setattr(sambacc.samba_cmds, "_GLOBAL_PREFIX", [str(fake)])

    ctx = FakeContext.defaults(cfg_path)
    # Overwrite the on-disk config so update_config sees a change.
    with open(cfg_path, "w") as fh:
        fh.write(config2)
    # Redirect share-dir creation under the test tmp dir.
    monkeypatch.setattr(
        sambacc.paths,
        "ensure_share_dirs",
        functools.partial(
            sambacc.paths.ensure_share_dirs, root=str(tmp_path / "_root")
        ),
    )
    sambacc.commands.config.update_config(ctx)

    assert os.path.exists(chkpath)
    # Close the log file promptly; the previous bare open() leaked
    # the file handle.
    with open(chkpath) as fh:
        chk = fh.readlines()
    assert any(("net" in line) for line in chk)
    assert any(("smbcontrol" in line) for line in chk)


def test_update_config_changed_ctdb(tmp_path, monkeypatch):
    """With the ctdb feature enabled and the fake command reporting
    pnn 0 (leader), a changed config is still applied."""
    cfg_path = str(tmp_path / "config")
    fake = tmp_path / "fake.sh"
    chkpath = tmp_path / ".executed"
    _gen_fake_cmd(fake, str(chkpath))
    monkeypatch.setattr(sambacc.samba_cmds, "_GLOBAL_PREFIX", [str(fake)])

    ctx = FakeContext.defaults(cfg_path)
    # Force the instance into clustered (ctdb) mode.
    ctx.instance_config.iconfig["instance_features"] = ["ctdb"]
    assert ctx.instance_config.with_ctdb
    with open(cfg_path, "w") as fh:
        fh.write(config2)
    monkeypatch.setattr(
        sambacc.paths,
        "ensure_share_dirs",
        functools.partial(
            sambacc.paths.ensure_share_dirs, root=str(tmp_path / "_root")
        ),
    )
    sambacc.commands.config.update_config(ctx)

    assert os.path.exists(chkpath)
    # Close the log file promptly; the previous bare open() leaked
    # the file handle.
    with open(chkpath) as fh:
        chk = fh.readlines()
    assert any(("net" in line) for line in chk)
    assert any(("smbcontrol" in line) for line in chk)


def test_update_config_ctdb_notleader(tmp_path, monkeypatch):
    """With ctdb enabled but an empty pnn (not the cluster leader),
    update_config must not run the net/smbcontrol commands."""
    cfg_path = str(tmp_path / "config")
    fake = tmp_path / "fake.sh"
    chkpath = tmp_path / ".executed"
    # pnn="" makes the fake ctdb invocation report no node number.
    _gen_fake_cmd(fake, str(chkpath), pnn="")
    monkeypatch.setattr(sambacc.samba_cmds, "_GLOBAL_PREFIX", [str(fake)])

    ctx = FakeContext.defaults(cfg_path)
    ctx.instance_config.iconfig["instance_features"] = ["ctdb"]
    assert ctx.instance_config.with_ctdb
    with open(cfg_path, "w") as fh:
        fh.write(config2)
    monkeypatch.setattr(
        sambacc.paths,
        "ensure_share_dirs",
        functools.partial(
            sambacc.paths.ensure_share_dirs, root=str(tmp_path / "_root")
        ),
    )
    sambacc.commands.config.update_config(ctx)

    assert os.path.exists(chkpath)
    # Close the log file promptly; the previous bare open() leaked
    # the file handle.
    with open(chkpath) as fh:
        chk = fh.readlines()
    assert not any(("net" in line) for line in chk)
    assert not any(("smbcontrol" in line) for line in chk)


def test_update_config_watch_waiter_expires(tmp_path, monkeypatch):
    """In watch mode with an unchanged config, update_config loops on
    the waiter until it gives up; no samba command is ever executed."""
    cfg_path = str(tmp_path / "config")
    fake_cmd = tmp_path / "fake.sh"
    executed_log = tmp_path / ".executed"
    _gen_fake_cmd(fake_cmd, str(executed_log))
    monkeypatch.setattr(
        sambacc.samba_cmds, "_GLOBAL_PREFIX", [str(fake_cmd)]
    )

    waiter = FakeWaiter(attempts=5)
    # update_config asks for a waiter; always hand back our fake.
    monkeypatch.setattr(
        sambacc.commands.config, "best_waiter", lambda *a, **kw: waiter
    )

    ctx = FakeContext.defaults(cfg_path, watch=True)
    monkeypatch.setattr(
        sambacc.paths,
        "ensure_share_dirs",
        functools.partial(
            sambacc.paths.ensure_share_dirs, root=str(tmp_path / "_root")
        ),
    )
    sambacc.commands.config.update_config(ctx)

    # Nothing changed, so the fake command never ran and the waiter
    # was exhausted.
    assert not os.path.exists(executed_log)
    assert waiter.count == 5


def test_update_config_watch_waiter_trigger3(tmp_path, monkeypatch):
    """Watch mode: the config file is rewritten on the 3rd wait; the
    change must be applied (net + smbcontrol run) and the loop must
    continue until the waiter expires after 5 attempts."""
    cfg_path = str(tmp_path / "config")
    fake = tmp_path / "fake.sh"
    chkpath = tmp_path / ".executed"
    _gen_fake_cmd(fake, str(chkpath))
    monkeypatch.setattr(sambacc.samba_cmds, "_GLOBAL_PREFIX", [str(fake)])

    fake_waiter = FakeWaiter(attempts=5)

    def _fake_waiter(*args, **kwargs):
        return fake_waiter

    def _new_conf():
        # Swap in the updated config while update_config is watching.
        with open(cfg_path, "w") as fh:
            fh.write(config2)

    monkeypatch.setattr(sambacc.commands.config, "best_waiter", _fake_waiter)
    fake_waiter.on_count[3] = _new_conf

    ctx = FakeContext.defaults(cfg_path, watch=True)
    monkeypatch.setattr(
        sambacc.paths,
        "ensure_share_dirs",
        functools.partial(
            sambacc.paths.ensure_share_dirs, root=str(tmp_path / "_root")
        ),
    )
    sambacc.commands.config.update_config(ctx)

    assert os.path.exists(chkpath)
    # Close the log file promptly; the previous bare open() leaked
    # the file handle.
    with open(chkpath) as fh:
        chk = fh.readlines()
    assert any(("net" in line) for line in chk)
    assert any(("smbcontrol" in line) for line in chk)
    assert fake_waiter.count == 5
0707010000005F000081A4000000000000000000000001684BE19C0000774C000000000000000000000000000000000000003100000000sambacc-v0.6+git.60.2f89a38/tests/test_config.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import io
import os
import unittest

import pytest

import sambacc.config
import sambacc.opener

# config1: minimal valid configuration — instance "foobar" with two
# plain shares ("demo", "stuff") and one globals section.
config1 = """
{
    "samba-container-config": "v0",
    "configs": {
        "foobar":{
          "shares": ["demo", "stuff"],
          "globals": ["global0"]
        }
    },
    "shares": {
       "demo": {
          "options": {
              "path": "/mnt/demo"
          }
       },
       "stuff": {
          "options": {
              "path": "/mnt/stuff"
          }
       }
    },
    "globals": {
        "global0": {
           "options": {
             "server min protocol": "SMB2"
           }
        }
    }
}
"""

# config2: adds instance_name, an instance-level permissions method,
# users (including an nt_hash-only entry) and an underscore-prefixed
# extra section that parsing must tolerate.
config2 = """
{
  "samba-container-config": "v0",
  "configs": {
    "foobar": {
      "shares": [
        "share"
      ],
      "globals": ["global0"],
      "instance_name": "GANDOLPH",
      "permissions": {
          "method": "none"
      }
    }
  },
  "shares": {
    "share": {
      "options": {
        "path": "/share",
        "read only": "no",
        "valid users": "sambauser",
        "guest ok": "no",
        "force user": "root"
      }
    }
  },
  "globals": {
    "global0": {
      "options": {
        "workgroup": "SAMBA",
        "security": "user",
        "server min protocol": "SMB2",
        "load printers": "no",
        "printing": "bsd",
        "printcap name": "/dev/null",
        "disable spoolss": "yes",
        "guest ok": "no"
      }
    }
  },
  "users": {
      "all_entries": [
        {"name": "bob", "password": "notSoSafe"},
        {"name": "alice", "password": "123fakeStreet"},
        {"name": "carol", "nt_hash": "B784E584D34839235F6D88A5382C3821"}
      ]
  },
  "_extra_junk": 0
}
"""

# config3: like config2 but with explicit uid/gid on (some) users, an
# explicit groups section, and a share-level permissions method.
config3 = """
{
  "samba-container-config": "v0",
  "configs": {
    "foobar": {
      "shares": [
        "share"
      ],
      "globals": ["global0"],
      "instance_name": "RANDOLPH"
    }
  },
  "shares": {
    "share": {
      "options": {
        "path": "/share",
        "read only": "no",
        "valid users": "sambauser",
        "guest ok": "no",
        "force user": "root"
      },
      "permissions": {
          "method": "none"
      }
    }
  },
  "globals": {
    "global0": {
      "options": {
        "workgroup": "SAMBA",
        "security": "user",
        "server min protocol": "SMB2",
        "load printers": "no",
        "printing": "bsd",
        "printcap name": "/dev/null",
        "disable spoolss": "yes",
        "guest ok": "no"
      }
    }
  },
  "users": {
      "all_entries": [
        {"name": "bob", "uid": 2000, "gid": 2000,
         "password": "notSoSafe"},
        {"name": "alice","uid": 2001, "gid": 2001,
         "password": "123fakeStreet"},
        {"name": "carol",
         "nt_hash": "B784E584D34839235F6D88A5382C3821"}
      ]
  },
  "groups": {
      "all_entries": [
        {"name": "bobs", "gid": 2000},
        {"name": "alii", "gid": 2001}
      ]
  }
}
"""

# ctdb_config1: an instance with the "ctdb" feature enabled, used by
# the clustering-related tests.
ctdb_config1 = """
{
  "samba-container-config": "v0",
  "configs": {
    "ctdb1": {
      "shares": [
        "demo"
      ],
      "globals": [
        "global0"
      ],
      "instance_features": ["ctdb"],
      "instance_name": "ceeteedeebee"
    }
  },
  "shares": {
    "demo": {
      "options": {
        "path": "/share"
      }
    }
  },
  "globals": {
    "global0": {
      "options": {
        "security": "user",
        "load printers": "no",
        "printing": "bsd",
        "printcap name": "/dev/null",
        "disable spoolss": "yes",
        "guest ok": "no"
      }
    }
  },
  "users": {
    "all_entries": [
      {
        "name": "bob",
        "password": "notSoSafe"
      }
    ]
  }
}
"""

# addc_config1: an AD DC ("addc" feature) instance with domain
# settings, domain groups, and domain users.
addc_config1 = """
{
  "samba-container-config": "v0",
  "configs": {
    "demo": {
      "instance_features": ["addc"],
      "domain_settings": "sink",
      "instance_name": "dc1"
    }
  },
  "domain_settings": {
    "sink": {
      "realm": "DOMAIN1.SINK.TEST",
      "short_domain": "DOMAIN1",
      "admin_password": "Passw0rd"
    }
  },
  "domain_groups": {
    "sink": [
      {"name": "friends"},
      {"name": "gothamites"}
    ]
  },
  "domain_users": {
    "sink": [
      {
        "name": "bwayne",
        "password": "1115Rose.",
        "given_name": "Bruce",
        "surname": "Wayne",
        "member_of": ["friends", "gothamites"]
      },
      {
        "name": "ckent",
        "password": "1115Rose.",
        "given_name": "Clark",
        "surname": "Kent",
        "member_of": ["friends"]
      }
    ]
  }
}
"""

# addc_config2: like addc_config1 but additionally defines
# organizational_units and assigns users/groups to an OU.
addc_config2 = """
{
  "samba-container-config": "v0",
  "configs": {
    "demo": {
      "instance_features": ["addc"],
      "domain_settings": "sink",
      "instance_name": "dc1"
    }
  },
  "domain_settings": {
    "sink": {
      "realm": "DOMAIN1.SINK.TEST",
      "short_domain": "DOMAIN1",
      "admin_password": "Passw0rd"
    }
  },
  "organizational_units": {
    "sink": [
      {"name": "friends"}
    ]
  },
  "domain_groups": {
    "sink": [
      {
        "name": "friends",
        "ou": "friends"
      },
      {"name": "gothamites"}
    ]
  },
  "domain_users": {
    "sink": [
      {
        "name": "bwayne",
        "password": "1115Rose.",
        "given_name": "Bruce",
        "surname": "Wayne",
        "member_of": ["friends", "gothamites"],
        "ou": "friends"
      },
      {
        "name": "ckent",
        "password": "1115Rose.",
        "given_name": "Clark",
        "surname": "Kent",
        "member_of": ["friends"],
        "ou": "friends"
      }
    ]
  }
}
"""


class TestConfig(unittest.TestCase):
    """Unit tests for GlobalConfig parsing and the derived
    instance/share/user/group accessors."""

    def test_non_json(self):
        """Non-JSON input must raise on load."""
        with self.assertRaises(Exception):
            with open(os.devnull) as fh:
                sambacc.config.GlobalConfig(fh)

    def test_empty_json(self):
        """An empty JSON object lacks the version marker -> ValueError."""
        with self.assertRaises(ValueError):
            fh = io.StringIO("{}")
            sambacc.config.GlobalConfig(fh)

    def test_bad_version(self):
        """An unknown samba-container-config version -> ValueError."""
        with self.assertRaises(ValueError):
            fh = io.StringIO('{"samba-container-config":"foo"}')
            sambacc.config.GlobalConfig(fh)

    def test_valid_parse(self):
        """A well-formed config parses successfully."""
        fh = io.StringIO(config1)
        g = sambacc.config.GlobalConfig(fh)
        assert isinstance(g, sambacc.config.GlobalConfig)

    def test_get_config(self):
        """get() returns the named instance with its two shares."""
        fh = io.StringIO(config1)
        g = sambacc.config.GlobalConfig(fh)
        ic = g.get("foobar")
        assert len(list(ic.shares())) == 2

    def test_fail_get_config(self):
        """get() on an unknown instance name raises KeyError."""
        fh = io.StringIO(config1)
        g = sambacc.config.GlobalConfig(fh)
        with self.assertRaises(KeyError):
            g.get("wobble")

    def test_get_global_opts(self):
        """Options from the referenced globals section are exposed."""
        fh = io.StringIO(config1)
        g = sambacc.config.GlobalConfig(fh)
        ic = g.get("foobar")
        gopts = list(ic.global_options())
        assert ("server min protocol", "SMB2") in gopts

    def test_get_share_opts(self):
        """Each share exposes the options from its config section."""
        fh = io.StringIO(config1)
        g = sambacc.config.GlobalConfig(fh)
        ic = g.get("foobar")
        shares = list(ic.shares())
        for share in shares:
            if share.name == "demo":
                assert ("path", "/mnt/demo") in list(share.share_options())
            elif share.name == "stuff":
                assert ("path", "/mnt/stuff") in list(share.share_options())
            else:
                raise AssertionError(share.name)

    def test_get_share_paths(self):
        """share.path() returns the configured path option."""
        fh = io.StringIO(config1)
        g = sambacc.config.GlobalConfig(fh)
        ic = g.get("foobar")
        shares = list(ic.shares())
        for share in shares:
            if share.name == "demo":
                assert share.path() == "/mnt/demo"
            elif share.name == "stuff":
                assert share.path() == "/mnt/stuff"
            else:
                raise AssertionError(share.name)

    def test_unique_name(self):
        """instance_name becomes the 'netbios name' global option."""
        fh = io.StringIO(config2)
        g = sambacc.config.GlobalConfig(fh)
        ic = g.get("foobar")
        assert ("netbios name", "GANDOLPH") in list(ic.global_options())

    def test_many_global_opts(self):
        """config2 defines 8 global options; the instance_name adds a
        ninth ('netbios name')."""
        fh = io.StringIO(config2)
        g = sambacc.config.GlobalConfig(fh)
        ic = g.get("foobar")
        assert len(list(ic.global_options())) == (8 + 1)

    def test_some_users(self):
        """Users without explicit uid/gid get defaults of 1000 plus
        their position in the entry list."""
        fh = io.StringIO(config2)
        g = sambacc.config.GlobalConfig(fh)
        ic = g.get("foobar")
        users = list(ic.users())
        assert len(users) == 3
        assert users[0].username == "bob"
        assert users[0].uid == 1000
        assert users[0].gid == 1000
        # carol is entry index 2 -> uid/gid 1002
        pwline = ":".join(users[2].passwd_fields())
        assert pwline == "carol:x:1002:1002::/invalid:/bin/false"

    def test_some_groups(self):
        """Without an explicit groups section a group is derived per
        user, sharing the user's name and gid."""
        fh = io.StringIO(config2)
        g = sambacc.config.GlobalConfig(fh)
        ic = g.get("foobar")
        groups = list(ic.groups())
        assert len(groups) == 3
        assert groups[0].groupname == "bob"
        assert groups[0].gid == 1000

    def test_invalid_user_entry(self):
        """Non-numeric uid/gid values are rejected."""
        rec = {"name": "foo", "uid": "fred"}
        with pytest.raises(ValueError):
            sambacc.config.UserEntry(None, rec, 0)
        rec = {"name": "foo", "uid": 2200, "gid": "beep"}
        with pytest.raises(ValueError):
            sambacc.config.UserEntry(None, rec, 0)

    def test_invalid_group_entry(self):
        """A non-numeric gid is rejected."""
        rec = {"name": "foo", "gid": "boop"}
        with pytest.raises(ValueError):
            sambacc.config.GroupEntry(None, rec, 0)

    def test_user_entry_fields(self):
        """UserEntry: explicit ids win; otherwise 1000+num is used;
        an nt_hash yields nt_passwd bytes and an empty plaintext."""
        fh = io.StringIO(config2)
        g = sambacc.config.GlobalConfig(fh)
        ic = g.get("foobar")
        rec = {"name": "jim", "uid": 2200, "gid": 2200}
        ue = sambacc.config.UserEntry(ic, rec, 0)
        assert ue.uid == 2200
        assert ue.gid == 2200
        assert ue.plaintext_passwd == ""

        rec = {"name": "jim", "password": "letmein"}
        ue = sambacc.config.UserEntry(ic, rec, 10)
        assert ue.uid == 1010
        assert ue.gid == 1010
        assert ue.plaintext_passwd == "letmein"

        # hex nt_hash decodes to raw bytes
        rec = {"name": "jim", "nt_hash": "544849536973494D504F535349424C45"}
        ue = sambacc.config.UserEntry(ic, rec, 10)
        assert ue.uid == 1010
        assert ue.gid == 1010
        assert ue.plaintext_passwd == ""
        assert ue.nt_passwd == b"THISisIMPOSSIBLE"

    def test_group_entry_fields(self):
        """GroupEntry: explicit gid wins; otherwise 1000+num."""
        fh = io.StringIO(config2)
        g = sambacc.config.GlobalConfig(fh)
        ic = g.get("foobar")

        rec = {"name": "hackers", "gid": 2200}
        ue = sambacc.config.GroupEntry(ic, rec, 0)
        assert ue.gid == 2200

        rec = {"name": "hackers"}
        ue = sambacc.config.GroupEntry(ic, rec, 20)
        assert ue.gid == 1020

    def test_explicitly_defined_groups(self):
        """Explicit groups come first; users without a matching group
        (carol) still get a derived group appended."""
        fh = io.StringIO(config3)
        g = sambacc.config.GlobalConfig(fh)
        ic = g.get("foobar")
        groups = list(ic.groups())
        assert len(groups) == 3
        assert groups[0].groupname == "bobs"
        assert groups[0].gid == 2000
        assert groups[1].groupname == "alii"
        assert groups[1].gid == 2001
        assert groups[2].groupname == "carol"
        assert groups[2].gid == 1002


def test_read_config_files(tmpdir):
    """A single valid JSON config file is read without error."""
    path = tmpdir / "sample.json"
    with open(path, "w") as fh:
        fh.write(config1)
    sambacc.config.read_config_files([path])


def test_read_config_files_noexist(tmpdir):
    """When every listed config file is missing, ValueError is raised."""
    missing = [tmpdir / "fake1", tmpdir / "fake2"]
    with pytest.raises(ValueError):
        sambacc.config.read_config_files(missing)


def test_read_config_files_realerr(tmpdir):
    """Errors other than ENOENT (here: permission denied) propagate
    as OSError instead of being skipped."""
    if os.getuid() == 0:
        pytest.skip("test invalid when uid=0")
    path = tmpdir / "sample.json"
    with open(path, "w") as fh:
        fh.write(config1)
    # Drop read permission so opening the file fails with a
    # non-ENOENT error.
    os.chmod(path, 0o333)
    try:
        with pytest.raises(OSError):
            sambacc.config.read_config_files([path])
    finally:
        os.unlink(path)


def test_tesd_config_files_realerr_rootok(monkeypatch):
    """Any OSError from the opener propagates, regardless of uid."""
    # NOTE(review): "tesd" looks like a typo for "test"; pytest still
    # collects it since the name starts with "test". Rename cautiously.

    def _raise_oserror(*args):
        raise OSError("test!")

    monkeypatch.setattr(
        sambacc.opener.FileOpener, "open", _raise_oserror
    )
    with pytest.raises(OSError):
        sambacc.config.read_config_files(["/etc/foobar"])


def test_instance_with_ctdb():
    """with_ctdb is true only when instance_features contains ctdb."""
    plain = sambacc.config.GlobalConfig(io.StringIO(config1)).get("foobar")
    assert not plain.with_ctdb

    clustered = sambacc.config.GlobalConfig(io.StringIO(ctdb_config1)).get(
        "ctdb1"
    )
    assert clustered.with_ctdb


def test_instance_ctdb_smb_config():
    """ctdb_smb_config raises for non-ctdb instances; for ctdb ones it
    yields a registry-backed, clustered, share-less config."""
    plain = sambacc.config.GlobalConfig(io.StringIO(config1)).get("foobar")
    with pytest.raises(ValueError):
        plain.ctdb_smb_config()

    smb_cfg = (
        sambacc.config.GlobalConfig(io.StringIO(ctdb_config1))
        .get("ctdb1")
        .ctdb_smb_config()
    )
    opts = dict(smb_cfg.global_options())
    assert opts["clustering"] == "yes"
    assert opts["include"] == "registry"
    assert smb_cfg.shares() == []


def test_instance_ctdb_config():
    """ctdb_config is empty for non-ctdb instances and populated with
    cluster settings for ctdb-enabled ones."""
    plain = sambacc.config.GlobalConfig(io.StringIO(config1)).get("foobar")
    assert plain.ctdb_config() == {}

    cfg = (
        sambacc.config.GlobalConfig(io.StringIO(ctdb_config1))
        .get("ctdb1")
        .ctdb_config()
    )
    for key in ("cluster_meta_uri", "nodes_path", "log_level"):
        assert key in cfg


def test_ad_dc_config_demo():
    """An addc instance exposes domain settings, groups, and users."""
    dc = sambacc.config.GlobalConfig(io.StringIO(addc_config1)).get("demo")
    assert dc.with_addc

    dom = dc.domain()
    assert dom.realm == "DOMAIN1.SINK.TEST"
    assert dom.short_domain == "DOMAIN1"
    assert dom.dcname == "dc1"

    groups = sorted(dc.domain_groups(), key=lambda g: g.groupname)
    assert len(groups) == 2
    assert groups[0].groupname == "friends"

    users = sorted(dc.domain_users(), key=lambda u: u.username)
    assert len(users) == 2
    assert users[0].username == "bwayne"


def test_ad_dc_ou_config_demo():
    """OU-aware addc config: the OU is exposed and propagated to the
    groups and users assigned to it."""
    dc = sambacc.config.GlobalConfig(io.StringIO(addc_config2)).get("demo")
    assert dc.with_addc

    ous = sorted(dc.organizational_units(), key=lambda v: v.ou_name)
    assert len(ous) == 1
    assert ous[0].ou_name == "friends"

    groups = sorted(dc.domain_groups(), key=lambda g: g.groupname)
    assert len(groups) == 2
    assert groups[0].ou == "friends"

    users = sorted(dc.domain_users(), key=lambda u: u.username)
    assert len(users) == 2
    assert users[0].ou == "friends"


def test_ad_dc_invalid():
    """Domain accessors all raise ValueError on a non-addc instance."""
    ic = sambacc.config.GlobalConfig(io.StringIO(config1)).get("foobar")
    assert not ic.with_addc

    with pytest.raises(ValueError):
        ic.domain()

    # The iterator-returning accessors raise once consumed.
    for accessor in (
        ic.domain_users,
        ic.domain_groups,
        ic.organizational_units,
    ):
        with pytest.raises(ValueError):
            list(accessor())


def test_share_config_no_path():
    """A share whose options lack 'path' reports path() as None."""
    j = """{
    "samba-container-config": "v0",
    "configs": {
        "foobar":{
          "shares": ["flunk"],
          "globals": ["global0"]
        }
    },
    "shares": {
       "flunk": {
          "options": {}
       }
    },
    "globals": {
        "global0": {
           "options": {
             "server min protocol": "SMB2"
           }
        }
    }
}"""
    ic = sambacc.config.GlobalConfig(io.StringIO(j)).get("foobar")
    shares = list(ic.shares())
    assert len(shares) == 1
    assert shares[0].path() is None


@pytest.mark.parametrize(
    "json_a,json_b,iname,expect_equal",
    [
        # identical documents compare equal
        (config1, config1, "foobar", True),
        (addc_config1, addc_config1, "demo", True),
        # entirely different configs compare unequal
        (config1, config2, "foobar", False),
        # same structure, differing share path -> unequal
        (
            """{
    "samba-container-config": "v0",
    "configs": {
        "foobar":{
          "shares": ["flunk"],
          "globals": ["global0"]
        }
    },
    "shares": {
       "flunk": {
          "options": {"path": "/mnt/yikes"}
       }
    },
    "globals": {
        "global0": {
           "options": {
             "server min protocol": "SMB2"
           }
        }
    }
}""",
            """{
    "samba-container-config": "v0",
    "configs": {
        "foobar":{
          "shares": ["flunk"],
          "globals": ["global0"]
        }
    },
    "shares": {
       "flunk": {
          "options": {"path": "/mnt/psych"}
       }
    },
    "globals": {
        "global0": {
           "options": {
             "server min protocol": "SMB2"
           }
        }
    }
}""",
            "foobar",
            False,
        ),
        # same structure, differing global option value -> unequal
        (
            """{
    "samba-container-config": "v0",
    "configs": {
        "foobar":{
          "shares": ["flunk"],
          "globals": ["global0"]
        }
    },
    "shares": {
       "flunk": {
          "options": {"path": "/mnt/yikes"}
       }
    },
    "globals": {
        "global0": {
           "options": {
             "server min protocol": "SMB2"
           }
        }
    }
}""",
            """{
    "samba-container-config": "v0",
    "configs": {
        "foobar":{
          "shares": ["flunk"],
          "globals": ["global0"]
        }
    },
    "shares": {
       "flunk": {
          "options": {"path": "/mnt/yikes"}
       }
    },
    "globals": {
        "global0": {
           "options": {
             "server min protocol": "SMB1"
           }
        }
    }
}""",
            "foobar",
            False,
        ),
    ],
    # setting ID to a numeric range makes it a lot easier to read the
    # output on the console, versus having pytest plop two large json
    # blobs for each "row" of inputs
    ids=iter(range(100)),
)
def test_instance_config_equality(json_a, json_b, iname, expect_equal):
    """Instance configs parsed from equivalent JSON compare equal;
    any differing share or global option makes them unequal."""
    gca = sambacc.config.GlobalConfig(io.StringIO(json_a))
    gcb = sambacc.config.GlobalConfig(io.StringIO(json_b))
    instance_a = gca.get(iname)
    instance_b = gcb.get(iname)
    if expect_equal:
        assert instance_a == instance_b
    else:
        assert instance_a != instance_b


def test_permissions_config_default():
    """With no permissions settings anywhere, every share falls back
    to the 'none' method."""
    ic = sambacc.config.GlobalConfig(io.StringIO(config1)).get("foobar")
    for share in ic.shares():
        assert share.permissions_config().method == "none"


def test_permissions_config_instance():
    """Instance-level permissions (config2) apply to its shares."""
    ic = sambacc.config.GlobalConfig(io.StringIO(config2)).get("foobar")
    # TODO: improve test to ensure this isn't getting the default.  it does
    # work as designed based on coverage, but we shouldn't rely on that
    for share in ic.shares():
        assert share.permissions_config().method == "none"


def test_permissions_config_share():
    """Share-level permissions (config3) apply to that share."""
    ic = sambacc.config.GlobalConfig(io.StringIO(config3)).get("foobar")
    # TODO: improve test to ensure this isn't getting the default.  it does
    # work as designed based on coverage, but we shouldn't rely on that
    for share in ic.shares():
        assert share.permissions_config().method == "none"


def test_permissions_config_options():
    """Keys other than the reserved ones (method, status_xattr) are
    passed through as free-form options."""
    cfg = sambacc.config.PermissionsConfig(
        {
            "method": "initialize-share-perms",
            "status_xattr": "user.fake-stuff",
            "mode": "0777",
            "friendship": "always",
        }
    )
    extra = cfg.options
    assert len(extra) == 2
    assert "mode" in extra
    assert "friendship" in extra


def _can_import_toml():
    """Return true if one valid toml module can be imported.
    Work around importorskip only supporting one module name.
    """
    try:
        __import__("tomllib")
        return True
    except ImportError:
        pass
    try:
        __import__("tomli")
        return True
    except ImportError:
        pass
    return False


@pytest.mark.parametrize(
    "json_str,ok",
    [
        pytest.param("{}", False, id="empty-json"),
        pytest.param('{"samba-container-config":"v0"}', True, id="minimal"),
        pytest.param(
            """
{
    "samba-container-config": "v0",
    "configs": {
        "foobar": {
            "shares": [
                "foobar"
            ]
        }
    },
    "shares": {
        "foobar": {
            "options": {
                "a": "b"
            }
        }
    }
}
        """,
            True,
            id="one-share",
        ),
        pytest.param(
            """
{
    "samba-container-config": "v0",
    "configs": {
        "foobar": {
            "shares": [
                "foobar"
            ]
        }
    },
    "shares": {
        "foobar": {
            "options": {
                "a": "b"
            }
        }
    },
    "chairs": {"maple": true}
}
        """,
            False,
            id="invalid-section",
        ),
        pytest.param(
            """
{
    "samba-container-config": "v0",
    "configs": {
        "foobar": {
            "shares": [
                "foobar"
            ]
        }
    },
    "shares": {
        "foobar": {
            "options": {
                "a": "b"
            }
        }
    },
    "_chairs": {"maple": true}
}
        """,
            True,
            id="underscore-prefix",
        ),
        pytest.param(
            """
{
    "samba-container-config": "v0",
    "configs": {
        "foobar": {
            "lazlo": "quux",
            "shares": [
                "foobar"
            ]
        }
    },
    "shares": {
        "foobar": {
            "options": {
                "a": "b"
            }
        }
    }
}
        """,
            False,
            id="bad-config-param",
        ),
        pytest.param(
            """
{
    "samba-container-config": "v0",
    "configs": {
        "foobar": {
            "shares": "foobar"
        }
    },
    "shares": {
        "foobar": {
            "options": {
                "a": "b"
            }
        }
    }
}
        """,
            False,
            id="bad-config-type",
        ),
        pytest.param(
            """
{
    "samba-container-config": "v0",
    "configs": {
        "foobar": {
            "shares": [
                "foobar"
            ]
        }
    },
    "shares": {
        "foobar": {
            "blooper": true,
            "options": {
                "a": "b"
            }
        }
    }
}
        """,
            False,
            id="bad-share-prop",
        ),
    ],
)
def test_jsonschema_validation(json_str, ok):
    """Schema validation accepts valid documents (including sections
    prefixed with an underscore) and rejects unknown sections, unknown
    properties, and wrongly-typed values."""
    # Skip entirely when the optional jsonschema package is absent.
    jsonschema = pytest.importorskip("jsonschema")

    cfg = sambacc.config.GlobalConfig()
    if ok:
        cfg.load(io.StringIO(json_str), require_validation=True)
    else:
        with pytest.raises((ValueError, jsonschema.ValidationError)):
            cfg.load(io.StringIO(json_str), require_validation=True)


@pytest.mark.parametrize(
    "toml_str,ok",
    [
        pytest.param("", False, id="empty"),
        pytest.param("#####FOO", False, id="just-a-comment"),
        pytest.param(
            """
samba-container-config = "v0"
""",
            True,
            id="minimal",
        ),
        pytest.param(
            """
samba-container-config = "v0"

# Define configurations
[configs.foobar]
shares = ["foobar"]

# Define share options
[shares.foobar.options]
a = "b"
""",
            True,
            id="one-share",
        ),
    ],
)
@pytest.mark.skipif(not _can_import_toml(), reason="no toml module")
def test_toml_configs_no_validation(toml_str, ok):
    """TOML documents load (or fail with ValueError) without schema
    validation; TOML sources are read as bytes."""
    cfg = sambacc.config.GlobalConfig()
    fh = io.BytesIO(toml_str.encode("utf8"))
    if ok:
        cfg.load(
            fh,
            require_validation=False,
            config_format=sambacc.config.ConfigFormat.TOML,
        )
    else:
        with pytest.raises(ValueError):
            cfg.load(
                fh,
                require_validation=False,
                config_format=sambacc.config.ConfigFormat.TOML,
            )

# TOML loading with schema validation enabled: in addition to structural
# errors, schema violations (e.g. a string where instance_features must
# be a list) must be rejected.
@pytest.mark.parametrize(
    "toml_str,ok",
    [
        pytest.param("", False, id="empty"),
        pytest.param("#####FOO", False, id="just-a-comment"),
        pytest.param(
            """
samba-container-config = "v0"
""",
            True,
            id="minimal",
        ),
        pytest.param(
            """
samba-container-config = "v0"

# Define configurations
[configs.foobar]
shares = ["foobar"]

# Define share options
[shares.foobar.options]
a = "b"
""",
            True,
            id="one-share",
        ),
        pytest.param(
            """
samba-container-config = "v0"

# Define configurations
[configs.foobar]
shares = ["foobar"]
instance_features = "Kibbe"

# Define share options
[shares.foobar.options]
a = "b"
""",
            False,
            id="bad-instance_features",
        ),
        pytest.param(
            """
samba-container-config = "v0"

# Define configurations
[configs.foobar]
shares = ["foobar"]
instance_features = ["ctdb"]

# Define share options
[shares.foobar.options]
a = "b"
""",
            True,
            id="ok-instance_features",
        ),
        pytest.param(
            """
samba-container-config = "v0"

[configs.demo]
shares = ["share"]
globals = ["default"]
instance_features = ["ctdb"]
instance_name = "SAMBA"

[shares.share.options]
"path" = "/share"
"read only" = "no"
"valid users" = "sambauser, otheruser"

[globals.default.options]
"security" = "user"
"server min protocol" = "SMB2"
"load printers" = "no"
"printing" = "bsd"
"printcap name" = "/dev/null"
"disable spoolss" = "yes"
"guest ok" = "no"

[[users.all_entries]]
name = "sambauser"
password = "samba"

[[users.all_entries]]
name = "otheruser"
password = "insecure321"
""",
            True,
            id="complex",
        ),
    ],
)
@pytest.mark.skipif(not _can_import_toml(), reason="no toml module")
def test_toml_configs_validation(toml_str, ok):
    """Load TOML input with require_validation=True.

    Failing cases may surface as ValueError or as a jsonschema
    ValidationError, depending on where the input is rejected.
    """
    # skip the whole test if jsonschema is not installed
    jsonschema = pytest.importorskip("jsonschema")

    cfg = sambacc.config.GlobalConfig()
    # TOML is loaded from a binary stream, hence BytesIO + encode
    fh = io.BytesIO(toml_str.encode("utf8"))
    if ok:
        cfg.load(
            fh,
            require_validation=True,
            config_format=sambacc.config.ConfigFormat.TOML,
        )
    else:
        with pytest.raises((ValueError, jsonschema.ValidationError)):
            cfg.load(
                fh,
                require_validation=True,
                config_format=sambacc.config.ConfigFormat.TOML,
            )


# YAML loading without schema validation: mirrors the TOML no-validation
# cases above but for the YAML config format.
@pytest.mark.parametrize(
    "yaml_str,ok",
    [
        pytest.param("", False, id="empty"),
        pytest.param("#####FOO", False, id="just-a-comment"),
        pytest.param(
            """
samba-container-config: "v0"
""",
            True,
            id="minimal",
        ),
        pytest.param(
            """
samba-container-config: "v0"
# Define configurations
configs:
  foobar:
    shares:
      - foobar
shares:
  foobar:
    options:
      a: b
""",
            True,
            id="one-share",
        ),
    ],
)
def test_yaml_configs_no_validation(yaml_str, ok):
    """Load YAML input with require_validation=False.

    Cases marked ok must load cleanly; all others must raise ValueError.
    Skipped entirely when PyYAML is unavailable.
    """
    pytest.importorskip("yaml")

    cfg = sambacc.config.GlobalConfig()
    # YAML is loaded from a binary stream, hence BytesIO + encode
    fh = io.BytesIO(yaml_str.encode("utf8"))
    if ok:
        cfg.load(
            fh,
            require_validation=False,
            config_format=sambacc.config.ConfigFormat.YAML,
        )
    else:
        with pytest.raises(ValueError):
            cfg.load(
                fh,
                require_validation=False,
                config_format=sambacc.config.ConfigFormat.YAML,
            )


# YAML loading with schema validation enabled: schema violations (e.g.
# a scalar instance_features) must be rejected in addition to
# structurally invalid documents.
@pytest.mark.parametrize(
    "yaml_str,ok",
    [
        pytest.param("", False, id="empty"),
        pytest.param("#####FOO", False, id="just-a-comment"),
        pytest.param(
            """
samba-container-config: v0
""",
            True,
            id="minimal",
        ),
        pytest.param(
            """
samba-container-config: v0
# Define configurations
configs:
  foobar:
    shares:
      - foobar
# Define shares
shares:
  foobar:
    options:
      a: b
""",
            True,
            id="one-share",
        ),
        pytest.param(
            """
samba-container-config: v0
configs:
  foobar:
    instance_features: baroof
    shares:
      - foobar
shares:
  foobar:
    options:
      a: b
""",
            False,
            id="bad-instance_features",
        ),
        pytest.param(
            """
samba-container-config: v0
configs:
  foobar:
    instance_features:
      - ctdb
    shares:
      - foobar
shares:
  foobar:
    options:
      a: b
""",
            True,
            id="ok-instance_features",
        ),
        pytest.param(
            """
samba-container-config: "v0"
# Configure our demo instance
configs:
  demo:
    shares: ["share"]
    globals: ["default"]
    instance_features: ["ctdb"]
    instance_name: "SAMBA"
# Configure the share
shares:
  share:
    options:
      "path": "/share"
      "read only": "no"
      "valid users": "sambauser, otheruser"
# Configure globals
globals:
  default:
    options:
      "security": "user"
      "server min protocol": "SMB2"
      "load printers": "no"
      "printing": "bsd"
      "printcap name": "/dev/null"
      "disable spoolss": "yes"
      "guest ok": "no"
# Configure users
users:
  all_entries:
    - {"name": "sambauser", "password": "samba"}
    - {"name": "otheruser", "password": "insecure321"}
    - name: cooluser
      password: snarf
""",
            True,
            id="complex",
        ),
    ],
)
def test_yaml_configs_validation(yaml_str, ok):
    """Load YAML input with require_validation=True.

    Failing cases may surface as ValueError or as a jsonschema
    ValidationError.  Skipped when PyYAML or jsonschema is missing.
    """
    pytest.importorskip("yaml")
    jsonschema = pytest.importorskip("jsonschema")

    cfg = sambacc.config.GlobalConfig()
    # YAML is loaded from a binary stream, hence BytesIO + encode
    fh = io.BytesIO(yaml_str.encode("utf8"))
    if ok:
        cfg.load(
            fh,
            require_validation=True,
            config_format=sambacc.config.ConfigFormat.YAML,
        )
    else:
        with pytest.raises((ValueError, jsonschema.ValidationError)):
            cfg.load(
                fh,
                require_validation=True,
                config_format=sambacc.config.ConfigFormat.YAML,
            )
07070100000060000081A4000000000000000000000001684BE19C0000164B000000000000000000000000000000000000003800000000sambacc-v0.6+git.60.2f89a38/tests/test_container_dns.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import io
import time

import sambacc.container_dns


# Sample container-dns JSON documents used by the tests below.
# J1: ref "example" with one external and one internal host entry.
J1 = """
{
  "ref": "example",
  "items": [
    {
      "name": "users",
      "ipv4": "192.168.76.40",
      "target": "external"
    },
    {
      "name": "users-cluster",
      "ipv4": "10.235.102.5",
      "target": "internal"
    }
  ]
}
"""

# J2: a different ref ("example2") with a single internal entry.
J2 = """
{
  "ref": "example2",
  "items": [
    {
      "name": "users-cluster",
      "ipv4": "10.235.102.5",
      "target": "internal"
    }
  ]
}
"""

# J3: same ref as J1 but the external entry has a changed ipv4 address.
J3 = """
{
  "ref": "example",
  "items": [
    {
      "name": "users",
      "ipv4": "192.168.76.108",
      "target": "external"
    },
    {
      "name": "users-cluster",
      "ipv4": "10.235.102.5",
      "target": "internal"
    }
  ]
}
"""


def test_parse():
    """Parsing J1 yields a HostState with ref "example" and two items."""
    state = sambacc.container_dns.parse(io.StringIO(J1))
    assert state.ref == "example"
    assert len(state.items) == 2
    first = state.items[0]
    assert first.name == "users"
    assert first.ipv4_addr == "192.168.76.40"


def test_parse2():
    """Parsing J2 yields a single-item HostState with ref "example2"."""
    state = sambacc.container_dns.parse(io.StringIO(J2))
    assert state.ref == "example2"
    assert len(state.items) == 1
    only = state.items[0]
    assert only.name == "users-cluster"
    assert only.ipv4_addr == "10.235.102.5"


def test_same():
    """HostState equality depends on both the ref and the full item list."""

    def info(name, addr):
        # small helper: all entries in this test use the "external" target
        return sambacc.container_dns.HostInfo(name, addr, "external")

    state_a = sambacc.container_dns.HostState(ref="apple")
    state_b = sambacc.container_dns.HostState(ref="orange")
    # differing refs -> unequal
    assert state_a != state_b
    state_b = sambacc.container_dns.HostState(ref="apple")
    assert state_a == state_b
    # identical item lists -> equal
    state_a.items = [info("a", "10.10.10.10"), info("b", "10.10.10.11")]
    state_b.items = [info("a", "10.10.10.10"), info("b", "10.10.10.11")]
    assert state_a == state_b
    # one differing address -> unequal
    state_b.items = [info("a", "10.10.10.10"), info("b", "10.10.10.12")]
    assert state_a != state_b
    # differing item counts -> unequal
    state_b.items = [info("a", "10.10.10.10")]
    assert state_a != state_b


def test_register_dummy(capfd):
    """register() with an "echo" prefix prints the underlying commands
    instead of executing them; check that the expected `net ads dns
    register` line for the external entry appears on stdout."""

    def register(iconfig, hs):
        # prefix=["echo"] turns command execution into plain output
        return sambacc.container_dns.register(
            iconfig,
            hs,
            prefix=["echo"],
        )

    hs = sambacc.container_dns.HostState(
        ref="example",
        items=[
            sambacc.container_dns.HostInfo(
                "foobar", "10.10.10.10", "external"
            ),
            sambacc.container_dns.HostInfo(
                "blat", "192.168.10.10", "internal"
            ),
        ],
    )
    register("example.test", hs)
    out, err = capfd.readouterr()
    assert "net ads -P dns register foobar.example.test 10.10.10.10" in out


def test_parse_and_update(tmp_path):
    """parse_and_update registers only when the parsed state differs
    from the previous state; reg_data records each registration call."""
    reg_data = []

    def _register(domain, hs, target_name=""):
        # stand-in for the real registration function
        reg_data.append((domain, hs))
        return True

    path = tmp_path / "test.json"
    with open(path, "w") as fh:
        fh.write(J1)

    # first pass, no previous state: must register
    hs1, up = sambacc.container_dns.parse_and_update(
        "example.com", path, reg_func=_register
    )
    assert len(reg_data) == 1
    assert up
    # same content as previous state: no new registration
    hs2, up = sambacc.container_dns.parse_and_update(
        "example.com", path, previous=hs1, reg_func=_register
    )
    assert len(reg_data) == 1
    assert not up

    # changed content (J2): must register again with the new state
    with open(path, "w") as fh:
        fh.write(J2)
    hs3, up = sambacc.container_dns.parse_and_update(
        "example.com", path, previous=hs2, reg_func=_register
    )
    assert len(reg_data) == 2
    assert reg_data[-1][1] == hs3
    assert up


def test_watch(tmp_path):
    """watch() loops until the pause function raises KeyboardInterrupt,
    re-registering only when the source file's content changes."""
    reg_data = []

    def _register(domain, hs, target_name=""):
        # record each registration instead of running real commands
        reg_data.append((domain, hs))

    def _update(domain, source, previous=None):
        return sambacc.container_dns.parse_and_update(
            domain, source, previous=previous, reg_func=_register
        )

    scount = 0

    def _sleep():
        # count iterations; stop the watch loop after 10 passes
        nonlocal scount
        scount += 1
        if scount > 10:
            raise KeyboardInterrupt()
        time.sleep(0.05)

    path = tmp_path / "test.json"
    with open(path, "w") as fh:
        fh.write(J1)

    # content never changes: only the initial registration happens
    sambacc.container_dns.watch(
        "example.com",
        path,
        update_func=_update,
        pause_func=_sleep,
        print_func=lambda x: None,
    )
    assert scount > 10
    assert len(reg_data) == 1

    with open(path, "w") as fh:
        fh.write(J1)
    scount = 0

    def _sleep2():
        # rewrite the file to J3 on iterations 5 and 10; the second
        # write is a no-op change so it must not re-register
        nonlocal scount
        scount += 1
        if scount == 5:
            with open(path, "w") as fh:
                fh.write(J3)
        if scount == 10:
            with open(path, "w") as fh:
                fh.write(J3)
        if scount > 20:
            raise KeyboardInterrupt()
        time.sleep(0.05)

    sambacc.container_dns.watch(
        "example.com",
        path,
        update_func=_update,
        pause_func=_sleep2,
        print_func=lambda x: None,
    )
    assert scount > 20
    # initial J1 registration + one registration for the J1 -> J3 change
    assert len(reg_data) == 3
07070100000061000081A4000000000000000000000001684BE19C00004C52000000000000000000000000000000000000002F00000000sambacc-v0.6+git.60.2f89a38/tests/test_ctdb.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import io
import json
import os

import pytest

import sambacc.config
import sambacc.samba_cmds
from sambacc import ctdb


def test_migrate_tdb(tmpdir, monkeypatch):
    """migrate_tdb converts known tdb files via ltdbtool into the
    destination dir (as *.tdb.0) and ignores unrelated tdb files.

    A fake script is prefixed in front of all samba commands; it checks
    that it was invoked as `ltdbtool convert` and then just copies the
    source file to the destination.
    """
    src = tmpdir / "src"
    os.mkdir(src)
    dst = tmpdir / "dst"
    os.mkdir(dst)
    fake = tmpdir / "fake.sh"
    # point the migration source dirs and the command prefix at our fakes
    monkeypatch.setattr(ctdb, "_SRC_TDB_DIRS", [str(src)])
    monkeypatch.setattr(sambacc.samba_cmds, "_GLOBAL_PREFIX", [str(fake)])

    # NOTE(review): `==` inside `[ ... ]` is a bashism; this works when
    # /bin/sh is bash but not under strict POSIX shells such as dash.
    with open(fake, "w") as fh:
        fh.write("#!/bin/sh\n")
        fh.write('[ "$1" == "ltdbtool" ] || exit 1\n')
        fh.write('[ "$2" == "convert" ] || exit 1\n')
        fh.write('exec cp "$4" "$5"\n')
    os.chmod(fake, 0o755)

    with open(src / "registry.tdb", "w") as fh:
        fh.write("fake")
    with open(src / "passdb.tdb", "w") as fh:
        fh.write("fake")
    with open(src / "mango.tdb", "w") as fh:
        fh.write("fake")

    ctdb.migrate_tdb(None, str(dst))

    # registry/passdb are migrated; the unknown mango.tdb is skipped
    assert os.path.exists(dst / "registry.tdb.0")
    assert os.path.exists(dst / "passdb.tdb.0")
    assert not os.path.exists(dst / "mango.tdb.0")


def test_ensure_ctdbd_etc_files(tmpdir):
    """ensure_ctdbd_etc_files populates the etc dir with symlinks."""
    source_dir = tmpdir / "src"
    etc_dir = tmpdir / "dst"
    for newdir in (source_dir, etc_dir):
        os.mkdir(newdir)

    # The function largely just creates a bunch of symlinks, so it
    # doesn't need much fakery.
    ctdb.ensure_ctdbd_etc_files(etc_path=etc_dir, src_path=source_dir)
    expected_links = (
        "functions",
        "notify.sh",
        "events/legacy/00.ctdb.script",
    )
    for linkname in expected_links:
        assert os.path.islink(etc_dir / linkname)


def test_pnn_in_nodes(tmpdir):
    """pnn_in_nodes reports whether a given pnn is present and usable
    in the nodes state file."""
    nodes_json = tmpdir / "nodes.json"
    real_path = tmpdir / "nodes"

    # state file does not exist yet -> error
    with pytest.raises(Exception):
        ctdb.pnn_in_nodes(0, nodes_json, real_path)

    # empty state -> pnn not found
    with open(nodes_json, "w") as fh:
        fh.write("{}")
    result = ctdb.pnn_in_nodes(0, nodes_json, real_path)
    assert not result

    # pnn 0 is "ready" -> found; pnn 1 is only "new" -> not found
    with open(nodes_json, "w") as fh:
        fh.write(
            """
            {"nodes": [
                {"node": "10.0.0.10", "pnn": 0, "state": "ready"},
                {"node": "10.0.0.11", "pnn": 1, "state": "new"}
            ]}
        """
        )
    result = ctdb.pnn_in_nodes(0, nodes_json, real_path)
    assert result
    result = ctdb.pnn_in_nodes(1, nodes_json, real_path)
    assert not result


class _Stop(Exception):
    """Sentinel raised by test pause functions to exit ctdb wait loops
    after a single iteration."""

    pass


def test_manage_nodes(tmpdir, monkeypatch):
    """Exercise manage_nodes across the node state lifecycle.

    Each scenario writes a nodes.json state file (and, where needed, the
    real ctdb nodes file), then runs manage_nodes with a pause function
    that raises _Stop so exactly one iteration executes.  The samba/ctdb
    commands are prefixed with `true` so reloads always "succeed".
    """
    nodes_json = tmpdir / "nodes.json"
    real_path = tmpdir / "nodes"
    monkeypatch.setattr(sambacc.samba_cmds, "_GLOBAL_PREFIX", ["true"])

    def once():
        # end the manage_nodes loop after one pass
        raise _Stop()

    # no state file at all
    with pytest.raises(FileNotFoundError):
        ctdb.manage_nodes(
            0, nodes_json=nodes_json, real_path=real_path, pause_func=once
        )

    # node not present - can not update
    with open(nodes_json, "w") as fh:
        fh.write(
            """
            {"nodes": [
            ]}
        """
        )
    with pytest.raises(_Stop):
        ctdb.manage_nodes(
            0, nodes_json=nodes_json, real_path=real_path, pause_func=once
        )

    # node present, not in nodes - can not update
    with open(nodes_json, "w") as fh:
        fh.write(
            """
            {"nodes": [
                {"identity":"a", "node": "10.0.0.10", "pnn": 0, "state": "new"}
            ]}
        """
        )
    with pytest.raises(_Stop):
        ctdb.manage_nodes(
            0, nodes_json=nodes_json, real_path=real_path, pause_func=once
        )

    # node present, in nodes - nothing to do
    with open(nodes_json, "w") as fh:
        fh.write(
            """
            {"nodes": [
                {"identity":"a", "node": "10.0.0.10", "pnn": 0,
                 "state": "ready"}
            ]}
        """
        )
    with open(real_path, "w") as fh:
        fh.write("10.0.0.10\n")
    with pytest.raises(_Stop):
        ctdb.manage_nodes(
            0, nodes_json=nodes_json, real_path=real_path, pause_func=once
        )

    # node present, in nodes - new node in json
    with open(nodes_json, "w") as fh:
        fh.write(
            """
            {"nodes": [
                {"identity":"a", "node": "10.0.0.10", "pnn": 0,
                 "state": "ready"},
                {"identity":"b", "node": "10.0.0.11", "pnn": 1,
                 "state": "new"}
            ]}
        """
        )
    with open(real_path, "w") as fh:
        fh.write("10.0.0.10\n")
    with pytest.raises(_Stop):
        ctdb.manage_nodes(
            0, nodes_json=nodes_json, real_path=real_path, pause_func=once
        )
    # the new node's address must have been appended to the nodes file
    with open(real_path, "r") as fh:
        lines = [x.strip() for x in fh.readlines()]
    assert "10.0.0.10" in lines
    assert "10.0.0.11" in lines

    # invalid state - nodes file and nodes json out of whack
    with open(nodes_json, "w") as fh:
        fh.write(
            """
            {"nodes": [
                {"identity":"a", "node": "10.0.0.10", "pnn": 0,
                 "state": "ready"},
                {"identity":"b", "node": "10.0.0.11", "pnn": 1,
                 "state": "new"}
            ]}
        """
        )
    with open(real_path, "w") as fh:
        fh.write("10.0.0.10\n")
        fh.write("10.0.0.12\n")
        fh.write("10.0.0.13\n")
    with pytest.raises(ValueError):
        ctdb.manage_nodes(
            0, nodes_json=nodes_json, real_path=real_path, pause_func=once
        )

    # node present but json file shows update incomplete
    with open(nodes_json, "w") as fh:
        fh.write(
            """
            {"nodes": [
                {"identity":"a", "node": "10.0.0.10", "pnn": 0,
                 "state": "ready"},
                {"identity":"b", "node": "10.0.0.11", "pnn": 1,
                 "state": "new"}
            ]}
        """
        )
    with open(real_path, "w") as fh:
        fh.write("10.0.0.10\n")
        fh.write("10.0.0.11\n")
    with pytest.raises(_Stop):
        ctdb.manage_nodes(
            0, nodes_json=nodes_json, real_path=real_path, pause_func=once
        )
    # json state must be caught up: node 1 is now "ready"
    with open(real_path, "r") as fh:
        lines = [x.strip() for x in fh.readlines()]
    assert "10.0.0.10" in lines
    assert "10.0.0.11" in lines
    with open(nodes_json, "r") as fh:
        jdata = json.load(fh)
    assert jdata["nodes"][1]["node"] == "10.0.0.11"
    assert jdata["nodes"][1]["state"] == "ready"

    # node 1 changed its address: the old entry gets commented out in
    # the nodes file and the json state moves to "replaced"
    with open(nodes_json, "w") as fh:
        fh.write(
            """
            {"nodes": [
                {"identity":"a", "node": "10.0.0.10", "pnn": 0,
                 "state": "ready"},
                {"identity":"b", "node": "10.0.1.11", "pnn": 1,
                 "state": "changed"}
            ]}
        """
        )
    with open(real_path, "w") as fh:
        fh.write("10.0.0.10\n")
        fh.write("10.0.0.11\n")
    with pytest.raises(_Stop):
        ctdb.manage_nodes(
            0, nodes_json=nodes_json, real_path=real_path, pause_func=once
        )
    with open(real_path, "r") as fh:
        lines = [x.strip() for x in fh.readlines()]
    assert "10.0.0.10" in lines
    assert "#10.0.0.11" in lines
    with open(nodes_json, "r") as fh:
        jdata = json.load(fh)
    assert jdata["nodes"][1]["node"] == "10.0.1.11"
    assert jdata["nodes"][1]["state"] == "replaced"

    # a second pass completes the replacement: new address is active
    # and the json state returns to "ready"
    with pytest.raises(_Stop):
        ctdb.manage_nodes(
            0, nodes_json=nodes_json, real_path=real_path, pause_func=once
        )
    with open(real_path, "r") as fh:
        lines = [x.strip() for x in fh.readlines()]
    assert "10.0.0.10" in lines
    assert "10.0.1.11" in lines
    with open(nodes_json, "r") as fh:
        jdata = json.load(fh)
    assert jdata["nodes"][1]["node"] == "10.0.1.11"
    assert jdata["nodes"][1]["state"] == "ready"


def test_manage_nodes_refresh_fails(tmpdir, monkeypatch):
    """When the ctdb reload command fails, the new node is still written
    to the nodes file but its json state must remain "new"."""
    nodes_json = tmpdir / "nodes.json"
    real_path = tmpdir / "nodes"
    # prefix all samba/ctdb commands with `false` so they always fail
    monkeypatch.setattr(sambacc.samba_cmds, "_GLOBAL_PREFIX", ["false"])

    def once():
        # end the manage_nodes loop after one pass
        raise _Stop()

    # node needs to be added
    with open(nodes_json, "w") as fh:
        fh.write(
            """
            {"nodes": [
                {"node": "10.0.0.10", "pnn": 0, "state": "ready"},
                {"node": "10.0.0.11", "pnn": 1, "state": "new"}
            ]}
        """
        )
    with open(real_path, "w") as fh:
        fh.write("10.0.0.10\n")
    with pytest.raises(Exception):
        ctdb.manage_nodes(
            0, nodes_json=nodes_json, real_path=real_path, pause_func=once
        )
    with open(real_path, "r") as fh:
        lines = [x.strip() for x in fh.readlines()]
    assert "10.0.0.10" in lines
    assert "10.0.0.11" in lines
    with open(nodes_json, "r") as fh:
        jdata = json.load(fh)
    assert jdata["nodes"][1]["node"] == "10.0.0.11"
    # state was not advanced to "ready" because the refresh failed
    assert jdata["nodes"][1]["state"] == "new"


def test_manage_nodes_invalid_state(tmpdir):
    """A node marked "ready" in json but absent from the real nodes file
    is an inconsistent state and must raise ValueError."""
    nodes_json = tmpdir / "nodes.json"
    real_path = tmpdir / "nodes"

    def once():
        # end the manage_nodes loop after one pass
        raise _Stop()

    # node is ready but missing from nodes file
    with open(nodes_json, "w") as fh:
        fh.write(
            """
            {"nodes": [
                {"node": "10.0.0.10", "pnn": 0, "state": "ready"},
                {"node": "10.0.0.11", "pnn": 1, "state": "ready"}
            ]}
        """
        )
    with open(real_path, "w") as fh:
        fh.write("10.0.0.10\n")
    with pytest.raises(ValueError):
        ctdb.manage_nodes(
            0, nodes_json=nodes_json, real_path=real_path, pause_func=once
        )


def test_add_node_to_statefile(tmpdir):
    """add_node_to_statefile creates/extends nodes.json; a node already
    in the real nodes file starts "ready", otherwise "new", and entries
    conflicting with existing identity/pnn values are rejected."""
    nodes_json = tmpdir / "nodes.json"

    # first node, already present in the nodes file -> state "ready"
    ctdb.add_node_to_statefile(
        identity="node-0",
        node="10.0.0.10",
        pnn=0,
        path=nodes_json,
        in_nodes=True,
    )
    with open(nodes_json, "r") as fh:
        jdata = json.load(fh)
    assert jdata["nodes"][0]["node"] == "10.0.0.10"
    assert jdata["nodes"][0]["pnn"] == 0
    assert jdata["nodes"][0]["state"] == "ready"

    # same identity/pnn with a different address -> rejected
    with pytest.raises(ValueError):
        ctdb.add_node_to_statefile(
            identity="node-0",
            node="10.0.0.11",
            pnn=0,
            path=nodes_json,
            in_nodes=False,
        )

    # existing identity with a new pnn -> rejected
    with pytest.raises(ValueError):
        ctdb.add_node_to_statefile(
            identity="node-0",
            node="10.0.1.11",
            pnn=2,
            path=nodes_json,
            in_nodes=False,
        )

    # genuinely new node, not yet in the nodes file -> state "new"
    ctdb.add_node_to_statefile(
        identity="node-1",
        node="10.0.0.11",
        pnn=1,
        path=nodes_json,
        in_nodes=False,
    )
    with open(nodes_json, "r") as fh:
        jdata = json.load(fh)
    assert jdata["nodes"][0]["node"] == "10.0.0.10"
    assert jdata["nodes"][0]["pnn"] == 0
    assert jdata["nodes"][0]["state"] == "ready"
    assert jdata["nodes"][1]["node"] == "10.0.0.11"
    assert jdata["nodes"][1]["pnn"] == 1
    assert jdata["nodes"][1]["state"] == "new"


def test_ensure_ctdb_node_present(tmpdir):
    """ensure_ctdb_node_present appends the node at the expected pnn
    line of the real nodes file and maintains the canonical symlink."""
    real_path = tmpdir / "nodes"
    lpath = tmpdir / "nodes.lnk"

    assert not os.path.exists(real_path)

    # first node: file is created, canon path becomes a symlink to it
    ctdb.ensure_ctdb_node_present(
        node="10.0.0.10", expected_pnn=0, real_path=real_path, canon_path=lpath
    )
    assert os.path.islink(lpath)
    with open(real_path, "r") as fh:
        lines = [x.strip() for x in fh.readlines()]
    assert ["10.0.0.10"] == lines

    # second node appended at pnn 1
    ctdb.ensure_ctdb_node_present(
        node="10.0.0.11", expected_pnn=1, real_path=real_path, canon_path=lpath
    )
    assert os.path.islink(lpath)
    with open(real_path, "r") as fh:
        lines = [x.strip() for x in fh.readlines()]
    assert ["10.0.0.10", "10.0.0.11"] == lines

    # pnn 0 already belongs to a different address -> rejected
    with pytest.raises(ValueError):
        ctdb.ensure_ctdb_node_present(
            node="10.0.0.11",
            expected_pnn=0,
            real_path=real_path,
            canon_path=lpath,
        )


def test_write_ctdb_conf(tmpdir):
    """write_ctdb_conf renders the supplied parameters into the output."""
    conf_path = tmpdir / "ctdb.conf"
    params = {
        "log_level": "DEBUG",
        "recovery_lock": "/tmp/foo/lock",
    }
    with open(conf_path, "w") as fh:
        ctdb.write_ctdb_conf(fh, params)
    with open(conf_path, "r") as fh:
        content = fh.read()
    # both parameter values must appear in the rendered config
    for expected in ("DEBUG", "/tmp/foo/lock"):
        assert expected in content


def test_ensure_ctdb_conf(tmpdir):
    """ensure_ctdb_conf renders ctdb.conf from the ctdb1 sample config."""
    # local imports: the sample config lives in a sibling test module
    from .test_config import ctdb_config1
    from sambacc.config import GlobalConfig

    cfg = GlobalConfig(io.StringIO(ctdb_config1))
    path = tmpdir / "ctdb.conf"

    ctdb.ensure_ctdb_conf(iconfig=cfg.get("ctdb1"), path=path)
    with open(path, "r") as fh:
        data = fh.read()
    # values from the ctdb1 sample config must appear in the output
    assert "NOTICE" in data
    assert "ERROR" in data
    assert "/var/lib/ctdb/shared/RECOVERY" in data


def test_ensure_smb_conf(tmpdir):
    """ensure_smb_conf writes an smb.conf enabling clustering and the
    registry include for a ctdb-enabled instance."""
    # local imports: the sample config lives in a sibling test module
    from .test_config import ctdb_config1
    from sambacc.config import GlobalConfig

    cfg = GlobalConfig(io.StringIO(ctdb_config1))
    path = tmpdir / "smb.conf"

    ctdb.ensure_smb_conf(iconfig=cfg.get("ctdb1"), path=path)
    with open(path, "r") as fh:
        data = fh.read()
    assert "clustering = yes" in data
    assert "include = registry" in data


def test_refresh_node_in_statefile(tmpdir):
    """refresh_node_in_statefile updates an existing entry: unchanged
    input is a no-op, a new address moves the state to "changed", and
    mismatched or unknown identities raise errors."""
    nodes_json = tmpdir / "nodes.json"

    ctdb.add_node_to_statefile(
        identity="node-0",
        node="10.0.0.10",
        pnn=0,
        path=nodes_json,
        in_nodes=True,
    )

    # no changes
    ctdb.refresh_node_in_statefile(
        identity="node-0",
        node="10.0.0.10",
        pnn=0,
        path=nodes_json,
    )
    with open(nodes_json, "r") as fh:
        jdata = json.load(fh)
    assert len(jdata["nodes"]) == 1
    assert jdata["nodes"][0]["node"] == "10.0.0.10"
    assert jdata["nodes"][0]["identity"] == "node-0"
    assert jdata["nodes"][0]["pnn"] == 0
    assert jdata["nodes"][0]["state"] == "ready"

    # ip has changed
    ctdb.refresh_node_in_statefile(
        identity="node-0",
        node="10.0.1.10",
        pnn=0,
        path=nodes_json,
    )
    with open(nodes_json, "r") as fh:
        jdata = json.load(fh)
    assert len(jdata["nodes"]) == 1
    assert jdata["nodes"][0]["node"] == "10.0.1.10"
    assert jdata["nodes"][0]["identity"] == "node-0"
    assert jdata["nodes"][0]["pnn"] == 0
    assert jdata["nodes"][0]["state"] == "changed"

    # identity does not match the entry at pnn 0 -> rejected
    with pytest.raises(ValueError):
        ctdb.refresh_node_in_statefile(
            identity="foobar",
            node="10.0.1.10",
            pnn=0,
            path=nodes_json,
        )

    # refreshing a node that was never added -> NodeNotPresent
    with pytest.raises(ctdb.NodeNotPresent):
        ctdb.refresh_node_in_statefile(
            identity="node-1",
            node="10.0.0.11",
            pnn=1,
            path=nodes_json,
        )


def test_next_state():
    """next_state advances every NodeState one step toward READY."""
    transitions = [
        (ctdb.NodeState.READY, ctdb.NodeState.READY),
        (ctdb.NodeState.NEW, ctdb.NodeState.READY),
        (ctdb.NodeState.REPLACED, ctdb.NodeState.READY),
        (ctdb.NodeState.CHANGED, ctdb.NodeState.REPLACED),
    ]
    for current, expected in transitions:
        assert ctdb.next_state(current) == expected


def test_cli_leader_locator(tmpdir, monkeypatch, caplog):
    """CLILeaderLocator decides leadership by comparing the local pnn
    with the leader reported by the ctdb admin command; failures of
    either sub-command are logged and treated as "not leader"."""
    import logging

    caplog.set_level(logging.INFO)
    fake_ctdb = tmpdir / "fake_ctdb.sh"
    # all samba/ctdb commands run through our fake script
    monkeypatch.setattr(sambacc.samba_cmds, "_GLOBAL_PREFIX", [fake_ctdb])
    monkeypatch.setenv("SAMBA_SPECIFICS", "ctdb_leader_admin_command")
    ldr_admin_cmd = sambacc.samba_cmds.ctdb_leader_admin_cmd()

    def _fake_ctdb_script(pnn, recmaster):
        # write a script whose `pnn` and leader-admin subcommands run the
        # given shell snippets; any other subcommand exits 5
        with open(fake_ctdb, "w") as fh:
            fh.write("#!/bin/sh\n")
            fh.write("case $2 in\n")
            fh.write(f"pnn) {pnn};;\n")
            fh.write(f"{ldr_admin_cmd}) {recmaster};;\n")
            fh.write("esac\n")
            fh.write("exit 5\n")
        os.chmod(fake_ctdb, 0o700)

    # local pnn equals the reported leader -> is leader
    _fake_ctdb_script(pnn="echo 0; exit 0", recmaster="echo 0; exit 0")
    with ctdb.CLILeaderLocator() as status:
        assert status.is_leader()

    # local pnn differs from the leader -> not leader
    _fake_ctdb_script(pnn="echo 1; exit 0", recmaster="echo 0; exit 0")
    with ctdb.CLILeaderLocator() as status:
        assert not status.is_leader()

    # test error handling
    _fake_ctdb_script(pnn="exit 1", recmaster="echo 0; exit 0")
    with ctdb.CLILeaderLocator() as status:
        assert not status.is_leader()
    # the failing pnn command is logged; the admin command is not
    assert "pnn" in caplog.records[-1].getMessage()
    assert "['" + ldr_admin_cmd + "']" not in caplog.records[-1].getMessage()
    _fake_ctdb_script(pnn="echo 1; exit 0", recmaster="exit 1")
    with ctdb.CLILeaderLocator() as status:
        assert not status.is_leader()
    assert "pnn" not in caplog.records[-1].getMessage()
    assert "['" + ldr_admin_cmd + "']" in caplog.records[-1].getMessage()

    # script missing entirely: both sub-commands fail and are logged
    os.unlink(fake_ctdb)
    with ctdb.CLILeaderLocator() as status:
        assert not status.is_leader()
    assert "pnn" in caplog.records[-2].getMessage()
    assert "['" + ldr_admin_cmd + "']" in caplog.records[-1].getMessage()


def test_check_nodestatus(tmp_path):
    """check_nodestatus exits 0 when `ctdb nodestatus` reports OK and
    non-zero otherwise.

    A fake ctdb script stands in for the real binary; setting TESTFAIL
    in the environment makes the fake report failure.  check_nodestatus
    takes over the current process, so each scenario runs in a forked
    child and the parent asserts on the child's exit status.
    """
    datapath = tmp_path / "_ctdb"
    datapath.mkdir()

    # POSIX test(1) only defines `=` for string comparison; `==` is a
    # bashism that breaks under strict /bin/sh implementations (dash),
    # which would make the "OK" scenario exit non-zero.
    fake_ctdb = [
        "#!/bin/sh",
        'if [ "$1$TESTFAIL" = "nodestatus" ]',
        "then exit 0;",
        "else exit 1;",
        "fi",
    ]
    fake_ctdb_script = datapath / "ctdb.sh"
    with open(fake_ctdb_script, "w") as fh:
        fh.write("\n".join(fake_ctdb))
        fh.write("\n")
    os.chmod(fake_ctdb_script, 0o755)

    test_cmd = sambacc.samba_cmds.SambaCommand(fake_ctdb_script)
    # simulate nodestatus == OK
    pid = os.fork()
    if pid == 0:
        ctdb.check_nodestatus(cmd=test_cmd)
    else:
        _, status = os.waitpid(pid, 0)
        assert status == 0

    # simulate nodestatus != OK
    pid = os.fork()
    if pid == 0:
        os.environ["TESTFAIL"] = "yes"
        ctdb.check_nodestatus(cmd=test_cmd)
    else:
        _, status = os.waitpid(pid, 0)
        assert status != 0


def test_ensure_ctdb_port_in_services(tmp_path):
    fname = tmp_path / "fake.services"
    with open(fname, "w") as fh:
        # random snippets from a real /etc/services
        print("# this is a comment", file=fh)
        print("ftp             21/tcp", file=fh)
        print("ftp             21/udp          fsp fspd", file=fh)
        print("ssh             22/tcp", file=fh)
        print("ssh             22/udp", file=fh)
        print("telnet          23/tcp", file=fh)
        print("telnet          23/udp", file=fh)
        print("ctdb            4379/tcp        # CTDB", file=fh)
        print("ctdb            4379/udp        # CTDB", file=fh)
        print("roce            4791/udp", file=fh)
        print("# a random comment...", file=fh)
        print("snss            11171/udp", file=fh)
        print("oemcacao-jmxmp  11172/tcp", file=fh)

    ctdb.ensure_ctdb_port_in_services(9099, fname)

    with open(fname) as fh:
        content = fh.read()
    assert "ctdb  9099/tcp" in content
    assert "ctdb  9099/udp" in content
    assert "ctdb            4379/tcp" not in content
    assert "ctdb            4379/udp" not in content
    # others
    assert "ssh             22/tcp" in content
    assert "snss            11171/udp" in content
07070100000062000081A4000000000000000000000001684BE19C00001BA0000000000000000000000000000000000000003700000000sambacc-v0.6+git.60.2f89a38/tests/test_grpc_backend.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2025  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import io
import os

import pytest

import sambacc.grpc.backend


config1 = """
{
  "samba-container-config": "v0",
  "configs": {
    "foobar": {
      "shares": [
        "share",
        "stuff"
      ],
      "globals": ["global0"],
      "instance_name": "GANDOLPH"
    }
  },
  "shares": {
    "share": {
      "options": {
        "path": "/share",
        "read only": "no",
        "valid users": "sambauser",
        "guest ok": "no",
        "force user": "root"
      }
    },
    "stuff": {
      "options": {
        "path": "/mnt/stuff"
      }
    }
  },
  "globals": {
    "global0": {
      "options": {
        "workgroup": "SAMBA",
        "security": "user",
        "server min protocol": "SMB2",
        "load printers": "no",
        "printing": "bsd",
        "printcap name": "/dev/null",
        "disable spoolss": "yes",
        "guest ok": "no"
      }
    }
  },
  "_extra_junk": 0
}
"""

json1 = """
{
  "timestamp": "2025-05-08T20:41:57.273489+0000",
  "version": "4.23.0pre1-UNKNOWN",
  "smb_conf": "/etc/samba/smb.conf",
  "sessions": {
    "2891148582": {
      "session_id": "2891148582",
      "server_id": {
        "pid": "1243",
        "task_id": "0",
        "vnn": "2",
        "unique_id": "1518712196307698939"
      },
      "uid": 103107,
      "gid": 102513,
      "username": "DOMAIN1\\\\bwayne",
      "groupname": "DOMAIN1\\\\domain users",
      "creation_time": "2025-05-08T20:39:36.456835+00:00",
      "expiration_time": "30828-09-14T02:48:05.477581+00:00",
      "auth_time": "2025-05-08T20:39:36.457633+00:00",
      "remote_machine": "127.0.0.1",
      "hostname": "ipv4:127.0.0.1:59396",
      "session_dialect": "SMB3_11",
      "client_guid": "adc145fe-0677-4ab6-9d61-c25b30211174",
      "encryption": {
        "cipher": "-",
        "degree": "none"
      },
      "signing": {
        "cipher": "AES-128-GMAC",
        "degree": "partial"
      },
      "channels": {
        "1": {
          "channel_id": "1",
          "creation_time": "2025-05-08T20:39:36.456835+00:00",
          "local_address": "ipv4:127.0.0.1:445",
          "remote_address": "ipv4:127.0.0.1:59396",
          "transport": "tcp"
        }
      }
    }
  },
  "tcons": {
    "3757739897": {
      "service": "cephomatic",
      "server_id": {
        "pid": "1243",
        "task_id": "0",
        "vnn": "2",
        "unique_id": "1518712196307698939"
      },
      "tcon_id": "3757739897",
      "session_id": "2891148582",
      "machine": "127.0.0.1",
      "connected_at": "2025-05-08T20:39:36.464088+00:00",
      "encryption": {
        "cipher": "-",
        "degree": "none"
      },
      "signing": {
        "cipher": "-",
        "degree": "none"
      }
    }
  },
  "open_files": {}
}
"""


def _status_json1_check(status):
    """Assert that ``status`` matches the data embedded in ``json1``."""
    assert status.timestamp == "2025-05-08T20:41:57.273489+0000"
    assert len(status.sessions) == 1
    sess = status.sessions[0]
    assert sess.session_id == "2891148582"
    assert sess.username == "DOMAIN1\\bwayne"
    assert sess.groupname == "DOMAIN1\\domain users"
    assert sess.remote_machine == "127.0.0.1"
    assert sess.hostname == "ipv4:127.0.0.1:59396"
    assert sess.session_dialect == "SMB3_11"
    assert sess.uid == 103107
    assert sess.gid == 102513
    assert len(status.tcons) == 1
    # json1 has cipher "-"; the parsed status reports it as "".
    assert sess.encryption
    assert sess.encryption.cipher == ""
    assert sess.encryption.degree == "none"
    assert sess.signing
    assert sess.signing.cipher == "AES-128-GMAC"
    assert sess.signing.degree == "partial"
    tcon = status.tcons[0]
    assert tcon.tcon_id == "3757739897"
    assert tcon.session_id == "2891148582"
    assert tcon.service_name == "cephomatic"


def _fake_command(tmp_path, monkeypatch, *, output="", exitcode=0):
    """Redirect all samba command invocations to a fake shell script.

    The script prints ``output`` (if non-empty) on stdout and exits with
    ``exitcode``.  ``monkeypatch`` is used to point samba_cmds'
    _GLOBAL_PREFIX at the script so every command runs it instead.
    """
    import sambacc.samba_cmds  # ensure the module attribute exists

    fakedir = tmp_path / "fake"
    fake = fakedir / "fake.sh"
    outfile = fakedir / "stdout"

    # (removed leftover debug prints of fakedir/fakedir.mkdir)
    fakedir.mkdir(parents=True, exist_ok=True)
    monkeypatch.setattr(sambacc.samba_cmds, "_GLOBAL_PREFIX", [str(fake)])

    if output:
        outfile.write_text(output)
    fake.write_text(
        "#!/bin/sh\n"
        f"test -f {outfile} && cat {outfile}\n"
        f"exit {exitcode}\n"
    )
    os.chmod(fake, 0o755)


def _instance_config():
    """Return the 'foobar' instance parsed out of the config1 JSON."""
    global_config = sambacc.config.GlobalConfig(io.StringIO(config1))
    return global_config.get("foobar")


def test_parse_status():
    """Status.parse understands the smbstatus-style JSON sample."""
    parsed = sambacc.grpc.backend.Status.parse(json1)
    _status_json1_check(parsed)


def test_backend_versions(tmp_path, monkeypatch):
    """get_versions() returns the fake command's version output."""
    _fake_command(tmp_path, monkeypatch, output="Version 4.99.99\n")
    ctl = sambacc.grpc.backend.ControlBackend(_instance_config())
    versions = ctl.get_versions()
    assert versions.samba_version == "Version 4.99.99"


def test_backend_is_clustered(tmp_path, monkeypatch):
    """A plain (non-ctdb) instance config is not clustered."""
    _fake_command(tmp_path, monkeypatch)
    ctl = sambacc.grpc.backend.ControlBackend(_instance_config())
    assert not ctl.is_clustered()


def test_backend_status(tmp_path, monkeypatch):
    """get_status() parses the fake command's JSON output."""
    _fake_command(tmp_path, monkeypatch, output=json1)
    ctl = sambacc.grpc.backend.ControlBackend(_instance_config())
    _status_json1_check(ctl.get_status())


def test_backend_status_error(tmp_path, monkeypatch):
    """A failing status command makes get_status() raise."""
    _fake_command(tmp_path, monkeypatch, exitcode=2)
    ctl = sambacc.grpc.backend.ControlBackend(_instance_config())
    with pytest.raises(Exception):
        ctl.get_status()


def test_backend_close_share(tmp_path, monkeypatch):
    """close_share() succeeds when the underlying command succeeds."""
    _fake_command(tmp_path, monkeypatch)
    ctl = sambacc.grpc.backend.ControlBackend(_instance_config())
    ctl.close_share("share", denied_users=False)


def test_backend_close_share_error(tmp_path, monkeypatch):
    """A failing command makes close_share() raise."""
    _fake_command(tmp_path, monkeypatch, exitcode=2)
    ctl = sambacc.grpc.backend.ControlBackend(_instance_config())
    with pytest.raises(Exception):
        ctl.close_share("share", denied_users=False)


def test_backend_kill_client(tmp_path, monkeypatch):
    """kill_client() succeeds when the underlying command succeeds."""
    _fake_command(tmp_path, monkeypatch)
    ctl = sambacc.grpc.backend.ControlBackend(_instance_config())
    ctl.kill_client("127.0.0.1")


def test_backend_kill_client_error(tmp_path, monkeypatch):
    """A failing command makes kill_client() raise."""
    _fake_command(tmp_path, monkeypatch, exitcode=2)
    ctl = sambacc.grpc.backend.ControlBackend(_instance_config())
    with pytest.raises(Exception):
        ctl.kill_client("127.0.0.1")
07070100000063000081A4000000000000000000000001684BE19C00001C4B000000000000000000000000000000000000003600000000sambacc-v0.6+git.60.2f89a38/tests/test_grpc_server.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2025  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import collections

import pytest

from sambacc.grpc import backend

json1 = """
{
  "timestamp": "2025-05-08T20:41:57.273489+0000",
  "version": "4.23.0pre1-UNKNOWN",
  "smb_conf": "/etc/samba/smb.conf",
  "sessions": {
    "2891148582": {
      "session_id": "2891148582",
      "server_id": {
        "pid": "1243",
        "task_id": "0",
        "vnn": "2",
        "unique_id": "1518712196307698939"
      },
      "uid": 103107,
      "gid": 102513,
      "username": "DOMAIN1\\\\bwayne",
      "groupname": "DOMAIN1\\\\domain users",
      "creation_time": "2025-05-08T20:39:36.456835+00:00",
      "expiration_time": "30828-09-14T02:48:05.477581+00:00",
      "auth_time": "2025-05-08T20:39:36.457633+00:00",
      "remote_machine": "127.0.0.1",
      "hostname": "ipv4:127.0.0.1:59396",
      "session_dialect": "SMB3_11",
      "client_guid": "adc145fe-0677-4ab6-9d61-c25b30211174",
      "encryption": {
        "cipher": "-",
        "degree": "none"
      },
      "signing": {
        "cipher": "AES-128-GMAC",
        "degree": "partial"
      },
      "channels": {
        "1": {
          "channel_id": "1",
          "creation_time": "2025-05-08T20:39:36.456835+00:00",
          "local_address": "ipv4:127.0.0.1:445",
          "remote_address": "ipv4:127.0.0.1:59396",
          "transport": "tcp"
        }
      }
    }
  },
  "tcons": {
    "3757739897": {
      "service": "cephomatic",
      "server_id": {
        "pid": "1243",
        "task_id": "0",
        "vnn": "2",
        "unique_id": "1518712196307698939"
      },
      "tcon_id": "3757739897",
      "session_id": "2891148582",
      "machine": "127.0.0.1",
      "connected_at": "2025-05-08T20:39:36.464088+00:00",
      "encryption": {
        "cipher": "-",
        "degree": "none"
      },
      "signing": {
        "cipher": "-",
        "degree": "none"
      }
    }
  },
  "open_files": {}
}
"""


class MockBackend:
    """In-memory stand-in for the control backend.

    Each method bumps a per-name counter in ``_counter`` so tests can
    verify how the gRPC server dispatched calls.  Tests also poke
    ``_kaboom`` directly to force ``get_versions`` to raise.
    """

    def __init__(self):
        # count of calls per method name
        self._counter = collections.Counter()
        self._versions = backend.Versions(
            samba_version="4.99.5",
            sambacc_version="a.b.c",
            container_version="test.v",
        )
        self._is_clustered = False
        self._status = backend.Status.parse(json1)
        # when set to an exception instance, get_versions raises it
        self._kaboom = None

    def get_versions(self) -> backend.Versions:
        self._counter["get_versions"] += 1
        if self._kaboom:
            raise self._kaboom
        return self._versions

    def is_clustered(self) -> bool:
        self._counter["is_clustered"] += 1
        return self._is_clustered

    def get_status(self) -> backend.Status:
        self._counter["get_status"] += 1
        return self._status

    def close_share(self, share_name: str, denied_users: bool) -> None:
        self._counter["close_share"] += 1

    def kill_client(self, ip_address: str) -> None:
        self._counter["kill_client"] += 1


@pytest.fixture()
def mock_grpc_server():
    # Skip every test using this fixture if the grpc runtime (and the
    # generated server code) can not be imported.
    try:
        import sambacc.grpc.server
    except ImportError:
        pytest.skip("can not import grpc server")

    class TestConfig(sambacc.grpc.server.ServerConfig):
        max_workers = 3
        address = "localhost:54445"
        insecure = True
        _server = None
        backend = None

        # serve() normally blocks in wait(); capture the server object
        # instead so the fixture can stop it during teardown.
        def wait(self, server):
            self._server = server

    tc = TestConfig()
    tc.backend = MockBackend()
    sambacc.grpc.server.serve(tc, tc.backend)
    assert tc._server
    assert tc.backend
    yield tc
    # teardown: stop with a short grace period and wait for shutdown
    tc._server.stop(0.1).wait()


def test_info(mock_grpc_server):
    """The Info RPC returns version data from the backend."""
    import grpc
    import sambacc.grpc.generated.control_pb2_grpc as _rpc
    import sambacc.grpc.generated.control_pb2 as _pb

    with grpc.insecure_channel(mock_grpc_server.address) as channel:
        stub = _rpc.SambaControlStub(channel)
        reply = stub.Info(_pb.InfoRequest())

    assert mock_grpc_server.backend._counter["get_versions"] == 1
    assert reply.samba_info.version == "4.99.5"
    assert not reply.samba_info.clustered
    assert reply.container_info.sambacc_version == "a.b.c"
    assert reply.container_info.container_version == "test.v"


def test_info_error(mock_grpc_server):
    """Backend exceptions surface to the client as grpc.RpcError."""
    import grpc
    import sambacc.grpc.generated.control_pb2_grpc as _rpc
    import sambacc.grpc.generated.control_pb2 as _pb

    mock_grpc_server.backend._kaboom = ValueError("kaboom")
    with grpc.insecure_channel(mock_grpc_server.address) as channel:
        stub = _rpc.SambaControlStub(channel)
        with pytest.raises(grpc.RpcError):
            stub.Info(_pb.InfoRequest())

    assert mock_grpc_server.backend._counter["get_versions"] == 1


def test_status(mock_grpc_server):
    """The Status RPC reflects the parsed smbstatus sample data."""
    import grpc
    import sambacc.grpc.generated.control_pb2_grpc as _rpc
    import sambacc.grpc.generated.control_pb2 as _pb

    with grpc.insecure_channel(mock_grpc_server.address) as channel:
        stub = _rpc.SambaControlStub(channel)
        reply = stub.Status(_pb.StatusRequest())

    assert mock_grpc_server.backend._counter["get_status"] == 1
    assert reply.server_timestamp == "2025-05-08T20:41:57.273489+0000"
    # data assertions
    assert len(reply.sessions) == 1
    sess = reply.sessions[0]
    assert sess.session_id == "2891148582"
    assert sess.uid == 103107
    assert sess.gid == 102513
    assert sess.username == "DOMAIN1\\bwayne"
    assert sess.encryption
    assert sess.encryption.cipher == ""
    assert sess.encryption.degree == "none"
    assert sess.signing
    assert sess.signing.cipher == "AES-128-GMAC"
    assert sess.signing.degree == "partial"


def test_close_share(mock_grpc_server):
    """The CloseShare RPC dispatches to the backend exactly once."""
    import grpc
    import sambacc.grpc.generated.control_pb2_grpc as _rpc
    import sambacc.grpc.generated.control_pb2 as _pb

    with grpc.insecure_channel(mock_grpc_server.address) as channel:
        stub = _rpc.SambaControlStub(channel)
        reply = stub.CloseShare(_pb.CloseShareRequest(share_name="bob"))

    assert mock_grpc_server.backend._counter["close_share"] == 1
    assert reply


def test_kill_client(mock_grpc_server):
    """The KillClientConnection RPC dispatches to the backend once."""
    import grpc
    import sambacc.grpc.generated.control_pb2_grpc as _rpc
    import sambacc.grpc.generated.control_pb2 as _pb

    with grpc.insecure_channel(mock_grpc_server.address) as channel:
        stub = _rpc.SambaControlStub(channel)
        reply = stub.KillClientConnection(
            _pb.KillClientRequest(ip_address="192.168.76.18")
        )

    assert mock_grpc_server.backend._counter["kill_client"] == 1
    assert reply
07070100000064000081A4000000000000000000000001684BE19C0000092E000000000000000000000000000000000000003900000000sambacc-v0.6+git.60.2f89a38/tests/test_inotify_waiter.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import contextlib
import threading
import time

import pytest

try:
    import sambacc.inotify_waiter
except ImportError:
    pytestmark = pytest.mark.skip


@contextlib.contextmanager
def background(bg_func):
    """Run ``bg_func`` in a thread for the duration of the with-block.

    The thread is started on entry and joined on exit, even if the
    with-block body raises.
    """
    worker = threading.Thread(target=bg_func)
    worker.start()
    try:
        yield None
    finally:
        worker.join()


def test_inotify(tmp_path):
    # tfile is watched; tfile2 lives in the same directory but is not.
    tfile = str(tmp_path / "foobar.txt")
    tfile2 = str(tmp_path / "other.txt")

    iw = sambacc.inotify_waiter.INotify(tfile, print_func=print, timeout=3)

    def _touch():
        # write the watched file shortly after wait() begins
        time.sleep(0.2)
        with open(tfile, "w") as fh:
            print("W", tfile)
            fh.write("one")

    with background(_touch):
        before = time.time()
        iw.wait()
        after = time.time()
    # wait() returned because of the write: after the ~0.2s delay but
    # well before the 3 second timeout
    assert after - before > 0.1
    assert after - before <= 1

    def _touch2():
        # write the unwatched sibling first; only the later write to the
        # watched file should end the wait
        time.sleep(0.2)
        with open(tfile2, "w") as fh:
            print("W", tfile2)
            fh.write("two")
        time.sleep(1)
        with open(tfile, "w") as fh:
            print("W", tfile)
            fh.write("one")

    with background(_touch2):
        before = time.time()
        iw.wait()
        after = time.time()

    # the wait lasted past the unwatched write (>= ~1.2s total)
    assert after - before > 0.1
    assert after - before >= 1

    # with no writers at all, wait() returns only at the 3s timeout
    before = time.time()
    iw.wait()
    after = time.time()
    assert int(after) - int(before) == 3
    iw.close()


def test_inotify_bad_input():
    """A bare directory path ("/") is rejected with ValueError."""
    with pytest.raises(ValueError):
        sambacc.inotify_waiter.INotify("/")


def test_inotify_relative_path():
    """A relative path splits into the cwd (".") and the file name."""
    watcher = sambacc.inotify_waiter.INotify("cool.txt")
    assert (watcher._dir, watcher._name) == (".", "cool.txt")
07070100000065000081A4000000000000000000000001684BE19C000009E9000000000000000000000000000000000000003000000000sambacc-v0.6+git.60.2f89a38/tests/test_jfile.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import pytest

from sambacc import jfile


def test_open(tmpdir):
    """OPEN_RO requires an existing file; OPEN_RW creates one."""
    with pytest.raises(FileNotFoundError):
        jfile.open(tmpdir / "a.json", jfile.OPEN_RO)
    handle = jfile.open(tmpdir / "a.json", jfile.OPEN_RW)
    assert handle is not None
    handle.close()


def test_load(tmpdir):
    """load() returns the fallback for an empty file and the parsed
    JSON otherwise.

    (Renamed from the misspelled ``test_laod``; nothing references the
    old name and pytest discovery is unaffected.)
    """
    with jfile.open(tmpdir / "a.json", jfile.OPEN_RW) as fh:
        data = jfile.load(fh, ["missing"])
    assert data == ["missing"]

    with open(tmpdir / "a.json", "w") as fh:
        fh.write('{"present": true}\n')
    with jfile.open(tmpdir / "a.json", jfile.OPEN_RW) as fh:
        data = jfile.load(fh, ["missing"])
    assert data == {"present": True}


def test_dump(tmpdir):
    """dump() writes JSON that load() round-trips; a second dump
    replaces the previous content."""
    target = tmpdir / "a.json"
    for value in ("good", "better"):
        with jfile.open(target, jfile.OPEN_RW) as fh:
            jfile.dump({"something": value}, fh)
        with jfile.open(target, jfile.OPEN_RO) as fh:
            assert jfile.load(fh) == {"something": value}


def test_flock(tmpdir):
    """flock() serializes concurrent read-modify-write cycles."""
    import time
    import threading

    def locked_append(path):
        # under the lock: load the list, pause, append successor, rewrite
        with jfile.open(path, jfile.OPEN_RW) as fh:
            jfile.flock(fh)
            data = jfile.load(fh, [0])
            time.sleep(0.2)
            data.append(data[-1] + 1)
            jfile.dump(data, fh)

    fpath = tmpdir / "a.json"
    workers = [
        threading.Thread(target=locked_append, args=(fpath,))
        for _ in range(2)
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()

    with jfile.open(fpath, jfile.OPEN_RW) as fh:
        jfile.flock(fh)
        data = jfile.load(fh)
    assert data == [0, 1, 2]
07070100000066000081A4000000000000000000000001684BE19C0000220A000000000000000000000000000000000000002F00000000sambacc-v0.6+git.60.2f89a38/tests/test_join.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import json
import os
import pytest

from sambacc import samba_cmds
import sambacc.join


@pytest.fixture(scope="function")
def testjoiner(tmp_path):
    """Yield a Joiner subclass wired to a fake ``net`` script.

    The script logs its arguments and stdin to ``<data_path>/log`` and
    exits nonzero when the log contains the string "failme".
    """
    data_path = tmp_path / "_samba"
    data_path.mkdir()

    fake_net = [
        "#!/bin/sh",
        f'echo "ARGS:" "$@" > {data_path}/log',
        f"cat >> {data_path}/log",
        f"if grep -q failme {data_path}/log ; then exit 1; fi",
    ]
    fake_net_script = data_path / "net.sh"
    with open(fake_net_script, "w") as fh:
        fh.write("\n".join(fake_net))
        fh.write("\n")
    os.chmod(fake_net_script, 0o755)

    class TestJoiner(sambacc.join.Joiner):
        _net_ads_join = samba_cmds.SambaCommand(fake_net_script)["ads", "join"]
        _requestodj = samba_cmds.SambaCommand(fake_net_script)[
            "offlinejoin", "requestodj"
        ]
        path = tmp_path
        logpath = data_path / "log"
        _nullfh = None

        def _interactive_input(self):
            return self._nullfh

    with open(os.devnull, "rb") as dnull:
        # Fix: assign to _nullfh, the attribute _interactive_input
        # returns.  The original assigned the devnull handle to an
        # unused "_null" attribute, leaving _nullfh as None.
        TestJoiner._nullfh = dnull
        yield TestJoiner()


def test_no_sources(testjoiner):
    """Joining with no configured credential sources is an error."""
    with pytest.raises(sambacc.join.JoinError):
        testjoiner.join()


def test_invalid_source_vals(testjoiner):
    """Plain strings are rejected where a UserPass is required."""
    for bogus, add_source in (
        ("abc123", testjoiner.add_pw_source),
        ("xyzdef", testjoiner.add_interactive_source),
    ):
        with pytest.raises(AssertionError):
            add_source(bogus)


def test_join_password(testjoiner):
    """A username/password source performs a successful join."""
    creds = sambacc.join.UserPass("bugs", "whatsupdoc")
    testjoiner.add_pw_source(creds)
    testjoiner.join()


def test_join_file(testjoiner):
    """Credentials read from a JSON file perform a successful join."""
    creds_file = os.path.join(testjoiner.path, "join1.json")
    with open(creds_file, "w") as fh:
        json.dump({"username": "elmer", "password": "hunter2"}, fh)
    testjoiner.add_file_source(creds_file)
    testjoiner.join()


def test_join_missing_file(testjoiner):
    """A nonexistent credentials file yields a JoinError mentioning it."""
    missing = os.path.join(testjoiner.path, "nope.json")
    testjoiner.add_file_source(missing)
    with pytest.raises(sambacc.join.JoinError) as err:
        testjoiner.join()
    assert "not found" in str(err).lower()


def test_join_bad_file(testjoiner):
    """Malformed credential files always raise JoinError."""
    creds_file = os.path.join(testjoiner.path, "join1.json")
    testjoiner.add_file_source(creds_file)

    bad_contents = [
        {"acme": True},  # credentials keys absent
        {"username": None, "password": "hunter2"},  # null username
        {"username": "elmer", "password": 123},  # non-string password
    ]
    for data in bad_contents:
        with open(creds_file, "w") as fh:
            json.dump(data, fh)
        with pytest.raises(sambacc.join.JoinError):
            testjoiner.join()


def test_join_multi_source(testjoiner):
    """With two working sources the first (password) one is used."""
    testjoiner.add_pw_source(sambacc.join.UserPass("bugs", "whatsupdoc"))
    creds_file = os.path.join(testjoiner.path, "join1.json")
    with open(creds_file, "w") as fh:
        json.dump({"username": "elmer", "password": "hunter2"}, fh)
    testjoiner.add_file_source(creds_file)
    testjoiner.join()

    with open(testjoiner.logpath) as fh:
        log_lines = fh.readlines()
    assert log_lines[0].startswith("ARGS")
    assert "bugs" in log_lines[0]
    assert "whatsupdoc" in log_lines[1]


def test_join_multi_source_fail_first(testjoiner):
    """When the first source fails, the file source is tried next."""
    testjoiner.add_pw_source(sambacc.join.UserPass("bugs", "failme"))
    creds_file = os.path.join(testjoiner.path, "join1.json")
    with open(creds_file, "w") as fh:
        json.dump({"username": "elmer", "password": "hunter2"}, fh)
    testjoiner.add_file_source(creds_file)
    testjoiner.join()

    with open(testjoiner.logpath) as fh:
        log_lines = fh.readlines()
    assert log_lines[0].startswith("ARGS")
    assert "elmer" in log_lines[0]
    assert "hunter2" in log_lines[1]


def test_join_multi_source_fail_both(testjoiner):
    """When all sources fail, the JoinError aggregates every attempt."""
    testjoiner.add_pw_source(sambacc.join.UserPass("bugs", "failme"))
    creds_file = os.path.join(testjoiner.path, "join1.json")
    with open(creds_file, "w") as fh:
        json.dump({"username": "elmer", "password": "failme2"}, fh)
    testjoiner.add_file_source(creds_file)
    with pytest.raises(sambacc.join.JoinError) as err:
        testjoiner.join()
    assert err.match("2 join attempts")
    assert len(err.value.errors) == 2

    with open(testjoiner.logpath) as fh:
        log_lines = fh.readlines()
    assert log_lines[0].startswith("ARGS")
    assert "elmer" in log_lines[0]
    assert "failme2" in log_lines[1]


def test_join_prompt_fake(testjoiner):
    """An interactive source joins using the (fake) prompted input."""
    testjoiner.add_interactive_source(sambacc.join.UserPass("daffy"))
    testjoiner.join()

    with open(testjoiner.logpath) as fh:
        log_lines = fh.readlines()

    # only the ARGS line: no password was piped to the fake net command
    assert log_lines[0].startswith("ARGS")
    assert "daffy" in log_lines[0]
    assert len(log_lines) == 1


def test_join_with_marker(testjoiner):
    """A successful join writes the marker file and did_join() sees it."""
    testjoiner.marker = os.path.join(testjoiner.path, "marker.json")
    testjoiner.add_pw_source(sambacc.join.UserPass("bugs", "whatsupdoc"))
    testjoiner.join()

    assert os.path.exists(testjoiner.marker)
    assert testjoiner.did_join()


def test_join_bad_marker(testjoiner):
    """did_join() is false when the marker is invalid or missing."""
    testjoiner.marker = os.path.join(testjoiner.path, "marker.json")
    testjoiner.add_pw_source(sambacc.join.UserPass("bugs", "whatsupdoc"))
    testjoiner.join()

    # valid right after the join
    assert testjoiner.did_join()
    # overwrite with unrelated JSON -> invalid marker
    with open(testjoiner.marker, "w") as fh:
        json.dump({"foo": "bar"}, fh)
    assert not testjoiner.did_join()
    # remove the file entirely -> missing marker
    os.unlink(testjoiner.marker)
    assert not testjoiner.did_join()


def test_join_no_marker(testjoiner):
    """Without a marker configured, did_join() is false even after a
    successful join."""
    testjoiner.add_pw_source(sambacc.join.UserPass("bugs", "whatsupdoc"))
    testjoiner.join()
    assert not testjoiner.did_join()


def test_join_when_possible(testjoiner):
    """join_when_possible retries until success or the handler gives up."""

    class DummyWaiter:
        wcount = 0

        def wait(self):
            self.wcount += 1

    waiter = DummyWaiter()
    errors = []

    def ehandler(err):
        # give up (by raising) after collecting six errors
        if len(errors) > 5:
            raise ValueError("xxx")
        errors.append(err)

    # error case - no valid join sources
    with pytest.raises(ValueError):
        sambacc.join.join_when_possible(
            testjoiner, waiter, error_handler=ehandler
        )
    assert len(errors) == 6
    assert waiter.wcount == 6

    # success case - performs a password join
    errors.clear()
    testjoiner.add_pw_source(sambacc.join.UserPass("bugs", "whatsupdoc"))
    testjoiner.marker = os.path.join(testjoiner.path, "marker.json")
    sambacc.join.join_when_possible(testjoiner, waiter, error_handler=ehandler)

    assert len(errors) == 0
    assert waiter.wcount == 6
    with open(testjoiner.logpath) as fh:
        log_lines = fh.readlines()
    assert log_lines[0].startswith("ARGS")
    assert "bugs" in log_lines[0]
    assert "whatsupdoc" in log_lines[1]

    # success case - join marker already exists, no waiting happens
    sambacc.join.join_when_possible(testjoiner, waiter, error_handler=ehandler)

    assert len(errors) == 0
    assert waiter.wcount == 6


def test_offline_join(testjoiner):
    """An offline-join (odj) file is piped to the net command."""
    odj_path = os.path.join(testjoiner.path, "foo.odj")
    with open(odj_path, "w") as fh:
        fh.write("FAKE!\n")
    testjoiner.add_odj_file_source(odj_path)
    testjoiner.join()

    with open(testjoiner.logpath) as fh:
        log_lines = fh.readlines()
    assert log_lines[0].startswith("ARGS")
    assert log_lines[1].startswith("FAKE!")


def test_offline_join_nofile(testjoiner):
    """A missing odj file makes join() raise JoinError."""
    odj_path = os.path.join(testjoiner.path, "foo.odj")
    testjoiner.add_odj_file_source(odj_path)
    with pytest.raises(sambacc.join.JoinError):
        testjoiner.join()


def test_offline_join_fail(testjoiner):
    """A failing offline join command makes join() raise JoinError."""
    odj_path = os.path.join(testjoiner.path, "foo.odj")
    with open(odj_path, "w") as fh:
        fh.write("failme!\n")
    testjoiner.add_odj_file_source(odj_path)
    with pytest.raises(sambacc.join.JoinError):
        testjoiner.join()
07070100000067000081A4000000000000000000000001684BE19C000007BB000000000000000000000000000000000000002F00000000sambacc-v0.6+git.60.2f89a38/tests/test_main.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import pytest

import sambacc.commands.cli
import sambacc.commands.main
from .test_netcmd_loader import config1


def run(*cli_args):
    """Invoke the sambacc CLI entry point with the given arguments."""
    return sambacc.commands.main.main(cli_args)


def test_no_id():
    """With no identity configured, print-config raises cli.Fail.

    (Removed the unused ``capsys`` fixture parameter.)
    """
    with pytest.raises(sambacc.commands.cli.Fail):
        run("print-config")


def test_print_config(capsys, tmp_path):
    """print-config renders the named instance as smb.conf text."""
    cfg_path = tmp_path / "sample.json"
    with open(cfg_path, "w") as fh:
        fh.write(config1)
    run("--identity", "foobar", "--config", str(cfg_path), "print-config")
    out, _ = capsys.readouterr()
    for expected in (
        "[global]",
        "netbios name = GANDOLPH",
        "[share]",
        "path = /share",
        "[stuff]",
        "path = /mnt/stuff",
    ):
        assert expected in out


def test_print_config_env_vars(capsys, tmp_path, monkeypatch):
    """The config path and identity can come from environment variables."""
    cfg_path = tmp_path / "sample.json"
    with open(cfg_path, "w") as fh:
        fh.write(config1)
    monkeypatch.setenv("SAMBACC_CONFIG", str(cfg_path))
    monkeypatch.setenv("SAMBA_CONTAINER_ID", "foobar")
    run("print-config")
    out, _ = capsys.readouterr()
    for expected in (
        "[global]",
        "netbios name = GANDOLPH",
        "[share]",
        "path = /share",
        "[stuff]",
        "path = /mnt/stuff",
    ):
        assert expected in out
07070100000068000081A4000000000000000000000001684BE19C00000E76000000000000000000000000000000000000003800000000sambacc-v0.6+git.60.2f89a38/tests/test_netcmd_loader.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import io
import pytest

import sambacc.config
import sambacc.netcmd_loader
import sambacc.samba_cmds

smb_conf = """
[global]
cache directory = {path}
state directory = {path}
private dir = {path}
include = registry
"""

config1 = """
{
  "samba-container-config": "v0",
  "configs": {
    "foobar": {
      "shares": [
        "share",
        "stuff"
      ],
      "globals": ["global0"],
      "instance_name": "GANDOLPH"
    }
  },
  "shares": {
    "share": {
      "options": {
        "path": "/share",
        "read only": "no",
        "valid users": "sambauser",
        "guest ok": "no",
        "force user": "root"
      }
    },
    "stuff": {
      "options": {
        "path": "/mnt/stuff"
      }
    }
  },
  "globals": {
    "global0": {
      "options": {
        "workgroup": "SAMBA",
        "security": "user",
        "server min protocol": "SMB2",
        "load printers": "no",
        "printing": "bsd",
        "printcap name": "/dev/null",
        "disable spoolss": "yes",
        "guest ok": "no"
      }
    }
  },
  "_extra_junk": 0
}
"""


@pytest.fixture(scope="function")
def testloader(tmp_path):
    """Return a NetCmdLoader whose net command targets a temp smb.conf."""
    samba_dir = tmp_path / "_samba"
    samba_dir.mkdir()
    conf_path = tmp_path / "smb.conf"
    with open(conf_path, "w") as fh:
        fh.write(smb_conf.format(path=samba_dir))

    loader = sambacc.netcmd_loader.NetCmdLoader()
    loader._net_conf = sambacc.samba_cmds.net[
        f"--configfile={conf_path}", "conf"
    ]
    return loader


def test_import(testloader):
    """Importing the example instance config must not raise."""
    fh = io.StringIO(config1)
    g = sambacc.config.GlobalConfig(fh)
    testloader.import_config(g.get("foobar"))


def test_current_shares(testloader):
    """current_shares() is empty before import and lists both shares after."""
    shares = testloader.current_shares()
    assert len(shares) == 0
    fh = io.StringIO(config1)
    g = sambacc.config.GlobalConfig(fh)
    testloader.import_config(g.get("foobar"))
    shares = testloader.current_shares()
    assert len(shares) == 2
    assert "share" in shares
    assert "stuff" in shares


def test_dump(testloader, tmp_path):
    """dump() of an imported config contains the instance name, both
    share sections, and their path options."""
    fh = io.StringIO(config1)
    g = sambacc.config.GlobalConfig(fh)
    testloader.import_config(g.get("foobar"))

    with open(tmp_path / "dump.txt", "w") as fh:
        testloader.dump(fh)
    with open(tmp_path / "dump.txt") as fh:
        dump = fh.read()

    assert "[global]" in dump
    # instance_name from config1 becomes the netbios name
    assert "netbios name = GANDOLPH" in dump
    assert "[share]" in dump
    assert "path = /share" in dump
    assert "[stuff]" in dump
    assert "path = /mnt/stuff" in dump


def test_set(testloader, tmp_path):
    """A value written via set() shows up in a subsequent dump()."""
    testloader.set("global", "client signing", "mandatory")

    with open(tmp_path / "dump.txt", "w") as fh:
        testloader.dump(fh)
    with open(tmp_path / "dump.txt") as fh:
        dump = fh.read()

    assert "[global]" in dump
    assert "client signing = mandatory" in dump


def test_loader_error_set(testloader, tmp_path):
    """set() with empty section/key names raises LoaderError."""
    with pytest.raises(sambacc.netcmd_loader.LoaderError):
        testloader.set("", "", "yikes")
07070100000069000081A4000000000000000000000001684BE19C00000D6B000000000000000000000000000000000000003800000000sambacc-v0.6+git.60.2f89a38/tests/test_passdb_loader.pyimport contextlib
import io
import os
import pytest
import shutil

import sambacc.passdb_loader
import sambacc.config
from .test_config import config2

# smb.conf template for the smb_conf fixture below; all samba state dirs
# are redirected to a scratch {path} and the registry supplies the rest.
_smb_conf = """
[global]
cache directory = {path}
state directory = {path}
private dir = {path}
include = registry
"""

# passwd(5)-format lines appended to the nss_wrapper passwd file so the
# users from the shared config2 example resolve during the tests.
passwd_append1 = [
    "alice:x:1010:1010:ALICE:/home/alice:/bin/bash\n",
    "bob:x:1011:1011:BOB:/home/bob:/bin/bash\n",
    "carol:x:1010:1010:carol:/home/alice:/bin/bash\n",
]


@pytest.fixture(scope="function")
def smb_conf(tmp_path):
    """Write a per-test smb.conf from the template and return its path."""
    data_path = tmp_path / "_samba"
    data_path.mkdir()
    smb_conf_path = tmp_path / "smb.conf"
    with open(smb_conf_path, "w") as fh:
        fh.write(_smb_conf.format(path=data_path))
    return smb_conf_path


@contextlib.contextmanager
def alter_passwd(path, append):
    """Temporarily append passwd-format lines to the file named by the
    NSS_WRAPPER_PASSWD environment variable.

    :param path: directory in which to place the backup copy.
    :param append: iterable of newline-terminated passwd(5) lines.

    The original file contents are restored on exit, even if the managed
    block raises (the original version lacked the try/finally and also
    ignored ``append``, always writing the module-level passwd_append1).
    """
    bkup = path / "passwd.bak"
    mypasswd = os.environ.get("NSS_WRAPPER_PASSWD")
    shutil.copy(mypasswd, bkup)
    try:
        with open(mypasswd, "a") as fh:
            fh.write("\n")
            for line in append:
                fh.write(line)
        yield
    finally:
        # always restore the pristine passwd file for later tests
        shutil.copy(bkup, mypasswd)


def requires_passdb_modules():
    """Skip the calling test when samba's passdb python bindings are absent."""
    try:
        sambacc.passdb_loader._samba_modules()
    except ImportError:
        pytest.skip("unable to load samba passdb modules")


def test_init_custom_smb_conf(smb_conf):
    """PassDBLoader can be constructed with an explicit smb.conf path."""
    requires_passdb_modules()
    sambacc.passdb_loader.PassDBLoader(smbconf=str(smb_conf))


def test_init_default_smb_conf():
    """Constructing with smbconf=None works only if the host has a real
    /etc/samba/smb.conf; otherwise it must raise."""
    requires_passdb_modules()
    # this is a bit hacky, but I don't want to assume the local
    # system has or doesn't have a "real" smb.conf
    if os.path.exists("/etc/samba/smb.conf"):
        sambacc.passdb_loader.PassDBLoader(smbconf=None)
    else:
        with pytest.raises(Exception):
            sambacc.passdb_loader.PassDBLoader(smbconf=None)


def test_add_users(tmp_path, smb_conf):
    """Users from the config2 example can be added to the passdb once
    their passwd entries exist (via the nss_wrapper passwd file)."""
    requires_passdb_modules()
    # TODO: actually use nss_wrapper!
    if not os.environ.get("NSS_WRAPPER_PASSWD"):
        pytest.skip("need to have path to passwd file")
    if os.environ.get("WRITABLE_PASSWD") != "yes":
        pytest.skip("need to append users to passwd file")
    with alter_passwd(tmp_path, passwd_append1):
        fh = io.StringIO(config2)
        g = sambacc.config.GlobalConfig(fh)
        ic = g.get("foobar")
        users = list(ic.users())

        pdbl = sambacc.passdb_loader.PassDBLoader(smbconf=str(smb_conf))
        for u in users:
            pdbl.add_user(u)


def test_add_user_not_in_passwd(smb_conf):
    """Adding a user with no matching passwd entry must be rejected."""
    requires_passdb_modules()
    pdbl = sambacc.passdb_loader.PassDBLoader(smbconf=str(smb_conf))

    # Irritatingly, the passwd file contents appear to be cached
    # so we need to make up a user that is def. not in the etc passwd
    # equivalent, in order to get samba libs to reject it
    urec = dict(name="nogoodnik", uid=101010, gid=101010, password="yuck")
    ubad = sambacc.config.UserEntry(None, urec, 0)
    with pytest.raises(Exception):
        pdbl.add_user(ubad)


def test_add_user_no_passwd(smb_conf):
    """A user record lacking any password field raises ValueError."""
    requires_passdb_modules()
    pdbl = sambacc.passdb_loader.PassDBLoader(smbconf=str(smb_conf))

    # Irritatingly, the passwd file contents appear to be cached
    # so we need to make up a user that is def. not in the etc passwd
    # equivalent, in order to get samba libs to reject it
    urec = dict(name="bob", uid=1011, gid=1011)
    ubad = sambacc.config.UserEntry(None, urec, 0)
    with pytest.raises(ValueError):
        pdbl.add_user(ubad)
0707010000006A000081A4000000000000000000000001684BE19C00000E45000000000000000000000000000000000000003800000000sambacc-v0.6+git.60.2f89a38/tests/test_passwd_loader.pyimport io

import sambacc.passwd_loader
from .test_config import config2

# A typical 14-entry /etc/passwd snapshot used as loader input; the count
# and first entry ("root") are asserted by the read tests below.
etc_passwd1 = """
root:x:0:0:root:/root:/bin/bash
bin:x:1:1:bin:/bin:/sbin/nologin
daemon:x:2:2:daemon:/sbin:/sbin/nologin
adm:x:3:4:adm:/var/adm:/sbin/nologin
lp:x:4:7:lp:/var/spool/lpd:/sbin/nologin
sync:x:5:0:sync:/sbin:/bin/sync
shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown
halt:x:7:0:halt:/sbin:/sbin/halt
mail:x:8:12:mail:/var/spool/mail:/sbin/nologin
operator:x:11:0:operator:/root:/sbin/nologin
games:x:12:100:games:/usr/games:/sbin/nologin
ftp:x:14:50:FTP User:/var/ftp:/sbin/nologin
nobody:x:65534:65534:Kernel Overflow User:/:/sbin/nologin
dbus:x:81:81:System message bus:/:/sbin/nologin
""".strip()

# A typical 28-entry /etc/group snapshot; the count and first entry
# ("root") are asserted by test_read_existing_group.
etc_group1 = """
root:x:0:
bin:x:1:
daemon:x:2:
sys:x:3:
adm:x:4:
tty:x:5:
disk:x:6:
lp:x:7:
mem:x:8:
kmem:x:9:
wheel:x:10:
cdrom:x:11:
mail:x:12:
man:x:15:
dialout:x:18:
floppy:x:19:
games:x:20:
tape:x:33:
video:x:39:
ftp:x:50:
lock:x:54:
audio:x:63:
users:x:100:
nobody:x:65534:
utmp:x:22:
utempter:x:35:
kvm:x:36:
dbus:x:81:
""".strip()


def test_read_existing_passwd():
    """readfp/writefp round-trips an existing passwd file unchanged."""
    fh = io.StringIO(etc_passwd1)
    pfl = sambacc.passwd_loader.PasswdFileLoader()
    pfl.readfp(fh)
    assert len(pfl.lines) == 14
    assert pfl.lines[0].startswith("root")
    fh2 = io.StringIO()
    pfl.writefp(fh2)
    assert etc_passwd1 == fh2.getvalue()


def test_read_existing_group():
    """readfp/writefp round-trips an existing group file unchanged."""
    fh = io.StringIO(etc_group1)
    pfl = sambacc.passwd_loader.GroupFileLoader()
    pfl.readfp(fh)
    assert len(pfl.lines) == 28
    assert pfl.lines[0].startswith("root")
    fh2 = io.StringIO()
    pfl.writefp(fh2)
    assert etc_group1 == fh2.getvalue()


def test_add_user():
    """Users from the shared config2 example are written as passwd lines."""
    # NOTE(review): sambacc.config is not imported by this module directly;
    # it resolves as an attribute only because another import pulls it in —
    # consider adding an explicit `import sambacc.config`.
    fh = io.StringIO(config2)
    g = sambacc.config.GlobalConfig(fh)
    ic = g.get("foobar")
    users = list(ic.users())

    pfl = sambacc.passwd_loader.PasswdFileLoader()
    for u in users:
        pfl.add_user(u)
    assert len(pfl.lines) == 3
    fh2 = io.StringIO()
    pfl.writefp(fh2)
    txt = fh2.getvalue()
    assert "alice:x:" in txt
    assert "bob:x:" in txt
    assert "carol:x:" in txt


def test_add_group():
    """Groups from the shared config2 example are written as group-file
    lines, and adding the same groups twice adds no duplicate lines.

    (Fix: the loop variable previously shadowed ``g``, the GlobalConfig
    object, which invited subtle bugs if the config were used later.)
    """
    fh = io.StringIO(config2)
    g = sambacc.config.GlobalConfig(fh)
    ic = g.get("foobar")
    groups = list(ic.groups())

    gfl = sambacc.passwd_loader.GroupFileLoader()
    for grp in groups:
        gfl.add_group(grp)
    # test that duplicates don't add extra lines
    for grp in groups:
        gfl.add_group(grp)
    assert len(gfl.lines) == 3
    fh2 = io.StringIO()
    gfl.writefp(fh2)
    txt = fh2.getvalue()
    assert "alice:x:" in txt
    assert "bob:x:" in txt
    assert "carol:x:" in txt


def test_read_passwd_file(tmp_path):
    """read() loads a passwd file from disk and write round-trips it."""
    fname = tmp_path / "read_etc_passwd"
    with open(fname, "w") as fh:
        fh.write(etc_passwd1)
    pfl = sambacc.passwd_loader.PasswdFileLoader(fname)
    pfl.read()
    assert len(pfl.lines) == 14
    assert pfl.lines[0].startswith("root")
    fh2 = io.StringIO()
    pfl.writefp(fh2)
    assert etc_passwd1 == fh2.getvalue()


def test_write_passwd_file(tmp_path):
    """Existing passwd entries plus config users are written back to disk;
    re-adding the same users does not duplicate lines (14 + 3 == 17)."""
    fh = io.StringIO(config2)
    g = sambacc.config.GlobalConfig(fh)
    ic = g.get("foobar")
    users = list(ic.users())

    fname = tmp_path / "write_etc_passwd"
    with open(fname, "w") as fh:
        fh.write(etc_passwd1)

    pfl = sambacc.passwd_loader.PasswdFileLoader(fname)
    pfl.read()
    for u in users:
        pfl.add_user(u)
    # test that duplicates don't add extra lines
    for u in users:
        pfl.add_user(u)
    assert len(pfl.lines) == 17
    pfl.write()

    with open(fname) as fh:
        txt = fh.read()
    assert "root:x:" in txt
    assert "\nalice:x:" in txt
    assert "\nbob:x:" in txt
    assert "\ncarol:x:" in txt
0707010000006B000081A4000000000000000000000001684BE19C00000891000000000000000000000000000000000000003000000000sambacc-v0.6+git.60.2f89a38/tests/test_paths.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import os
import pytest

import sambacc.paths


def test_ensure_samba_dirs_fail(tmp_path):
    """ensure_samba_dirs raises OSError when required parents are absent."""
    # This is missing both "var/lib" and "run"
    with pytest.raises(OSError):
        sambacc.paths.ensure_samba_dirs(root=tmp_path)
    os.mkdir(tmp_path / "var")
    os.mkdir(tmp_path / "var/lib")
    # This is missing "run"
    with pytest.raises(OSError):
        sambacc.paths.ensure_samba_dirs(root=tmp_path)


def test_ensure_samba_dirs_ok(tmp_path):
    """With var/lib and run present, ensure_samba_dirs succeeds."""
    os.mkdir(tmp_path / "var")
    os.mkdir(tmp_path / "var/lib")
    os.mkdir(tmp_path / "run")
    sambacc.paths.ensure_samba_dirs(root=tmp_path)


def test_ensure_samba_dirs_already(tmp_path):
    """ensure_samba_dirs is idempotent when all dirs already exist."""
    os.mkdir(tmp_path / "var")
    os.mkdir(tmp_path / "var/lib")
    os.mkdir(tmp_path / "var/lib/samba")
    os.mkdir(tmp_path / "var/lib/samba/private")
    os.mkdir(tmp_path / "run")
    os.mkdir(tmp_path / "run/samba/")
    os.mkdir(tmp_path / "run/samba/winbindd")
    sambacc.paths.ensure_samba_dirs(root=tmp_path)


def test_ensure_share_dirs(tmp_path):
    """ensure_share_dirs creates missing share paths (including nested
    ones with a leading slash) and is idempotent."""
    assert not os.path.exists(tmp_path / "foobar")
    sambacc.paths.ensure_share_dirs("foobar", root=str(tmp_path))
    assert os.path.exists(tmp_path / "foobar")

    assert not os.path.exists(tmp_path / "wibble")
    sambacc.paths.ensure_share_dirs("/wibble/cat", root=str(tmp_path))
    assert os.path.exists(tmp_path / "wibble/cat")
    sambacc.paths.ensure_share_dirs("/wibble/cat", root=str(tmp_path))
    assert os.path.exists(tmp_path / "wibble/cat")
0707010000006C000081A4000000000000000000000001684BE19C00000B23000000000000000000000000000000000000003600000000sambacc-v0.6+git.60.2f89a38/tests/test_permissions.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2022  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import os

import pytest


import sambacc.permissions


@pytest.mark.parametrize(
    "cls",
    [
        sambacc.permissions.NoopPermsHandler,
        sambacc.permissions.InitPosixPermsHandler,
        sambacc.permissions.AlwaysPosixPermsHandler,
    ],
)
def test_permissions_path(cls):
    """Every handler type reports back the path it was constructed with."""
    assert cls("/foo", "user.foo", options={}).path() == "/foo"


def test_noop_handler():
    """NoopPermsHandler: no status, always "ok", update() does nothing."""
    nh = sambacc.permissions.NoopPermsHandler("/foo", "user.foo", options={})
    assert nh.path() == "/foo"
    assert not nh.has_status()
    assert nh.status_ok()
    assert nh.update() is None


@pytest.fixture(scope="function")
def tmp_path_xattrs_ok(tmp_path_factory):
    """Return a temp dir verified to support user xattrs.

    Skips the test when the xattr module is unavailable or the temp
    filesystem rejects xattrs.  (Fix: dropped the dead ``raise`` in
    front of pytest.skip() — skip() raises internally and never
    returns, so the raise was unreachable and inconsistent with the
    bare call above.)
    """
    try:
        import xattr  # type: ignore
    except ImportError:
        pytest.skip("xattr module not available")

    tmpp = tmp_path_factory.mktemp("needs_xattrs")
    try:
        # probe: round-trip a throwaway attribute to confirm fs support
        xattr.set(str(tmpp), "user.deleteme", "1")
        xattr.remove(str(tmpp), "user.deleteme")
    except OSError:
        pytest.skip(
            "temp dir does not support xattrs"
            " (try changing basetmp to a file system that supports xattrs)"
        )
    return tmpp


def test_init_handler(tmp_path_xattrs_ok):
    """InitPosixPermsHandler sets its status marker on first update and
    then leaves later manual mode changes (0o755) alone."""
    path = tmp_path_xattrs_ok / "foo"
    os.mkdir(path)
    ih = sambacc.permissions.InitPosixPermsHandler(
        str(path), "user.marker", options={}
    )
    assert ih.path().endswith("/foo")
    assert not ih.has_status()
    assert not ih.status_ok()

    ih.update()
    assert ih.has_status()
    assert ih.status_ok()

    # once initialized, update() must not reset externally-changed perms
    os.chmod(path, 0o755)
    ih.update()
    assert (os.stat(path).st_mode & 0o777) == 0o755


def test_always_handler(tmp_path_xattrs_ok):
    """AlwaysPosixPermsHandler re-applies permissions (back to 0o777) on
    every update, even after a manual chmod."""
    path = tmp_path_xattrs_ok / "foo"
    os.mkdir(path)
    ih = sambacc.permissions.AlwaysPosixPermsHandler(
        str(path), "user.marker", options={}
    )
    assert ih.path().endswith("/foo")
    assert not ih.has_status()
    assert not ih.status_ok()

    ih.update()
    assert ih.has_status()
    assert ih.status_ok()

    os.chmod(path, 0o755)
    assert (os.stat(path).st_mode & 0o777) == 0o755
    ih.update()
    assert (os.stat(path).st_mode & 0o777) == 0o777
0707010000006D000081A4000000000000000000000001684BE19C00001D2D000000000000000000000000000000000000003700000000sambacc-v0.6+git.60.2f89a38/tests/test_rados_opener.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2023  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import io
import sys
import unittest.mock
import urllib.request

import pytest

import sambacc.rados_opener

# CAUTION: nearly all of these tests are based on mocking the ceph rados API.
# Testing this for real would require an operational ceph cluster and that's
# not happening for a simple set of unit tests!


def test_enable_rados_url_opener(monkeypatch):
    """With a (mocked) rados module importable, enable_rados registers a
    handler on the opener class."""
    mock = unittest.mock.MagicMock()
    monkeypatch.setitem(sys.modules, "rados", mock)

    cls_mock = unittest.mock.MagicMock()
    sambacc.rados_opener.enable_rados(cls_mock)
    assert cls_mock._handlers.append.called


def test_enable_rados_url_opener_fail(monkeypatch):
    """Without a rados module, enable_rados registers no handler."""
    cls_mock = unittest.mock.MagicMock()
    sambacc.rados_opener.enable_rados(cls_mock)
    assert not cls_mock._handlers.append.called


def test_enable_rados_url_opener_with_args(monkeypatch):
    """client_name without full_name is passed to Rados() as rados_id."""
    mock = unittest.mock.MagicMock()
    monkeypatch.setitem(sys.modules, "rados", mock)

    cls_mock = unittest.mock.MagicMock()
    cls_mock._handlers = []
    sambacc.rados_opener.enable_rados(cls_mock, client_name="user1")
    assert len(cls_mock._handlers) == 1
    assert isinstance(
        cls_mock._handlers[0]._interface, sambacc.rados_opener._RADOSInterface
    )
    assert cls_mock._handlers[0]._interface.api is mock
    assert cls_mock._handlers[0]._interface.client_name == "user1"
    assert not cls_mock._handlers[0]._interface.full_name
    ri = cls_mock._handlers[0]._interface
    ri.Rados()
    assert ri.api.Rados.call_args[1]["rados_id"] == "user1"
    assert ri.api.Rados.call_args[1]["name"] == ""
    assert (
        ri.api.Rados.call_args[1]["conffile"] == mock.Rados.DEFAULT_CONF_FILES
    )


def test_enable_rados_url_opener_with_args2(monkeypatch):
    """With full_name=True the client_name is passed via name= and the
    rados_id is left empty."""
    mock = unittest.mock.MagicMock()
    monkeypatch.setitem(sys.modules, "rados", mock)

    cls_mock = unittest.mock.MagicMock()
    cls_mock._handlers = []
    sambacc.rados_opener.enable_rados(
        cls_mock, client_name="client.user1", full_name=True
    )
    assert len(cls_mock._handlers) == 1
    assert isinstance(
        cls_mock._handlers[0]._interface, sambacc.rados_opener._RADOSInterface
    )
    assert cls_mock._handlers[0]._interface.api is mock
    assert cls_mock._handlers[0]._interface.client_name == "client.user1"
    assert cls_mock._handlers[0]._interface.full_name
    ri = cls_mock._handlers[0]._interface
    ri.Rados()
    assert ri.api.Rados.call_args[1]["rados_id"] == ""
    assert ri.api.Rados.call_args[1]["name"] == "client.user1"
    assert (
        ri.api.Rados.call_args[1]["conffile"] == mock.Rados.DEFAULT_CONF_FILES
    )


def test_rados_handler_parse():
    """rados:// URLs (with or without an extra leading slash) parse into
    pool, namespace, and key components."""
    class RH(sambacc.rados_opener._RADOSHandler):
        _rados_api = unittest.mock.MagicMock()

    rh = RH()
    rq = urllib.request.Request("rados://foo/bar/baz")
    rr = rh.rados_open(rq)
    assert rr._pool == "foo"
    assert rr._ns == "bar"
    assert rr._key == "baz"

    rq = urllib.request.Request("rados:///foo1/bar1/baz1")
    rr = rh.rados_open(rq)
    assert rr._pool == "foo1"
    assert rr._ns == "bar1"
    assert rr._key == "baz1"


def test_rados_handler_norados():
    """A handler with no rados interface raises RADOSUnsupported."""
    # Generally, this shouldn't happen because the rados handler shouldn't
    # be added to the URLOpener if rados module was unavailable.
    class RH(sambacc.rados_opener._RADOSHandler):
        _interface = None

    rh = RH()
    rq = urllib.request.Request("rados://foo/bar/baz")
    with pytest.raises(sambacc.rados_opener.RADOSUnsupported):
        rh.rados_open(rq)


def test_rados_response_read_all():
    """RADOSObjectRef behaves like a read-only binary file: read() returns
    the whole object, tell() advances, close() marks it closed."""
    sval = b"Hello, World.\nI am a fake rados object.\n"

    # fake ioctx.read: return the full payload once, then None (EOF)
    def _read(_, size, off):
        if off < len(sval):
            return sval

    mock = unittest.mock.MagicMock()
    mock.Rados.return_value.open_ioctx.return_value.read.side_effect = _read

    rr = sambacc.rados_opener.RADOSObjectRef(mock, "foo", "bar", "baz")
    assert rr.readable()
    assert not rr.seekable()
    assert not rr.writable()
    assert not rr.isatty()
    assert rr.mode == "rb"
    assert rr.name == "baz"
    assert not rr.closed
    data = rr.read()
    assert data == sval
    assert rr.tell() == len(sval)
    rr.flush()
    rr.close()
    assert rr.closed


def test_rados_response_read_chunks():
    """Sized read() calls return consecutive chunks of the object."""
    sval = b"a bad cat lives under the murky terrifying water"
    bio = io.BytesIO(sval)

    # fake ioctx.read backed by a BytesIO honoring size/offset
    def _read(_, size, off):
        bio.seek(off)
        return bio.read(size)

    mock = unittest.mock.MagicMock()
    mock.Rados.return_value.open_ioctx.return_value.read.side_effect = _read

    rr = sambacc.rados_opener.RADOSObjectRef(mock, "foo", "bar", "baz")
    assert rr.readable()
    assert rr.read(8) == b"a bad ca"
    assert rr.read(8) == b"t lives "
    assert rr.read(8) == b"under th"


def test_rados_response_read_ctx_iter():
    """The ref is iterable inside a with-block and rejects reads after
    the context exits (closed file raises ValueError)."""
    sval = b"a bad cat lives under the murky terrifying water"
    bio = io.BytesIO(sval)

    def _read(_, size, off):
        bio.seek(off)
        return bio.read(size)

    mock = unittest.mock.MagicMock()
    mock.Rados.return_value.open_ioctx.return_value.read.side_effect = _read

    rr = sambacc.rados_opener.RADOSObjectRef(mock, "foo", "bar", "baz")
    with rr:
        result = [value for value in rr]
    assert result == [sval]
    with pytest.raises(ValueError):
        rr.read(8)


def test_rados_response_not_implemented():
    """Seek/fileno/line and write APIs are unsupported on RADOSObjectRef."""
    mock = unittest.mock.MagicMock()

    rr = sambacc.rados_opener.RADOSObjectRef(mock, "foo", "bar", "baz")
    with pytest.raises(NotImplementedError):
        rr.seek(10)
    with pytest.raises(NotImplementedError):
        rr.fileno()
    with pytest.raises(NotImplementedError):
        rr.readline()
    with pytest.raises(NotImplementedError):
        rr.readlines()
    with pytest.raises(NotImplementedError):
        rr.truncate()
    with pytest.raises(NotImplementedError):
        rr.write(b"zzzzz")
    with pytest.raises(NotImplementedError):
        rr.writelines([b"zzzzz"])


def test_rados_handler_config_key():
    """rados:mon-config-key: URLs fetch via mon_command; success returns
    a BytesIO of the payload, a nonzero status becomes an OSError with
    that errno."""
    class RH(sambacc.rados_opener._RADOSHandler):
        _interface = unittest.mock.MagicMock()

    mc = RH._interface.Rados.return_value.__enter__.return_value.mon_command
    mc.return_value = (0, b"rubber baby buggy bumpers", "")

    rh = RH()
    rq = urllib.request.Request("rados:mon-config-key:aa/bb/cc")
    rr = rh.rados_open(rq)
    assert isinstance(rr, io.BytesIO)
    assert rr.read() == b"rubber baby buggy bumpers"
    assert mc.called
    assert "aa/bb/cc" in mc.call_args[0][0]

    mc.reset_mock()
    mc.return_value = (2, b"", "no passing")
    rh = RH()
    rq = urllib.request.Request("rados:mon-config-key:xx/yy/zz")
    with pytest.raises(OSError) as pe:
        rh.rados_open(rq)
    assert getattr(pe.value, "errno", None) == 2
    assert mc.called
    assert "xx/yy/zz" in mc.call_args[0][0]
0707010000006E000081A4000000000000000000000001684BE19C000012DA000000000000000000000000000000000000003500000000sambacc-v0.6+git.60.2f89a38/tests/test_samba_cmds.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import sambacc.samba_cmds


def test_create_samba_command():
    """Indexing a SambaCommand returns an extended copy; the original is
    left unmodified."""
    cmd = sambacc.samba_cmds.SambaCommand("hello")
    assert cmd.name == "hello"
    cmd2 = cmd["world"]
    assert cmd.name == "hello"
    assert list(cmd) == ["hello"]
    assert list(cmd2) == ["hello", "world"]


def test_debug_command():
    """A per-command debug level adds a --debuglevel argument."""
    cmd = sambacc.samba_cmds.SambaCommand("beep", debug="5")
    assert list(cmd) == ["beep", "--debuglevel=5"]


def test_global_debug():
    """set_global_debug applies --debuglevel to new commands; reset in
    finally so the global state can't leak into other tests."""
    sambacc.samba_cmds.set_global_debug("7")
    try:
        cmd = sambacc.samba_cmds.SambaCommand("cheep")
        assert list(cmd) == ["cheep", "--debuglevel=7"]
    finally:
        sambacc.samba_cmds.set_global_debug("")


def test_global_prefix():
    """A global prefix is prepended to new commands (and becomes the
    command name); clearing it restores normal behavior."""
    # enabled
    sambacc.samba_cmds.set_global_prefix(["bob"])
    try:
        cmd = sambacc.samba_cmds.SambaCommand("deep")
        assert list(cmd) == ["bob", "deep"]
        assert cmd.name == "bob"
    finally:
        sambacc.samba_cmds.set_global_prefix([])

    # disabled
    cmd = sambacc.samba_cmds.SambaCommand("deep")
    assert list(cmd) == ["deep"]
    assert cmd.name == "deep"


def test_global_prefix_extended():
    """The global prefix applies while set, but commands derived (via
    indexing) after it is cleared do not inherit it."""
    # enabled
    sambacc.samba_cmds.set_global_prefix(["frank"])
    try:
        cmd = sambacc.samba_cmds.SambaCommand("deep")[
            "13", "--future=not-too-distant"
        ]
        assert list(cmd) == ["frank", "deep", "13", "--future=not-too-distant"]
        assert cmd.name == "frank"
    finally:
        sambacc.samba_cmds.set_global_prefix([])

    # disabled, must not "inherit" the prefix
    cmd2 = cmd["--scheme", "evil"]
    assert list(cmd2) == [
        "deep",
        "13",
        "--future=not-too-distant",
        "--scheme",
        "evil",
    ]
    assert cmd2.name == "deep"


def test_command_repr():
    """repr() of a SambaCommand names the class and the command."""
    cmd = sambacc.samba_cmds.SambaCommand("doop")
    cr = repr(cmd)
    assert cr.startswith("SambaCommand")
    assert "doop" in cr


def test_encode_none():
    """encode(None) yields an empty byte string."""
    res = sambacc.samba_cmds.encode(None)
    assert res == b""


def test_execute():
    """Fork a child that exec()s `true` via samba_cmds.execute and check
    that the child exits with status 0.

    The child is guarded with try/finally + os._exit: execute() replaces
    the process image on success, so reaching the finally means the exec
    failed — exit hard instead of letting a second copy of the test
    runner continue past this function (the original lacked this guard).
    """
    import os

    cmd = sambacc.samba_cmds.SambaCommand("true")
    pid = os.fork()
    if pid == 0:
        try:
            sambacc.samba_cmds.execute(cmd)
        finally:
            os._exit(1)  # only reachable when exec failed
    else:
        _, status = os.waitpid(pid, 0)
        assert status == 0


def test_create_command_args():
    """CommandArgs extends via indexing without mutating the original."""
    # this is the simpler base class for SambaCommand. It lacks
    # the samba debug level option.
    cmd = sambacc.samba_cmds.CommandArgs("something")
    assert cmd.name == "something"
    cmd2 = cmd["nice"]
    assert cmd.name == "something"
    assert list(cmd) == ["something"]
    assert list(cmd2) == ["something", "nice"]


def test_command_args_repr():
    """repr() of CommandArgs shows the class, command, and extra args."""
    r = str(sambacc.samba_cmds.CommandArgs("something", ["nice"]))
    assert r.startswith("CommandArgs")
    assert "something" in r
    assert "nice" in r


def test_get_samba_specifics(monkeypatch):
    """SAMBA_SPECIFICS env var parses as a comma-separated set; empty
    input yields a falsy result."""
    monkeypatch.setenv("SAMBA_SPECIFICS", "")
    ss = sambacc.samba_cmds.get_samba_specifics()
    assert not ss

    monkeypatch.setenv("SAMBA_SPECIFICS", "wibble,quux")
    ss = sambacc.samba_cmds.get_samba_specifics()
    assert ss
    assert len(ss) == 2
    assert "wibble" in ss
    assert "quux" in ss


def test_smbd_foreground(monkeypatch):
    """smbd_foreground uses --log-stdout by default and switches to
    --debug-stdout when daemon_cli_debug_output is in SAMBA_SPECIFICS."""
    monkeypatch.setenv("SAMBA_SPECIFICS", "")
    sf = sambacc.samba_cmds.smbd_foreground()
    assert "smbd" in sf.name
    assert "--log-stdout" in sf.argv()
    assert "--debug-stdout" not in sf.argv()

    monkeypatch.setenv("SAMBA_SPECIFICS", "daemon_cli_debug_output")
    sf = sambacc.samba_cmds.smbd_foreground()
    assert "smbd" in sf.name
    assert "--log-stdout" not in sf.argv()
    assert "--debug-stdout" in sf.argv()


def test_winbindd_foreground(monkeypatch):
    """winbindd_foreground uses --stdout by default and switches to
    --debug-stdout when daemon_cli_debug_output is in SAMBA_SPECIFICS."""
    monkeypatch.setenv("SAMBA_SPECIFICS", "")
    wf = sambacc.samba_cmds.winbindd_foreground()
    assert "winbindd" in wf.name
    assert "--stdout" in wf.argv()
    assert "--debug-stdout" not in wf.argv()

    monkeypatch.setenv("SAMBA_SPECIFICS", "daemon_cli_debug_output")
    wf = sambacc.samba_cmds.winbindd_foreground()
    assert "winbindd" in wf.name
    assert "--stdout" not in wf.argv()
    assert "--debug-stdout" in wf.argv()
0707010000006F000081A4000000000000000000000001684BE19C000006E1000000000000000000000000000000000000003800000000sambacc-v0.6+git.60.2f89a38/tests/test_simple_waiter.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2021  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import sambacc.simple_waiter


def test_generate_sleeps():
    """generate_sleeps yields 1s initially, then 5s, then 60s forever."""
    g = sambacc.simple_waiter.generate_sleeps()
    times = [next(g) for _ in range(130)]
    assert times[0] == 1
    assert times[0:11] == [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    # NOTE(review): indices 11 and 33 are deliberately not asserted —
    # presumably to avoid pinning the exact transition points; confirm.
    assert times[12] == 5
    assert all(times[x] == 5 for x in range(12, 33))
    assert times[34] == 60
    assert all(times[x] == 60 for x in range(34, 130))


def test_sleeper():
    """Sleeper.wait() consumes its times generator, calling the sleep
    function once per wait; the default generator starts at 1 second."""

    # endless generator for a custom interval
    def gen():
        while True:
            yield 8

    cc = 0

    # counts calls and verifies the custom interval is used
    def fake_sleep(v):
        nonlocal cc
        cc += 1
        assert v == 8

    sleeper = sambacc.simple_waiter.Sleeper(times=gen())
    sleeper._sleep = fake_sleep
    sleeper.wait()
    assert cc == 1
    for _ in range(3):
        sleeper.wait()
    assert cc == 4

    cc = 0

    # default times: first waits sleep 1 second each
    def fake_sleep2(v):
        nonlocal cc
        cc += 1
        assert v == 1

    sleeper = sambacc.simple_waiter.Sleeper()
    sleeper._sleep = fake_sleep2
    sleeper.wait()
    assert cc == 1
    for _ in range(3):
        sleeper.wait()
    assert cc == 4
07070100000070000081A4000000000000000000000001684BE19C00000CFB000000000000000000000000000000000000003000000000sambacc-v0.6+git.60.2f89a38/tests/test_skips.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2024  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

from unittest import mock

import pytest

from sambacc.commands import skips


@pytest.mark.parametrize(
    "value,rtype",
    [
        ("always:", skips.SkipAlways),
        ("file:/var/lib/womble", skips.SkipFile),
        ("file:!/var/lib/zomble", skips.SkipFile),
        ("env:LIMIT==none", skips.SkipEnv),
        ("env:LIMIT!=everybody", skips.SkipEnv),
        ("env:LIMIT=everybody", ValueError),
        ("env:LIMIT", ValueError),
        ("file:", ValueError),
        ("always:forever", ValueError),
        ("klunk:", KeyError),
    ],
)
def test_parse(value, rtype):
    """skips.parse returns the right skip class for valid specs and
    raises the expected exception type for invalid ones."""
    if issubclass(rtype, BaseException):
        with pytest.raises(rtype):
            skips.parse(value)
        return
    skf = skips.parse(value)
    assert isinstance(skf, rtype)


@pytest.mark.parametrize(
    "value,ret",
    [
        ("file:/var/lib/foo/a", "skip-if-file-exists: /var/lib/foo/a exists"),
        (
            "file:!/var/lib/bar/a",
            "skip-if-file-missing: /var/lib/bar/a missing",
        ),
        ("file:/etc/blat", None),
        ("env:PLINK==0", "env var: PLINK -> 0 == 0"),
        ("env:PLINK!=88", "env var: PLINK -> 0 != 88"),
        ("env:PLONK==enabled", None),
        ("always:", "always skip"),
    ],
)
def test_method_test(value, ret, monkeypatch):
    """Each parsed skip condition's .test() returns the documented reason
    string when it matches and None when it does not."""

    # pretend only paths under /var/lib/foo/ exist
    def _exists(p):
        rv = p.startswith("/var/lib/foo/")
        return rv

    monkeypatch.setattr("os.path.exists", _exists)
    monkeypatch.setenv("PLINK", "0")
    monkeypatch.setenv("PLONK", "disabled")
    skf = skips.parse(value)
    ctx = mock.MagicMock()
    assert skf.test(ctx) == ret


def test_test(monkeypatch):
    # pretend that only paths under /var/lib/foo/ exist
    def _fake_exists(p):
        return p.startswith("/var/lib/foo/")

    monkeypatch.setattr("os.path.exists", _fake_exists)
    monkeypatch.setenv("PLINK", "0")
    monkeypatch.setenv("PLONK", "disabled")

    conditions = [
        skips.SkipEnv("==", "PLINK", "1"),
        skips.SkipEnv("!=", "PLONK", "disabled"),
        skips.SkipAlways(),
    ]
    ctx = mock.MagicMock()
    # SkipAlways wins regardless of the environment
    assert skips.test(ctx, conditions=conditions) == "always skip"
    # without SkipAlways, neither env condition currently matches
    conditions = conditions[:-1]
    assert not skips.test(ctx, conditions=conditions)
    monkeypatch.setenv("PLINK", "1")
    assert (
        skips.test(ctx, conditions=conditions) == "env var: PLINK -> 1 == 1"
    )

    # conditions may also be supplied through the cli context
    ctx.cli.skip_conditions = conditions
    assert skips.test(ctx) == "env var: PLINK -> 1 == 1"


def test_help_info():
    # the help text must mention each supported skip-rule prefix
    help_text = skips._help_info()
    for prefix in ("file:", "env:", "always:"):
        assert prefix in help_text


def test_parse_hack():
    # parse() raises argparse.ArgumentTypeError on invalid input so it
    # can serve directly as an argparse `type=` converter
    import argparse

    err_type = argparse.ArgumentTypeError
    with pytest.raises(err_type):
        skips.parse("?")
07070100000071000081A4000000000000000000000001684BE19C00000A9D000000000000000000000000000000000000003600000000sambacc-v0.6+git.60.2f89a38/tests/test_smbconf_api.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2023  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import io

import sambacc.smbconf_api


def test_simple_config_store():
    store = sambacc.smbconf_api.SimpleConfigStore()
    assert store.writeable, "SimpleConfigStore should always be writeable"
    store["foo"] = [("a", "Artichoke"), ("b", "Broccoli")]
    store["bar"] = [("example", "yes"), ("production", "no")]
    # iteration yields section names; values round-trip unchanged
    assert list(store) == ["foo", "bar"]
    assert store["foo"] == [("a", "Artichoke"), ("b", "Broccoli")]
    assert store["bar"] == [("example", "yes"), ("production", "no")]


def test_simple_config_store_import():
    dest = sambacc.smbconf_api.SimpleConfigStore()
    src = sambacc.smbconf_api.SimpleConfigStore()
    dest["foo"] = [("a", "Artichoke"), ("b", "Broccoli")]
    src["bar"] = [("example", "yes"), ("production", "no")]
    assert list(dest) == ["foo"]
    assert list(src) == ["bar"]

    # importing copies sections into dest without mutating src
    dest.import_smbconf(src)
    assert list(dest) == ["foo", "bar"]
    assert list(src) == ["bar"]
    assert dest["bar"] == [("example", "yes"), ("production", "no")]

    # a second import adds new sections and replaces existing ones
    src["baz"] = [("quest", "one")]
    src["bar"] = [
        ("example", "no"),
        ("production", "no"),
        ("unittest", "yes"),
    ]
    dest.import_smbconf(src)

    assert list(dest) == ["foo", "bar", "baz"]
    assert dest["bar"] == [
        ("example", "no"),
        ("production", "no"),
        ("unittest", "yes"),
    ]
    assert dest["baz"] == [("quest", "one")]


def test_write_store_as_smb_conf():
    store = sambacc.smbconf_api.SimpleConfigStore()
    store["foo"] = [("a", "Artichoke"), ("b", "Broccoli")]
    store["bar"] = [("example", "yes"), ("production", "no")]
    store["global"] = [("first", "1"), ("second", "2")]
    out = io.StringIO()
    sambacc.smbconf_api.write_store_as_smb_conf(out, store)
    lines = out.getvalue().splitlines()
    # the output starts with a blank line then the [global] section
    assert lines[0] == ""
    assert lines[1] == "[global]"
    assert lines[2] == "\tfirst = 1"
    assert lines[3] == "\tsecond = 2"
    # the other sections and their values appear later in the output
    for expected in (
        "[foo]",
        "\ta = Artichoke",
        "\tb = Broccoli",
        "[bar]",
        "\texample = yes",
        "\tproduction = no",
    ):
        assert expected in lines
07070100000072000081A4000000000000000000000001684BE19C0000159B000000000000000000000000000000000000003800000000sambacc-v0.6+git.60.2f89a38/tests/test_smbconf_samba.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2023  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import pytest

import sambacc.smbconf_api
import sambacc.smbconf_samba

# Minimal smb.conf template that points samba's state at a per-test
# {path} directory and delegates share configuration to the registry.
smb_conf_reg_stub = """
[global]
cache directory = {path}
state directory = {path}
private dir = {path}
include = registry
"""

# Sample file-backed smb.conf with one global value and five shares,
# used to exercise reading and importing of file configurations.
smb_conf_sample = """
[global]
  realm = my.kingdom.fora.horse

[share_a]
  path = /foo/bar/baz
  read only = no
[share_b]
  path = /foo/x/b
  read only = no
[share_c]
  path = /foo/x/c
  read only = no
[share_d]
  path = /foo/x/d
  read only = no
[share_e]
  path = /foo/x/e
  read only = no
"""


def _import_probe():
    # Probe for samba's python smbconf bindings; skip the current test
    # (rather than erroring) when they are not installed.
    try:
        import samba.smbconf  # type: ignore
        import samba.samba3.smbconf  # type: ignore # noqa
    except ImportError:
        pytest.skip("unable to load samba smbconf modules")


def _smb_data(path, smb_conf_text):
    data_path = path / "_samba"
    data_path.mkdir()
    smb_conf_path = path / "smb.conf"
    smb_conf_path.write_text(smb_conf_text.format(path=data_path))
    return smb_conf_path


@pytest.fixture(scope="session")
def smbconf_reg_once(tmp_path_factory):
    # session scope: samba keeps the registry db open once it has been
    # opened, so only one registry-backed SMBConf is created per run
    _import_probe()
    workdir = tmp_path_factory.mktemp("smb_reg")
    conf_path = _smb_data(workdir, smb_conf_reg_stub)
    return sambacc.smbconf_samba.SMBConf.from_registry(str(conf_path))


@pytest.fixture(scope="function")
def smbconf_reg(smbconf_reg_once):
    # IMPORTANT: Reminder, samba doesn't release the registry db once opened.
    # Reuse the session-wide instance, but drop all stored sections so
    # every test starts from an empty registry configuration.
    smbconf_reg_once._smbconf.drop()
    return smbconf_reg_once


@pytest.fixture(scope="function")
def smbconf_file(tmp_path):
    # a fresh file-backed (non-registry) smb.conf for each test
    _import_probe()
    conf_path = _smb_data(tmp_path, smb_conf_sample)
    return sambacc.smbconf_samba.SMBConf.from_file(str(conf_path))


def test_smbconf_file_read(smbconf_file):
    assert smbconf_file["global"] == [("realm", "my.kingdom.fora.horse")]
    assert smbconf_file["share_a"] == [
        ("path", "/foo/bar/baz"),
        ("read only", "no"),
    ]
    # unknown sections raise KeyError, like a mapping
    with pytest.raises(KeyError):
        smbconf_file["not_there"]
    expected_sections = [
        "global",
        "share_a",
        "share_b",
        "share_c",
        "share_d",
        "share_e",
    ]
    assert list(smbconf_file) == expected_sections


def test_smbconf_write(smbconf_file):
    # file-backed configurations are read only; imports must fail
    assert not smbconf_file.writeable
    empty_store = sambacc.smbconf_api.SimpleConfigStore()
    with pytest.raises(Exception):
        smbconf_file.import_smbconf(empty_store)


def test_smbconf_reg_write_read(smbconf_reg):
    assert smbconf_reg.writeable
    assert list(smbconf_reg) == []
    # the first write stores the section; the second replaces it
    for two_val in ("2", "22"):
        values = [("test:one", "1"), ("test:two", two_val)]
        smbconf_reg["global"] = values
        assert smbconf_reg["global"] == values


def test_smbconf_reg_write_txn_read(smbconf_reg):
    """Exercise registry writes made inside explicit transactions.

    The registry-backed SMBConf acts as a context manager: values set
    inside a clean ``with`` block are visible afterwards, while a block
    that raises rolls its changes back.
    """
    assert smbconf_reg.writeable
    assert list(smbconf_reg) == []
    with smbconf_reg:
        smbconf_reg["global"] = [("test:one", "1"), ("test:two", "2")]
    assert smbconf_reg["global"] == [("test:one", "1"), ("test:two", "2")]
    with smbconf_reg:
        smbconf_reg["global"] = [("test:one", "1"), ("test:two", "22")]
    assert smbconf_reg["global"] == [("test:one", "1"), ("test:two", "22")]

    # transaction with error: the write is rolled back, leaving the
    # previously committed values in place
    with pytest.raises(ValueError):
        with smbconf_reg:
            smbconf_reg["global"] = [("test:one", "1"), ("test:two", "2222")]
            raise ValueError("foo")
    assert smbconf_reg["global"] == [("test:one", "1"), ("test:two", "22")]

    # no transaction with error: the write takes effect immediately,
    # even though an exception is raised right after it
    with pytest.raises(ValueError):
        smbconf_reg["global"] = [("test:one", "1"), ("test:two", "2222")]
        raise ValueError("foo")
    assert smbconf_reg["global"] == [("test:one", "1"), ("test:two", "2222")]


def test_smbconf_reg_import_batched(smbconf_reg, smbconf_file):
    assert list(smbconf_reg) == []
    # batch_size=4 forces the six sections across multiple batches
    smbconf_reg.import_smbconf(smbconf_file, batch_size=4)
    assert smbconf_reg["global"] == [("realm", "my.kingdom.fora.horse")]
    assert smbconf_reg["share_a"] == [
        ("path", "/foo/bar/baz"),
        ("read only", "no"),
    ]
    with pytest.raises(KeyError):
        smbconf_reg["not_there"]
    imported_sections = list(smbconf_reg)
    assert imported_sections == [
        "global",
        "share_a",
        "share_b",
        "share_c",
        "share_d",
        "share_e",
    ]


def test_smbconf_reg_import_unbatched(smbconf_reg, smbconf_file):
    assert list(smbconf_reg) == []
    # batch_size=None imports everything without batching
    smbconf_reg.import_smbconf(smbconf_file, batch_size=None)
    assert smbconf_reg["global"] == [("realm", "my.kingdom.fora.horse")]
    assert smbconf_reg["share_a"] == [
        ("path", "/foo/bar/baz"),
        ("read only", "no"),
    ]
    with pytest.raises(KeyError):
        smbconf_reg["not_there"]
    imported_sections = list(smbconf_reg)
    assert imported_sections == [
        "global",
        "share_a",
        "share_b",
        "share_c",
        "share_d",
        "share_e",
    ]
07070100000073000081A4000000000000000000000001684BE19C00001372000000000000000000000000000000000000003500000000sambacc-v0.6+git.60.2f89a38/tests/test_url_opener.py#
# sambacc: a samba container configuration tool
# Copyright (C) 2023  John Mulligan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#

import errno
import http
import http.server
import os
import sys
import threading
import urllib.request

import pytest

import sambacc.url_opener


class _Server:
    """Run a test HTTP server on a background thread."""

    def __init__(self, port=8111):
        # SAMBACC_TEST_HTTP_PORT overrides the default test port
        self._port = int(os.environ.get("SAMBACC_TEST_HTTP_PORT", port))
        self._server = http.server.HTTPServer(
            ("127.0.0.1", self._port), _Handler
        )

    @property
    def port(self):
        # the actual port the server was bound to
        return self._port

    def start(self):
        self._thread = threading.Thread(target=self._server.serve_forever)
        self._thread.start()

    def stop(self):
        sys.stdout.flush()
        self._server.shutdown()
        self._thread.join()


class _Handler(http.server.BaseHTTPRequestHandler):
    """Dispatch ``GET /<word>`` requests to a ``get_<word>`` method."""

    def do_GET(self):
        word = self.path.split("/")[-1]
        return getattr(self, f"get_{word}")()

    def get_a(self):
        return self._ok("Wilbur was Right")

    def get_b(self):
        return self._ok("This is a test")

    def get_err404(self):
        self._err(http.HTTPStatus.NOT_FOUND, "Not Found")

    def get_err401(self):
        self._err(http.HTTPStatus.UNAUTHORIZED, "Unauthorized")

    def get_err403(self):
        self._err(http.HTTPStatus.FORBIDDEN, "Forbidden")

    def _ok(self, value):
        # respond 200 with a short plain-text body
        self.send_response(http.HTTPStatus.OK)
        self.send_header("Content-Type", "text/plain")
        self.send_header("Content-Length", str(len(value)))
        self.end_headers()
        self.wfile.write(value.encode("utf8"))

    def _err(self, err_value, err_msg):
        # respond with the given error status and a plain-text body
        self.send_response(err_value)
        self.send_header("Content-Type", "text/plain")
        self.send_header("Content-Length", str(len(err_msg)))
        self.end_headers()
        self.wfile.write(err_msg.encode("utf8"))


@pytest.fixture(scope="module")
def http_server():
    # one shared background HTTP server for all tests in this module
    server = _Server()
    server.start()
    try:
        yield server
    finally:
        server.stop()


def test_success_1(http_server):
    opener = sambacc.url_opener.URLOpener()
    response = opener.open(f"http://localhost:{http_server.port}/a")
    assert response.read() == b"Wilbur was Right"


def test_success_2(http_server):
    opener = sambacc.url_opener.URLOpener()
    response = opener.open(f"http://localhost:{http_server.port}/b")
    assert response.read() == b"This is a test"


def test_error_404(http_server):
    opener = sambacc.url_opener.URLOpener()
    target = f"http://localhost:{http_server.port}/err404"
    with pytest.raises(OSError) as err:
        opener.open(target)
    # HTTP 404 is mapped onto the ENOENT errno
    assert err.value.status == 404
    assert err.value.errno == errno.ENOENT


def test_error_401(http_server):
    opener = sambacc.url_opener.URLOpener()
    target = f"http://localhost:{http_server.port}/err401"
    with pytest.raises(OSError) as err:
        opener.open(target)
    # HTTP 401 is mapped onto the EPERM errno
    assert err.value.status == 401
    assert err.value.errno == errno.EPERM


def test_error_403(http_server):
    opener = sambacc.url_opener.URLOpener()
    target = f"http://localhost:{http_server.port}/err403"
    with pytest.raises(OSError) as err:
        opener.open(target)
    assert err.value.status == 403
    # No errno mapped for this one


def test_map_errno(http_server):
    opener = sambacc.url_opener.URLOpener()
    target = f"http://localhost:{http_server.port}/err401"
    with pytest.raises(OSError) as err:
        opener.open(target)
    # _map_errno must not clobber an errno that is already set
    err.value.errno = errno.EIO
    sambacc.url_opener._map_errno(err.value)
    assert err.value.errno == errno.EIO


def test_unknown_url():
    # a URL with an unsupported scheme is rejected
    with pytest.raises(sambacc.url_opener.SchemeNotSupported):
        sambacc.url_opener.URLOpener().open("bloop://foo/bar/baz")


def test_unknown_url_type():
    # a string that does not look like a URL at all is rejected too
    with pytest.raises(sambacc.url_opener.SchemeNotSupported):
        sambacc.url_opener.URLOpener().open("bonk-bonk-bonk")


def test_value_error_during_handling():
    # a handler that fails with something other than a URL error;
    # the original exception must propagate out of open() unchanged
    class _Exploder(urllib.request.BaseHandler):
        def bonk_open(self, req):
            raise ValueError("fiddlesticks")

    class _Opener(sambacc.url_opener.URLOpener):
        _handlers = sambacc.url_opener.URLOpener._handlers + [_Exploder]

    opener = _Opener()
    with pytest.raises(ValueError) as err:
        opener.open("bonk:bonk")
    assert str(err.value) == "fiddlesticks"
07070100000074000081A4000000000000000000000001684BE19C00000FD5000000000000000000000000000000000000002400000000sambacc-v0.6+git.60.2f89a38/tox.ini
[tox]
envlist = flake8, formatting, {py3,py39}-mypy, py3, py39, schemacheck, py3-sys
isolated_build = True

[testenv]
description = Run unit tests
passenv =
    WRITABLE_PASSWD
    NSS_WRAPPER_PASSWD
    NSS_WRAPPER_GROUP
deps =
    pytest
    pytest-cov
    dnspython
    -e .[validation,yaml,toml,grpc]
commands =
    py.test -v --cov=sambacc --cov-report=html {posargs:tests}

[testenv:{py3,py39}-mypy]
description = Run mypy static checker tool
deps =
    mypy
    types-setuptools
    types-pyyaml
    types-jsonschema>=4.10
    types-protobuf
    types-grpcio
    tomli
    {[testenv]deps}
commands =
    mypy sambacc tests

[testenv:py3-sys]
description = Run unit tests with system packages to validate Samba integration
# py3-sys -- more like sisyphus, am I right?
#
# In order to run tests that rely on "system level" packages (samba,
# xattr, etc.), and not have a lot of test skips, we have to enable the
# sitepackages option. However when it is enabled and you already have a tool
# (mypy, pytest, etc.) installed at the system tox emits a `command found but
# not installed in testenv` warning. We can avoid all those warnings except for
# the 'py3' env by putting all that system enablement stuff only in this
# section.
sitepackages = True
deps =
    pytest
    pytest-cov
    dnspython
    inotify_simple
    pyxattr
allowlist_externals =
    /usr/bin/py.test

[testenv:formatting]
description = Check the style/formatting for the source files
deps =
    black>=24, <25
commands =
    black --check -v --extend-exclude sambacc/grpc/generated .

[testenv:flake8]
description = Basic python linting for the source files
deps =
    flake8
commands =
    flake8 --exclude sambacc/grpc/generated sambacc tests

[testenv:schemacheck]
description = Check the JSON Schema files are valid
deps =
    black>=24, <25
    PyYAML
commands =
    python -m sambacc.schema.tool

[testenv:schemaupdate]
description = Regenerate source files from JSON Schema file(s)
deps =
    black>=24, <25
    PyYAML
commands =
    python -m sambacc.schema.tool --update

# this gitlint rule is not run by default.
# Run it manually with: tox -e gitlint
[testenv:gitlint]
description = Check the formatting of Git commit messages
deps =
    gitlint==0.19.1
commands =
    gitlint -C .gitlint --commits origin/master.. lint


# IMPORTANT: note that there are two environments provided here for generating
# the grpc/protobuf files. One uses a typical tox environment with versions
# and the other uses system packages (sitepackages=True).
# The former is what developers are expected to use HOWEVER because we must
# deliver on enterprise linux platforms we provide a way to generate
# the code using system packages for comparison purposes.

# Generate grpc/protobuf code from .proto files.
# Includes a generator for .pyi files.
# Uses sed to fix the foolish import behavior of the grpc generator.
[testenv:grpc-generate]
description = Generate gRPC files
deps =
    grpcio-tools ~= 1.48.0
    protobuf ~= 3.19.0
    mypy-protobuf
allowlist_externals = sed
commands =
    python -m grpc_tools.protoc \
        -I sambacc/grpc/protobufs \
        --python_out=sambacc/grpc/generated \
        --grpc_python_out=sambacc/grpc/generated \
        --mypy_out=sambacc/grpc/generated \
        sambacc/grpc/protobufs/control.proto
    sed -i -E 's/^import.*_pb2/from . \0/' \
        sambacc/grpc/generated/control_pb2_grpc.py

# Generate grpc/protobuf code from .proto files using system packages.
# Does NOT include a generator for .pyi files.
# Uses sed to fix the foolish import behavior of the grpc generator.
[testenv:grpc-sys-generate]
description = Generate gRPC files using system python packages
sitepackages = True
allowlist_externals = sed
commands =
    python -m grpc_tools.protoc \
        -I sambacc/grpc/protobufs \
        --python_out=sambacc/grpc/generated \
        --grpc_python_out=sambacc/grpc/generated \
        sambacc/grpc/protobufs/control.proto
    sed -i -E 's/^import.*_pb2/from . \0/' \
        sambacc/grpc/generated/control_pb2_grpc.py
07070100000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000B00000000TRAILER!!!1093 blocks
openSUSE Build Service is sponsored by