File snowflake-2.11.0.obscpio of Package snowflake

07070100000000000081A400000000000000000000000167D9BD4E000000E6000000000000000000000000000000000000001C00000000snowflake-2.11.0/.gitignore
*.swp
*.swo
*.swn
*.swm
.DS_Store
datadir/
broker/broker
client/client
server/server
proxy/proxy
probetest/probetest
snowflake.log
ignore/

# from running the vagrant setup
/.vagrant/
/sdk-tools-linux-*.zip*
/android-ndk-*
/tools/
07070100000001000081A400000000000000000000000167D9BD4E00003102000000000000000000000000000000000000002000000000snowflake-2.11.0/.gitlab-ci.yml
stages:
  - test
  - deploy
  - container-build
  - container-mirror

variables:
  DEBIAN_FRONTEND: noninteractive
  DEBIAN_OLD_STABLE: buster
  DEBIAN_STABLE: bullseye
  REPRODUCIBLE_FLAGS: -trimpath -ldflags=-buildid=

# set up apt for automated use
.apt-template: &apt-template
- export LC_ALL=C.UTF-8
- export DEBIAN_FRONTEND=noninteractive
- ln -fs /usr/share/zoneinfo/Etc/UTC /etc/localtime
- echo 'quiet "1";'
       'APT::Install-Recommends "0";'
       'APT::Install-Suggests "0";'
       'APT::Acquire::Retries "20";'
       'APT::Get::Assume-Yes "true";'
       'Dpkg::Use-Pty "0";'
      > /etc/apt/apt.conf.d/99gitlab
- apt-get update
- apt-get dist-upgrade


# Set things up to use the OS-native packages for Go.  Anything that
# is downloaded by go during the `go fmt` stage is not coming from the
# Debian/Ubuntu repo. So those would need to be packaged for this to
# make it into Debian and/or Ubuntu.
.debian-native-template: &debian-native-template
  variables:
    GOPATH: /usr/share/gocode
  before_script:
    - apt-get update
    - apt-get -qy install --no-install-recommends
        build-essential
        ca-certificates
        git
        golang
        golang-github-cheekybits-genny-dev
        golang-github-jtolds-gls-dev
        golang-github-klauspost-reedsolomon-dev
        golang-github-lucas-clemente-quic-go-dev
        golang-github-smartystreets-assertions-dev
        golang-github-smartystreets-goconvey-dev
        golang-github-tjfoc-gmsm-dev
        golang-github-xtaci-kcp-dev
        golang-github-xtaci-smux-dev
        golang-golang-x-crypto-dev
        golang-golang-x-net-dev
        golang-goptlib-dev
        golang-golang-x-sys-dev
        golang-golang-x-text-dev
        golang-golang-x-xerrors-dev

# use Go installed as part of the official, Debian-based Docker images
.golang-docker-debian-template: &golang-docker-debian-template
  before_script:
    - apt-get update
    - apt-get -qy install --no-install-recommends
        ca-certificates
        git

.go-test: &go-test
  - gofmt -d .
  - test -z "$(go fmt ./...)"
  - go vet ./...
  - go test -v -race ./...

  - cd $CI_PROJECT_DIR/client/
  - go get
  - go build $REPRODUCIBLE_FLAGS

.test-template: &test-template
  artifacts:
    name: "${CI_PROJECT_PATH}_${CI_JOB_STAGE}_${CI_JOB_ID}_${CI_COMMIT_REF_NAME}_${CI_COMMIT_SHA}"
    paths:
      - client/*.aar
      - client/*.jar
      - client/client
    expire_in: 1 week
    when: on_success
  after_script:
    - echo "Download debug artifacts from https://gitlab.com/${CI_PROJECT_PATH}/-/jobs"
    # this file changes every time but should not be cached
    - rm -f $GRADLE_USER_HOME/caches/modules-2/modules-2.lock
    - rm -rf $GRADLE_USER_HOME/caches/*/plugin-resolution/

# -- jobs ------------------------------------------------------------

android:
  image: containers.torproject.org/tpo/anti-censorship/duplicatedcontainerimages:golang-1.23-$DEBIAN_STABLE
  variables:
    ANDROID_HOME: /usr/lib/android-sdk
    LANG: C.UTF-8
  cache:
    paths:
      - .gradle/wrapper
      - .gradle/caches
  <<: *test-template
  before_script:
    - *apt-template
    - apt-get install
        android-sdk-platform-23
        android-sdk-platform-tools
        build-essential
        curl
        default-jdk-headless
        git
        gnupg
        unzip
        wget
        ca-certificates

    - ndk=android-ndk-r21e-linux-x86_64.zip
    - wget --continue --no-verbose https://dl.google.com/android/repository/$ndk
    - echo "ad7ce5467e18d40050dc51b8e7affc3e635c85bd8c59be62de32352328ed467e  $ndk" > $ndk.sha256
    - sha256sum -c $ndk.sha256
    - unzip -q $ndk
    - rm ${ndk}*
    - mv android-ndk-* $ANDROID_HOME/ndk-bundle/

    - chmod -R a+rX $ANDROID_HOME
  script:
    - *go-test
    - export GRADLE_USER_HOME=$CI_PROJECT_DIR/.gradle
    - go version
    - go env

    - go get golang.org/x/mobile/cmd/gomobile
    - go get golang.org/x/mobile/cmd/gobind
    - go install golang.org/x/mobile/cmd/gobind
    - go install golang.org/x/mobile/cmd/gomobile
    - gomobile init

    - cd $CI_PROJECT_DIR/client
    # gomobile builds a shared library not a CLI executable
    - sed -i 's,^package main$,package snowflakeclient,' *.go
    - go get golang.org/x/mobile/bind
    - gomobile bind -v -target=android $REPRODUCIBLE_FLAGS .

go-1.21:
  image: containers.torproject.org/tpo/anti-censorship/duplicatedcontainerimages:golang-1.21-$DEBIAN_STABLE
  <<: *golang-docker-debian-template
  <<: *test-template
  script:
    - *go-test

go-1.23:
  image: containers.torproject.org/tpo/anti-censorship/duplicatedcontainerimages:golang-1.23-$DEBIAN_STABLE
  <<: *golang-docker-debian-template
  <<: *test-template
  script:
    - *go-test

debian-testing:
  image: debian:testing
  <<: *debian-native-template
  <<: *test-template
  script:
    - *go-test

shadow-integration:
  image: containers.torproject.org/tpo/anti-censorship/duplicatedcontainerimages:golang-1.21-$DEBIAN_STABLE
  variables:
    SHADOW_VERSION: "193924aae0dab30ffda0abe29467f552949849fa"
    TGEN_VERSION: "v1.1.2"
  cache:
    key: sf-integration-$SHADOW_VERSION-$TGEN_VERSION
    paths:
      - /opt/
  artifacts:
    paths:
      - shadow.data.tar.gz
    when: on_failure
  tags:
    - amd64
    - tpa
  script:
    - apt-get update
    - apt-get install -y git tor
    - mkdir -p ~/.local/bin
    - mkdir -p ~/.local/src
    - export PATH=$PATH:$CI_PROJECT_DIR/opt/bin/

    # Install shadow and tgen
    - pushd ~/.local/src
    - |
      if [ ! -f opt/shadow/bin/shadow ]
      then
        echo "The required version of shadow was not cached, building from source"
        git clone --shallow-since=2021-08-01 https://github.com/shadow/shadow.git
        pushd shadow/
        git checkout $SHADOW_VERSION
        CONTAINER=debian:stable-slim ci/container_scripts/install_deps.sh
        CC=gcc CONTAINER=debian:stable-slim ci/container_scripts/install_extra_deps.sh
        export PATH="$HOME/.cargo/bin:${PATH}"
        ./setup build --jobs $(nproc) --prefix $CI_PROJECT_DIR/opt/
        ./setup install
        popd
      fi
    - |
      if [ ! -f opt/shadow/bin/tgen ]
      then
        echo "The required version of tgen was not cached, building from source"
        git clone --branch $TGEN_VERSION --depth 1 https://github.com/shadow/tgen.git
        pushd tgen/
        apt-get install -y cmake libglib2.0-dev libigraph-dev
        mkdir build && cd build
        cmake .. -DCMAKE_INSTALL_PREFIX=$CI_PROJECT_DIR/opt/
        make
        make install
        popd
      fi
      install $CI_PROJECT_DIR/opt/bin/tgen ~/.local/bin/tgen
    - popd

    # Apply snowflake patch(es)
    - |
      git clone --depth 1 https://github.com/cohosh/shadow-snowflake-minimal
      git am -3 shadow-snowflake-minimal/*.patch

    # Install snowflake binaries to .local folder
    - |
      for app in "proxy" "client" "server" "broker" "probetest"; do
        pushd $app
        go build
        install $app ~/.local/bin/snowflake-$app
        popd
      done

    # Install stun server
    - GOBIN=~/.local/bin go install github.com/gortc/stund@latest

    # Run a minimal snowflake shadow experiment
    - pushd shadow-snowflake-minimal/
    - shadow --log-level=debug --model-unblocked-syscall-latency=true snowflake-minimal.yaml > shadow.log

    # Check to make sure streams succeeded
    - |
      if [ $(grep -c "stream-success" shadow.data/hosts/snowflakeclient/tgen.*.stdout) = 10 ]
      then
        echo "All streams in shadow completed successfully"
      else
        echo "Shadow simulation failed"
        exit 1
      fi
  after_script:
    - tar -czvf $CI_PROJECT_DIR/shadow.data.tar.gz shadow-snowflake-minimal/shadow.data/ shadow-snowflake-minimal/shadow.log

generate_tarball:
  stage: deploy
  image: golang:1.21-$DEBIAN_STABLE
  rules:
    - if: $CI_COMMIT_TAG
  script:
    - go mod vendor
    - tar czf ${CI_PROJECT_NAME}-${CI_COMMIT_TAG}.tar.gz --transform "s,^,${CI_PROJECT_NAME}-${CI_COMMIT_TAG}/," *
  after_script:
    - echo TAR_JOB_ID=$CI_JOB_ID >> generate_tarball.env
  artifacts:
    paths:
      - ${CI_PROJECT_NAME}-${CI_COMMIT_TAG}.tar.gz
    reports:
      dotenv: generate_tarball.env

release-job:
  stage: deploy
  image: registry.gitlab.com/gitlab-org/release-cli:latest
  rules:
    - if: $CI_COMMIT_TAG
  needs:
    - job: generate_tarball
      artifacts: true
  script:
    - echo "running release_job"
  release:
    name: 'Release $CI_COMMIT_TAG'
    description: 'Created using the release-cli'
    tag_name: '$CI_COMMIT_TAG'
    ref: '$CI_COMMIT_TAG'
    assets:
      links:
        - name: '${CI_PROJECT_NAME}-${CI_COMMIT_TAG}.tar.gz'
          url: '${CI_PROJECT_URL}/-/jobs/${TAR_JOB_ID}/artifacts/file/${CI_PROJECT_NAME}-${CI_COMMIT_TAG}.tar.gz'

# Build the container only if the commit is to main, or it is a tag.
# If the commit is to main, then the docker image tag should be set to `nightly`.
# If it is a tag, then the docker image tag should be set to the tag name.
build-container:
  variables:
    TAG: $CI_COMMIT_TAG # Will not be set on a non-tag build, will be set later 
  stage: container-build
  parallel:
    matrix:
      - ARCH: amd64
      - ARCH: arm64
      - ARCH: s390x
  tags:
    - $ARCH
  image:
    name: gcr.io/kaniko-project/executor:debug
    entrypoint: [""]
  script:
    - if [ $CI_COMMIT_REF_NAME == "main" ]; then export TAG='nightly'; fi
    - >-
      /kaniko/executor
      --context "${CI_PROJECT_DIR}"
      --dockerfile "${CI_PROJECT_DIR}/Dockerfile"
      --destination "${CI_REGISTRY_IMAGE}:${TAG}_${ARCH}"
  rules:
    - if: $CI_COMMIT_REF_NAME == "main"
    - if: $CI_COMMIT_TAG

merge-manifests:
  variables:
    TAG: $CI_COMMIT_TAG
  stage: container-build
  needs:
    - job: build-container
      artifacts: false
  image:
      name: containers.torproject.org/tpo/anti-censorship/duplicatedcontainerimages:mplatform-manifest-tool-alpine
      entrypoint: [""]
  script:
    - if [ $CI_COMMIT_REF_NAME == "main" ]; then export TAG='nightly'; fi
    - >-
      manifest-tool
      --username="${CI_REGISTRY_USER}"
      --password="${CI_REGISTRY_PASSWORD}"
      push from-args
      --platforms linux/amd64,linux/arm64,linux/s390x
      --template "${CI_REGISTRY_IMAGE}:${TAG}_ARCH"
      --target "${CI_REGISTRY_IMAGE}:${TAG}"
  rules:
    - if: $CI_COMMIT_REF_NAME == "main"
      when: always
    - if: $CI_COMMIT_TAG
      when: always

# If this is a tag, then we want to additionally tag the image as `latest`
tag-container-release:
  stage: container-build
  needs:
    - job: merge-manifests
      artifacts: false
  image: 
      name: gcr.io/go-containerregistry/crane:debug
      entrypoint: [""]
  allow_failure: false
  variables:
    CI_REGISTRY: $CI_REGISTRY
    IMAGE_TAG: $CI_REGISTRY_IMAGE:$CI_COMMIT_TAG
    RELEASE_TAG: $CI_REGISTRY_IMAGE:latest
  script:
    - echo "Tagging docker image with stable tag with crane"
    - echo -n "$CI_JOB_TOKEN" | crane auth login $CI_REGISTRY -u gitlab-ci-token --password-stdin
    - crane cp $IMAGE_TAG $RELEASE_TAG
  rules:
    - if: $CI_COMMIT_TAG
      when: always

clean-image-tags:
  stage: container-build
  needs:
    - job: merge-manifests
      artifacts: false
  image: containers.torproject.org/tpo/tpa/base-images/debian:bookworm
  before_script:
    - *apt-template
    - apt-get install -y jq curl
  script:
    - "REGISTRY_ID=$(curl --silent --request GET --header \"JOB-TOKEN: ${CI_JOB_TOKEN}\" \"https://gitlab.torproject.org/api/v4/projects/${CI_PROJECT_ID}/registry/repositories\" | jq '.[].id')"
    - "curl --request DELETE --data \"name_regex_delete=(latest|${CI_COMMIT_TAG})_.*\" --header \"JOB-TOKEN: ${CI_JOB_TOKEN}\" \"https://gitlab.torproject.org/api/v4/projects/${CI_PROJECT_ID}/registry/repositories/${REGISTRY_ID}/tags\""
  rules:
    - if: $CI_COMMIT_REF_NAME == "main"
      when: always
    - if: $CI_COMMIT_TAG
      when: always
      
mirror-image-to-dockerhub:
  stage: container-mirror
  variables:
    DOCKERHUB_MIRROR_REPOURL: $DOCKERHUB_MIRROR_REPOURL
    DOCKERHUB_USERNAME: $DOCKERHUB_MIRROR_USERNAME
    DOCKERHUB_PASSWORD: $DOCKERHUB_MIRROR_PASSWORD
  image: 
    name: gcr.io/go-containerregistry/crane:debug
    entrypoint: [""]
  rules:
    - if: $CI_COMMIT_REF_NAME == "main"
      when: always
    - if: $CI_COMMIT_TAG
      when: always
  script:
    - echo "$DOCKERHUB_PASSWORD" | crane auth login docker.io -u $DOCKERHUB_MIRROR_USERNAME --password-stdin
    - crane cp -a containers.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake $DOCKERHUB_MIRROR_REPOURL
07070100000002000081A400000000000000000000000167D9BD4E00000000000000000000000000000000000000000000001D00000000snowflake-2.11.0/.gitmodules
07070100000003000081A400000000000000000000000167D9BD4E000000D0000000000000000000000000000000000000001D00000000snowflake-2.11.0/.travis.yml
language: go

dist: xenial

go_import_path: git.torproject.org/pluggable-transports/snowflake.git/v2

go:
    - 1.13.x

script:
    - test -z "$(go fmt ./...)"
    - go vet ./...
    - go test -v -race ./...
07070100000004000081A400000000000000000000000167D9BD4E000000A5000000000000000000000000000000000000002100000000snowflake-2.11.0/CONTRIBUTING.md
- When editing Go, please run `go fmt` before every commit.

- You may run tests locally with either `go test` or `npm test` for Go and
  JavaScript, respectively.
07070100000005000081A400000000000000000000000167D9BD4E00003291000000000000000000000000000000000000001B00000000snowflake-2.11.0/ChangeLog
Changes in version v2.11.0 - 2025-03-18
- Fix data race warnings for tokens_t
- Fix race condition in proxy connection count stats
- Make NATPolicy thread-safe
- Fix race conditions with error scope
- Fix race condition with proxy isClosing variable
- Issue 40454: Update broker metrics to count matches, denials, and timeouts
- Add proxy event and metrics for failed connections
- Issue 40377: Create CI artifact if shadow fails
- Issue 40438: Copy base client config for each SOCKS connection
- Fix minor data race in Snowflake broker metrics
- Issue 40363: Process and read broker SQS messages more quickly
- Issue 40419: delay before calling dc.Close() to improve NAT test on proxy
- Add country stats to proxy prometheus metrics
- Issue 40381: Avoid snowflake client dependency in proxy
- Issue 40446: Lower broker ClientTimeout to 5 seconds in line with CDN77 defaults
- Refactor out utls library into ptutil/utls
- Issue 40414: Use /etc/localtime for CI
- Issue 40440: Add LE self-signed ISRG Root X1 to cert pool
- Proxy refactor to simplify tokens.ret() on error
- Clarify ephemeral-ports-range proxy option
- Issue 40417: Fixes and updates to CI containers
- Issue 40178: Handle unknown client type better
- Issue 40304: Update STUN server list
- Issue 40210: Remove proxy log when offer is nil
- Issue 40413: Log EventOnCurrentNATTypeDetermined for proxy
- Use named return for some functions to improve readability
- Issue 40271: Use pion SetIPFilter rather than our own StripLocalAddress
- Issue 40413: Suppress logs of proxy events by default
- Add IsLinkLocalUnicast in IsLocal
- Fix comments
- Bump versions of dependencies

Changes in version v2.10.1 - 2024-11-11
- Issue 40406: Update version string

Changes in version v2.10.0 - 2024-11-07
- Issue 40402: Add proxy event for when client has connected
- Issue 40405: Prevent panic for duplicate SnowflakeConn.Close() calls
- Enable local time for proxy logging
- Have proxy summary statistics log average transfer rate
- Issue 40210: Remove duplicate poll interval loop in proxy
- Issue 40371: Prevent broker and proxy from rejecting clients without ICE candidates
- Issue 40392: Allow the proxy and probetest to set multiple STUN URLs
- Issue 40387: Fix error in probetest NAT check
- Fix proxy panic on invalid relayURL
- Set empty pattern if broker bridge-list is empty
- Improve documentation of Ephemeral[Min,Max]Port
- Fix resource leak and NAT check in probetest
- Fix memory leak from failed NAT check
- Improve NAT check logging
- Issue 40230: Send answer even if ICE gathering is not complete
- Improve broker error message on unknown bridge fingerprint
- Don't proxy private IP addresses
- Only accept ws:// and wss:// relay addresses
- Issue 40373: Add cli flag and SnowflakeProxy field to modify proxy poll interval
- Use %w not %v in fmt.Errorf
- Updates to documentation
- Adjust copy buffer size to improve proxy performance
- Improve descriptions of cli flags
- Cosmetic changes for code readability
- Issue 40367: Deduplicate prometheus metrics names
- Report the version of snowflake to the tor process
- Issue 40365: Indicate whether the repo was modified in the version string
- Simplify NAT checking logic
- Issue 40354: Use ptutil library for safelog and prometheus metrics
- Add cli flag to set a listen address for proxy prometheus metrics
- Issue 40345: Integrate docker image with release process
- Bump versions of dependencies

Changes in version v2.9.2 - 2024-03-18
- Issue 40288: Add integration testing with Shadow
- Issue 40345: Automatically build and push containers to our registry
- Issue 40339: Fix client ID reuse bug in SQS rendezvous
- Issue 40341: Modify SQS rendezvous arguments to use b64 encoded parameters
- Issue 40330: Add new metrics at the broker for per-country rendezvous stats
- Issue 40345: Update docker container tags
- Bump versions of dependencies

Changes in version v2.9.1 - 2024-02-27
- Issue 40335: Fix release job
- Change deprecated io/ioutil package to io package
- Bump versions of dependencies

Changes in version v2.9.0 - 2024-02-05
- Issue 40285: Add vcs revision to version string
- Issue 40294: Update recommended torrc options in client README
- Issue 40306: Scrub space-separated IP addresses
- Add proxy commandline option for probe server URL
- Use SetNet setting in probest to ignore net.Interfaces error
- Add probetest commandline option for STUN URL
- Issue 26151: Implement SQS rendezvous in client and broker
- Add broker metrics to track rendezvous method
- Cosmetic code quality fixes
- Bump versions of dependencies

Changes in version v2.8.1 - 2023-12-21
- Issue 40276: Reduce allocations in encapsulation.ReadData
- Issue 40310: Remove excessive logging for closed proxy connections
- Issue 40278: Add network fix for old version of android to proxy
- Bump versions of dependencies

Changes in version v2.8.0 - 2023-11-20
- Issue 40069: Add outbound proxy support
- Issue 40301: Fix for a bug in domain fronting configurations
- Issue 40302: Remove throughput summary from proxy logger
- Issue 40302: Change proxy stats logging to only log stats for traffic that occurred in the summary interval
- Update renovate bot configuration to use Go 1.21
- Bump versions of dependencies

Changes in version v2.7.0 - 2023-10-16
7142fa3 fix(proxy): Correctly close connection pipe when dealing with error
6393af6 Remove proxy churn measurements from broker.
a615e8b fix(proxy): remove _potential_ deadlock
d434549 Maintain backward compatability with old clients
9fdfb3d Randomly select front domain from comma-separated list
5cdf52c Update dependencies
1559963 chore(deps): update module github.com/xtaci/kcp-go/v5 to v5.6.3
60e66be Remove Golang 1.20 from CI Testing
1d069ca Update CI targets to test android from golang 1.21
3a050c6 Use ShouldBeNil to check for nil values
e45e8e5 chore(deps): update module github.com/smartystreets/goconvey to v1.8.1
f47ca18 chore(deps): update module gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/goptlib to v1.5.0
106da49 chore(deps): update module github.com/pion/webrtc/v3 to v3.2.20
2844ac6 Update CI targets to include only Go 1.20 and 1.21
f4e1ab9 chore(deps): update module golang.org/x/net to v0.15.0
caaff70 Update module golang.org/x/sys to v0.12.0

Changes in version v2.6.1 - 2023-09-11
- a3bfc28 Update module golang.org/x/crypto to v0.12.0
- e37e15a Update golang Docker tag to v1.21
- b632c7d Workaround for shadow in lieu of AF_NETLINK support
- 0cb2975 Update module golang.org/x/net to v0.13.0 [SECURITY]
- f73fe6e Keep the 'v' from the tag on the released .tar.gz
- 8104732 Change DefaultRelayURL back to wss://snowflake.torproject.net/.
- d932cb2 feat: add option to expose the stats by using metrics
- af73ab7 Add renovate config
- aaeab3f Update dependencies
- 58c3121 Close temporary UDPSession in TestQueuePacketConnWriteToKCP.
- 80980a3 Fix a comment left over from turbotunnel-quic.
- 08d1c6d Bump minimum required version of go

Changes in version v2.6.0 - 2023-06-19
- Issue 40243: Implement datachannel flow control at proxy
- Issue 40087: Append Let's Encrypt ISRG Root X1 to cert pool
- Issue 40198: Use IP_BIND_ADDRESS_NO_PORT when dialing the ORPort on linux
- Move from gitweb to gitlab
- Add warning log at broker when proxy does not connect with client
- Fix unit tests after SDP validation
- Soften non-critical log from error to warning
- Issue 40231: Validate SDP offers and answers
- Add scanner error check to ClusterCounter.Count
- Fix server benchmark tests
- Issue 40260: Use a sync.Pool to reuse QueuePacketConn buffers
- Issue 40043: Restore ListenAndServe error in server
- Update pion webrtc library versions
- Issue 40108: Add outbound address config option to proxy
- Issue 40260: Fix a data race in the Snowflake server
- Issue 40216: Add utls-imitate, utls-nosni documentation to the README
- Fix up/down traffic stats in standalone proxy
- Issue 40226: Filter out ICE servers that are not STUN
- Issue 40226: Update README to reflect the type of ICE servers we support
- Issue 40226: Parse ICE servers using the pion/ice library function
- Bring client torrc up to date with Tor Browser

Changes in version v2.5.1 - 2023-01-18
- Issue 40249: Fix issue with Skip Hello Verify patch

Changes in version v2.5.0 - 2023-01-18
- Issue 40249: Apply Skip Hello Verify Migration

Changes in version v2.4.3 - 2023-01-16
- Fix version number in version.go

Changes in version v2.4.2 - 2023-01-13
- Issue 40208: Enhance help info for capacity flag
- Issue 40232: Update README and fix help output
- Issue 40173: Increase clientIDAddrMapCapacity
- Issue 40177: Manually unlock mutex in ClientMap.SendQueue
- Issue 40177: Have SnowflakeClientConn implement io.WriterTo
- Issue 40179: Reduce turbotunnel queueSize from 2048 to 512
- Issue 40187/40199: Take ownership of buffer in QueuePacketConn QueueIncoming/WriteTo
- Add more tests for URL encoded IPs (safelog)
- Fix server flag name
- Issue 40200: Use multiple parallel KCP state machines in the server
- Add a num-turbotunnel server transport option
- Issue 40241: Switch default proxy STUN server to stun.l.google.com

Changes in version v2.4.1 - 2022-12-01
- Issue 40224: Bug fix in utls roundtripper

Changes in version v2.4.0 - 2022-11-29
- Fix proxy command line help output
- Issue 40123: Reduce multicast DNS candidates
- Add ICE ephemeral ports range setting
- Reformat using Go 1.19
- Update CI tests to include latest and minimum Go versions
- Issue 40184: Use fixed unit for bandwidth logging
- Update gorilla/websocket to v1.5.0
- Issue 40175: Server performance improvements
- Issue 40183: Change snowflake proxy log verbosity
- Issue 40117: Display proxy NAT type in logs
- Issue 40198: Add a `orport-srcaddr` server transport option
- Add gofmt output to CI test
- Issue 40185:  Change bandwidth type from int to int64 to prevent overflow
- Add version output support to snowflake
- Issue 40229: Change regexes for ipv6 addresses to catch url-encoded addresses
- Issue 40220: Close stale connections in standalone proxy

Changes in version v2.3.0 - 2022-06-23
- Issue 40146: Avoid performing two NAT probe tests at startup
- Issue 40134: Log messages from client NAT check failures are confusing
- Issue 34075: Implement metrics to measure snowflake churn
- Issue 28651: Prepare all pieces of the snowflake pipeline for a second snowflake bridge
- Issue 40129: Distributed Snowflake Server Support

Changes in version v2.2.0 - 2022-05-25

- Issue 40099: Initialize SnowflakeListener.closed
- Add connection failure events for proxy timeouts
- Issue 40103: Fix proxy logging verb tense
- Fix up and downstream metrics output for proxy
- Issue 40054: uTLS for broker negotiation
- Forward bridge fingerprint from client to broker (WIP, Issue 28651)
- Issue 40104: Make it easier to configure proxy type
- Remove version from ClientPollRequest
- Issue 40124: Move tor-specific code out of library
- Issue 40115: Scrub pt event logs
- Issue 40127: Bump webrtc and dtls library versions
- Bump version of webrtc and dtls to fix dtls CVEs
- Issue 40141: Ensure library calls of events can be scrubbed

Changes in version v2.1.0 - 2022-02-08

- Issue 40098: Remove support for legacy one shot mode
- Issue 40079: Make connection summary at proxy privacy preserving
- Issue 40076: Add snowflake event API for notifications of connection events
- Issue 40084: Increase capacity of client address map at the server
- Issue 40060: Further clean up snowflake server logs
- Issue 40089: Validate proxy and client supplied strings at broker
- Issue 40014: Update version of DTLS library to include fingerprinting fixes
- Issue 40075: Support recurring NAT type check in standalone proxy


Changes in version v2.0.0 - 2021-11-04

- Turn the standalone snowflake proxy code into a library
- Clean up and reworked the snowflake client and server library code
- Unify broker/bridge domains to *.torproject.net
- Updates to the snowflake library documentation
- New package functions to define and set a rendezvous method with the
broker
- Factor out the broker geoip code into its own external library
- Bug fix to check error calls in preparePeerConnection
- Bug fixes in snowflake tests
- Issue 40059: add the ability to pass in snowflake arguments through SOCKS
- Increase buffer sizes for sending and receiving snowflake data
- Issue 25985: rendezvous with the broker using AMP cache
- Issue 40055: wait for the full poll interval between proxy polls

Changes in version v1.1.0 - 2021-07-13

- Refactors of the Snowflake broker code
- Refactors of the Snowflake proxy code
- Issue 40048: assign proxies based on self-reported client load
- Issue 40052: fixed a memory leak in the server accept loop
- Version bump of kcp and smux libraries
- Bug fix to pass the correct client address to the Snowflake bridge metrics
counter
- Bug fixes to prevent race conditions in the Snowflake client 

Changes in version v1.0.0 - 2021-06-07

- Initial release.
07070100000006000081A400000000000000000000000167D9BD4E0000040A000000000000000000000000000000000000001C00000000snowflake-2.11.0/Dockerfile
FROM docker.io/library/golang:1.23-bookworm AS build

# Set some labels
# io.containers.autoupdate label will instruct podman to reach out to the
# corresponding registry to check if the image has been updated. If an image
# must be updated, Podman pulls it down and restarts the systemd unit executing
# the container. See podman-auto-update(1) for more details, or
# https://docs.podman.io/en/latest/markdown/podman-auto-update.1.html
LABEL io.containers.autoupdate=registry
LABEL org.opencontainers.image.authors="anti-censorship-team@lists.torproject.org"

RUN apt-get update && apt-get install -y tor-geoipdb

ADD . /app

WORKDIR /app/proxy
RUN go get
RUN CGO_ENABLED=0 go build -o proxy -ldflags '-extldflags "-static" -w -s'  .

FROM scratch

COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
COPY --from=build /usr/share/zoneinfo /usr/share/zoneinfo
COPY --from=build /usr/share/tor/geoip* /usr/share/tor/
COPY --from=build /app/proxy/proxy /bin/proxy

ENTRYPOINT [ "/bin/proxy" ]
07070100000007000081A400000000000000000000000167D9BD4E0000071E000000000000000000000000000000000000001900000000snowflake-2.11.0/LICENSE
              This file contains the license for "Snowflake"
     a free software project which provides a WebRTC pluggable transport.

================================================================================
Copyright (c) 2016, Serene Han, Arlo Breault
Copyright (c) 2019-2020, The Tor Project, Inc

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

  * Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.

  * Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.

  * Neither the names of the copyright owners nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================================================
07070100000008000081A400000000000000000000000167D9BD4E000013F5000000000000000000000000000000000000001B00000000snowflake-2.11.0/README.md
# Snowflake

Snowflake is a censorship-evasion pluggable transport using WebRTC, inspired by Flashproxy.

<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents**

- [Structure of this Repository](#structure-of-this-repository)
- [Usage](#usage)
  - [Using Snowflake with Tor](#using-snowflake-with-tor)
  - [Running a Snowflake Proxy](#running-a-snowflake-proxy)
  - [Using the Snowflake Library with Other Applications](#using-the-snowflake-library-with-other-applications)
- [Test Environment](#test-environment)
- [FAQ](#faq)
- [More info and links](#more-info-and-links)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->

### Structure of this Repository

- `broker/` contains code for the Snowflake broker
- `doc/` contains Snowflake documentation and manpages
- `client/` contains the Tor pluggable transport client and client library code
- `common/` contains generic libraries used by multiple pieces of Snowflake
- `proxy/` contains code for the Go standalone Snowflake proxy
- `probetest/` contains code for a NAT probetesting service
- `server/` contains the Tor pluggable transport server and server library code

### Usage

Snowflake is currently deployed as a pluggable transport for Tor.

#### Using Snowflake with Tor

To use the Snowflake client with Tor, you will need to add the appropriate `Bridge` and `ClientTransportPlugin` lines to your [torrc](https://2019.www.torproject.org/docs/tor-manual.html.en) file. See the [client README](client) for more information on building and running the Snowflake client.
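
For illustration, a minimal set of such lines might look like this (the bridge address, fingerprint, and client path are placeholders; take the currently recommended lines from the client README):

```
UseBridges 1
ClientTransportPlugin snowflake exec /usr/local/bin/snowflake-client
Bridge snowflake 192.0.2.3:80 2B280B23E1107BB62ABFC40DDCC8824814F80A72
```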

#### Running a Snowflake Proxy

You can contribute to Snowflake by running a Snowflake proxy, either in your browser or as a standalone Go program. See our [community documentation](https://community.torproject.org/relay/setup/snowflake/) for more details.
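
If you would rather embed a proxy in your own Go program, the standalone proxy is also usable as a library. A minimal sketch, assuming the `proxy/lib` package's `SnowflakeProxy` type and its `Start` method (check that package for the authoritative API and defaults):

```go
package main

import (
	"log"

	sfp "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/proxy/lib"
)

func main() {
	// Zero values are assumed here to fall back to the library defaults
	// (broker, STUN, and relay URLs); set the fields explicitly otherwise.
	proxy := sfp.SnowflakeProxy{}
	if err := proxy.Start(); err != nil {
		log.Fatal(err)
	}
}
```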

#### Using the Snowflake Library with Other Applications

Snowflake can be used as a Go API, and adheres to the v2.1 pluggable transports specification. For more information on using the Snowflake Go library, see the [Snowflake library documentation](doc/using-the-snowflake-library.md).
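
As a rough sketch of that usage (the function and field names follow the library documentation but should be checked against `client/lib`; the broker and STUN values are placeholders):

```go
package main

import (
	"log"

	sf "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/client/lib"
)

func main() {
	transport, err := sf.NewSnowflakeClient(sf.ClientConfig{
		BrokerURL:    "https://broker.example/",
		ICEAddresses: []string{"stun:stun.l.google.com:19302"},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Each Dial performs a rendezvous through the broker and returns a
	// net.Conn carried over one or more snowflake proxies.
	conn, err := transport.Dial()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```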

### Test Environment

There is a Docker-based test environment at https://github.com/cohosh/snowbox.

### FAQ

**Q: How does it work?**

In the Tor use-case:

1. Volunteers visit websites that host the 'snowflake' proxy, run a snowflake [web extension](https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake-webext), or use a standalone proxy.
2. Tor clients automatically find available browser proxies via the Broker
(the domain fronted signaling channel).
3. Tor client and browser proxy establish a WebRTC peer connection.
4. Proxy connects to some relay.
5. Tor occurs.

More detailed information about how clients, snowflake proxies, and the Broker
fit together is on the way...

**Q: What are the benefits of this PT compared with other PTs?**

Snowflake combines the advantages of flashproxy and meek. Primarily:

- It has the convenience of Meek, but can support orders of magnitude more
users with negligible CDN costs. (Domain fronting is only used for brief
signalling / NAT-piercing to set up the P2P WebRTC DataChannels which handle
the actual traffic.)

- Arbitrarily high numbers of volunteer proxies are possible like in
flashproxy, but NATs are no longer a usability barrier - no need for
manual port forwarding!

**Q: Why is this called Snowflake?**

It utilizes the "ICE" negotiation via WebRTC, and also involves a great
abundance of ephemeral and short-lived (and special!) volunteer proxies...

### More info and links

We have more documentation in the [Snowflake wiki](https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/wikis/home) and at https://snowflake.torproject.org/.


##### -- Android AAR Reproducible Build Setup  --

Using `gomobile` it is possible to build snowflake as shared libraries for all
the architectures supported by Android.  This is in the _.gitlab-ci.yml_, which
runs in GitLab CI.  It is also possible to run this setup in a Virtual Machine
using [vagrant](https://www.vagrantup.com/).  Just run `vagrant up` and it will
create and provision the VM.  Run `vagrant ssh` to get into the VM and use it as a
development environment.

##### uTLS Settings

Snowflake communicates with the broker, which serves as its signaling server, over a TLS-based domain-fronted connection. That connection may be identified by its use of the Go TLS stack.

uTLS is a software library that imitates the TLS Client Hello fingerprint of browsers or other popular software, in order to evade censorship based on TLS Client Hello fingerprinting. Enable it with `-utls-imitate`; you can use `-version` to see a list of supported values.

Depending on the client and server configuration, it may not always work as expected, as not all extensions are correctly implemented.

You can also remove SNI (Server Name Indication) from the Client Hello with `-utls-nosni` to evade SNI-based censorship; note that not all servers support this.
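
For example, when running the client as a Tor pluggable transport, these flags can be passed on the `ClientTransportPlugin` line (illustrative only; the binary path and imitation value are placeholders):

```
ClientTransportPlugin snowflake exec /usr/local/bin/snowflake-client -utls-imitate hellorandomizedalpn
```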
07070100000009000081A400000000000000000000000167D9BD4E00000963000000000000000000000000000000000000001D00000000snowflake-2.11.0/Vagrantfile
require 'pathname'
require 'tempfile'
require 'yaml'

srvpath = Pathname.new(File.dirname(__FILE__)).realpath
configfile = YAML.load_file(File.join(srvpath, "/.gitlab-ci.yml"))
remote_url = 'https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake'

# set up essential environment variables
env = configfile['variables']
env = env.merge(configfile['android']['variables'])
env['CI_PROJECT_DIR'] = '/builds/tpo/anti-censorship/pluggable-transports/snowflake'
env_file = Tempfile.new('env')
File.chmod(0644, env_file.path)
env.each do |k,v|
    env_file.write("export #{k}='#{v}'\n")
end
env_file.rewind

sourcepath = '/etc/profile.d/env.sh'
header = "#!/bin/bash -ex\nsource #{sourcepath}\ncd $CI_PROJECT_DIR\n"

before_script_file = Tempfile.new('before_script')
File.chmod(0755, before_script_file.path)
before_script_file.write(header)
configfile['android']['before_script'].flatten.each do |line|
    before_script_file.write(line)
    before_script_file.write("\n")
end
before_script_file.rewind

script_file = Tempfile.new('script')
File.chmod(0755, script_file.path)
script_file.write(header)
configfile['android']['script'].flatten.each do |line|
    script_file.write(line)
    script_file.write("\n")
end
script_file.rewind

Vagrant.configure("2") do |config|
  config.vm.box = "debian/bullseye64"
  config.vm.synced_folder '.', '/vagrant', disabled: true
  config.vm.provision "file", source: env_file.path, destination: 'env.sh'
  config.vm.provision :shell, inline: <<-SHELL
    set -ex
    mv ~vagrant/env.sh #{sourcepath}
    source #{sourcepath}
    test -d /go || mkdir /go
    mkdir -p $(dirname $CI_PROJECT_DIR)
    chown -R vagrant.vagrant $(dirname $CI_PROJECT_DIR)
    apt-get update
    apt-get -qy install --no-install-recommends git
    git clone #{remote_url} $CI_PROJECT_DIR
    chmod -R a+rX,u+w /go $CI_PROJECT_DIR
    chown -R vagrant.vagrant /go $CI_PROJECT_DIR
SHELL
  config.vm.provision "file", source: before_script_file.path, destination: 'before_script.sh'
  config.vm.provision "file", source: script_file.path, destination: 'script.sh'
  config.vm.provision :shell, inline: '/home/vagrant/before_script.sh'
  config.vm.provision :shell, privileged: false, inline: '/home/vagrant/script.sh'

  # remove this or comment it out to use VirtualBox instead of libvirt
  config.vm.provider :libvirt do |libvirt|
    libvirt.memory = 1536
  end
end
0707010000000A000041ED00000000000000000000000267D9BD4E00000000000000000000000000000000000000000000001800000000snowflake-2.11.0/broker
0707010000000B000081A400000000000000000000000167D9BD4E00000882000000000000000000000000000000000000002200000000snowflake-2.11.0/broker/README.md
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents**

- [Overview](#overview)
- [Running your own](#running-your-own)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->

This is the Broker component of Snowflake.

### Overview

The Broker handles the rendezvous by matching Snowflake
Clients with Proxies, and passing their WebRTC Session Descriptions
(the "signaling" step). This allows Clients and Proxies to establish
a Peer connection.

It is analogous to Flashproxy's
[Facilitator](https://trac.torproject.org/projects/tor/wiki/FlashProxyFAQ),
but bidirectional and domain-fronted.

The Broker expects:

- Clients to send their SDP offer in a POST request, which will then block
  until the Broker responds with the answer of the matched Proxy.
- Proxies to announce themselves with a POST request, to which the Broker
  responds with some Client's SDP offer. The Proxy should then send a second
  POST request soon after containing its SDP answer, which the Broker passes
  back to the same Client.
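
As a rough sketch of the client half of this exchange (the broker URL is a placeholder and the real wire encoding of poll messages is defined in `common/messages`; this only illustrates the blocking POST):

```go
package main

import (
	"bytes"
	"io"
	"log"
	"net/http"
)

func main() {
	// Hypothetical poll body; the real client builds it with the encoders
	// in common/messages (wrapping its SDP offer and NAT type).
	pollBody := []byte("<encoded client poll message>")

	// This POST blocks until the Broker has matched a proxy and has an
	// SDP answer (or a timeout error) to return.
	resp, err := http.Post("https://broker.example/client", "text/plain", bytes.NewReader(pollBody))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	answer, _ := io.ReadAll(resp.Body)
	log.Printf("broker response: %d bytes", len(answer))
}
```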

### Running your own

The server uses TLS by default.
There is a `--disable-tls` option for testing purposes,
but you should use TLS in production.

The server automatically fetches certificates
from [Let's Encrypt](https://en.wikipedia.org/wiki/Let's_Encrypt) as needed.
Use the `--acme-hostnames` option to tell the server
what hostnames it may request certificates for.
You can optionally provide a contact email address,
using the `--acme-email` option,
so that Let's Encrypt can inform you of any problems.

In order to fetch certificates automatically,
the server needs to open an additional HTTP listener on port 80.
On Linux, you can use the `setcap` program,
part of libcap2, to enable the broker to bind to low-numbered ports
without having to run as root:
```
setcap 'cap_net_bind_service=+ep' /usr/local/bin/broker
```
You can control the listening broker port with the `--addr` option.
Port 443 is the default.

You'll need to provide the URL of the custom broker
to the client plugin using the `--url $URL` flag.
0707010000000C000081A400000000000000000000000167D9BD4E00000ACB000000000000000000000000000000000000001F00000000snowflake-2.11.0/broker/amp.go
package main

import (
	"log"
	"net/http"
	"strings"

	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/amp"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/messages"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/util"
)

// ampClientOffers is the AMP-speaking endpoint for client poll messages,
// intended for access via an AMP cache. In contrast to the other clientOffers,
// the client's encoded poll message is stored in the URL path rather than the
// HTTP request body (because an AMP cache does not support POST), and the
// encoded client poll response is sent back as AMP-armored HTML.
func ampClientOffers(i *IPC, w http.ResponseWriter, r *http.Request) {
	// The encoded client poll message immediately follows the /amp/client/
	// path prefix, so this function unfortunately needs to be aware of and
	// remove its own routing prefix.
	path := strings.TrimPrefix(r.URL.Path, "/amp/client/")
	if path == r.URL.Path {
		// The path didn't start with the expected prefix. This probably
		// indicates an internal bug.
		log.Println("ampClientOffers: unexpected prefix in path")
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	var encPollReq []byte
	var response []byte
	var err error

	encPollReq, err = amp.DecodePath(path)
	if err == nil {
		arg := messages.Arg{
			Body:             encPollReq,
			RemoteAddr:       util.GetClientIp(r),
			RendezvousMethod: messages.RendezvousAmpCache,
		}
		err = i.ClientOffers(arg, &response)
	} else {
		response, err = (&messages.ClientPollResponse{
			Error: "cannot decode URL path",
		}).EncodePollResponse()
	}

	if err != nil {
		// We couldn't even construct a JSON object containing an error
		// message :( Nothing to do but signal an error at the HTTP
		// layer. The AMP cache will translate this 500 status into a
		// 404 status.
		// https://amp.dev/documentation/guides-and-tutorials/learn/amp-caches-and-cors/amp-cache-urls/#redirect-%26-error-handling
		log.Printf("ampClientOffers: %v", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "text/html")
	// Attempt to hint to an AMP cache not to waste resources caching this
	// document. "The Google AMP Cache considers any document fresh for at
	// least 15 seconds."
	// https://developers.google.com/amp/cache/overview#google-amp-cache-updates
	w.Header().Set("Cache-Control", "max-age=15")
	w.WriteHeader(http.StatusOK)

	enc, err := amp.NewArmorEncoder(w)
	if err != nil {
		log.Printf("amp.NewArmorEncoder: %v", err)
		return
	}
	defer enc.Close()

	if _, err := enc.Write(response); err != nil {
		log.Printf("ampClientOffers: unable to write answer: %v", err)
	}
}
0707010000000D000081A400000000000000000000000167D9BD4E00000BB5000000000000000000000000000000000000002700000000snowflake-2.11.0/broker/bridge-list.go
/* (*BridgeListHolderFileBased).LoadBridgeInfo loads a Snowflake Server bridge info description file,
   its format is as follows:

   This file should be in newline-delimited JSON format (https://jsonlines.org/).
   For each line, the format of json data should be in the format of:
   {"displayName":"default", "webSocketAddress":"wss://snowflake.torproject.net/", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80A72"}

   displayName:string is the name of this bridge. This value is not currently used programmatically.

   webSocketAddress:string is the WebSocket URL of this bridge.
   This will be the address proxy used to connect to this snowflake server.

   fingerprint:string is the identifier of the bridge.
   This will be used by a client to identify the bridge it wishes to connect to.

   The existence of ANY other fields is NOT permitted.

   The file will be considered invalid if there is at least one invalid json record.
   In this case, an error will be returned, and none of the records will be loaded.
*/

package main

import (
	"bufio"
	"bytes"
	"encoding/json"
	"errors"
	"io"
	"sync"

	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/bridgefingerprint"
)

var ErrBridgeNotFound = errors.New("bridge with requested fingerprint is unknown to the broker")

func NewBridgeListHolder() BridgeListHolderFileBased {
	return &bridgeListHolder{}
}

type bridgeListHolder struct {
	bridgeInfo       map[bridgefingerprint.Fingerprint]BridgeInfo
	accessBridgeInfo sync.RWMutex
}

type BridgeListHolder interface {
	GetBridgeInfo(bridgefingerprint.Fingerprint) (BridgeInfo, error)
}

type BridgeListHolderFileBased interface {
	BridgeListHolder
	LoadBridgeInfo(reader io.Reader) error
}

type BridgeInfo struct {
	DisplayName      string `json:"displayName"`
	WebSocketAddress string `json:"webSocketAddress"`
	Fingerprint      string `json:"fingerprint"`
}

func (h *bridgeListHolder) GetBridgeInfo(fingerprint bridgefingerprint.Fingerprint) (BridgeInfo, error) {
	h.accessBridgeInfo.RLock()
	defer h.accessBridgeInfo.RUnlock()
	if bridgeInfo, ok := h.bridgeInfo[fingerprint]; ok {
		return bridgeInfo, nil
	}
	return BridgeInfo{}, ErrBridgeNotFound
}

func (h *bridgeListHolder) LoadBridgeInfo(reader io.Reader) error {
	bridgeInfoMap := map[bridgefingerprint.Fingerprint]BridgeInfo{}
	inputScanner := bufio.NewScanner(reader)
	for inputScanner.Scan() {
		inputLine := inputScanner.Bytes()
		bridgeInfo := BridgeInfo{}
		decoder := json.NewDecoder(bytes.NewReader(inputLine))
		decoder.DisallowUnknownFields()
		if err := decoder.Decode(&bridgeInfo); err != nil {
			return err
		}

		var bridgeFingerprint bridgefingerprint.Fingerprint
		var err error
		if bridgeFingerprint, err = bridgefingerprint.FingerprintFromHexString(bridgeInfo.Fingerprint); err != nil {
			return err
		}

		bridgeInfoMap[bridgeFingerprint] = bridgeInfo
	}
	h.accessBridgeInfo.Lock()
	defer h.accessBridgeInfo.Unlock()
	h.bridgeInfo = bridgeInfoMap
	return nil
}
0707010000000E000081A400000000000000000000000167D9BD4E00000DCE000000000000000000000000000000000000002C00000000snowflake-2.11.0/broker/bridge-list_test.go
package main

import (
	"bytes"
	"encoding/hex"
	. "github.com/smartystreets/goconvey/convey"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/bridgefingerprint"
	"testing"
)

const DefaultBridges = `{"displayName":"default", "webSocketAddress":"wss://snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80A72"}
`

const ImaginaryBridges = `{"displayName":"default", "webSocketAddress":"wss://snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80A72"}
{"displayName":"imaginary-1", "webSocketAddress":"wss://imaginary-1-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B00"}
{"displayName":"imaginary-2", "webSocketAddress":"wss://imaginary-2-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B01"}
{"displayName":"imaginary-3", "webSocketAddress":"wss://imaginary-3-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B02"}
{"displayName":"imaginary-4", "webSocketAddress":"wss://imaginary-4-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B03"}
{"displayName":"imaginary-5", "webSocketAddress":"wss://imaginary-5-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B04"}
{"displayName":"imaginary-6", "webSocketAddress":"wss://imaginary-6-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B05"}
{"displayName":"imaginary-7", "webSocketAddress":"wss://imaginary-7-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B06"}
{"displayName":"imaginary-8", "webSocketAddress":"wss://imaginary-8-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B07"}
{"displayName":"imaginary-9", "webSocketAddress":"wss://imaginary-9-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B08"}
{"displayName":"imaginary-10", "webSocketAddress":"wss://imaginary-10-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B09"}
`

func TestBridgeLoad(t *testing.T) {
	Convey("load default list", t, func() {
		bridgeList := NewBridgeListHolder()
		So(bridgeList.LoadBridgeInfo(bytes.NewReader([]byte(DefaultBridges))), ShouldBeNil)
		{
			bridgeFingerprint := [20]byte{}
			{
				n, err := hex.Decode(bridgeFingerprint[:], []byte("2B280B23E1107BB62ABFC40DDCC8824814F80A72"))
				So(n, ShouldEqual, 20)
				So(err, ShouldBeNil)
			}
			Fingerprint, err := bridgefingerprint.FingerprintFromBytes(bridgeFingerprint[:])
			So(err, ShouldBeNil)
			bridgeInfo, err := bridgeList.GetBridgeInfo(Fingerprint)
			So(err, ShouldBeNil)
			So(bridgeInfo.DisplayName, ShouldEqual, "default")
			So(bridgeInfo.WebSocketAddress, ShouldEqual, "wss://snowflake.torproject.org")
		}
	})
	Convey("load imaginary list", t, func() {
		bridgeList := NewBridgeListHolder()
		So(bridgeList.LoadBridgeInfo(bytes.NewReader([]byte(ImaginaryBridges))), ShouldBeNil)
		{
			bridgeFingerprint := [20]byte{}
			{
				n, err := hex.Decode(bridgeFingerprint[:], []byte("2B280B23E1107BB62ABFC40DDCC8824814F80B07"))
				So(n, ShouldEqual, 20)
				So(err, ShouldBeNil)
			}
			Fingerprint, err := bridgefingerprint.FingerprintFromBytes(bridgeFingerprint[:])
			So(err, ShouldBeNil)
			bridgeInfo, err := bridgeList.GetBridgeInfo(Fingerprint)
			So(err, ShouldBeNil)
			So(bridgeInfo.DisplayName, ShouldEqual, "imaginary-8")
			So(bridgeInfo.WebSocketAddress, ShouldEqual, "wss://imaginary-8-snowflake.torproject.org")
		}
	})
}
0707010000000F000081A400000000000000000000000167D9BD4E00003323000000000000000000000000000000000000002200000000snowflake-2.11.0/broker/broker.go
/*
Broker acts as the HTTP signaling channel.
It matches clients and snowflake proxies by passing corresponding
SessionDescriptions in order to negotiate a WebRTC connection.
*/
package main

import (
	"bytes"
	"container/heap"
	"context"
	"crypto/tls"
	"flag"
	"io"
	"log"
	"net/http"
	"os"
	"os/signal"
	"strings"
	"sync"
	"syscall"
	"time"

	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/bridgefingerprint"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sqs"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/ptutil/safelog"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/namematcher"
	"golang.org/x/crypto/acme/autocert"
)

type BrokerContext struct {
	snowflakes           *SnowflakeHeap
	restrictedSnowflakes *SnowflakeHeap
	// Maps keeping track of snowflakeIDs required to match SDP answers from
	// the second http POST. Restricted snowflakes can only be matched up with
	// clients behind an unrestricted NAT.
	idToSnowflake map[string]*Snowflake
	// Synchronization for the snowflake map and heap
	snowflakeLock sync.Mutex
	proxyPolls    chan *ProxyPoll
	metrics       *Metrics

	bridgeList                     BridgeListHolderFileBased
	allowedRelayPattern            string
	presumedPatternForLegacyClient string
}

func (ctx *BrokerContext) GetBridgeInfo(fingerprint bridgefingerprint.Fingerprint) (BridgeInfo, error) {
	return ctx.bridgeList.GetBridgeInfo(fingerprint)
}

func NewBrokerContext(
	metricsLogger *log.Logger,
	allowedRelayPattern,
	presumedPatternForLegacyClient string,
) *BrokerContext {
	snowflakes := new(SnowflakeHeap)
	heap.Init(snowflakes)
	rSnowflakes := new(SnowflakeHeap)
	heap.Init(rSnowflakes)
	metrics, err := NewMetrics(metricsLogger)

	if err != nil {
		panic(err.Error())
	}

	if metrics == nil {
		panic("Failed to create metrics")
	}

	bridgeListHolder := NewBridgeListHolder()

	const DefaultBridges = `{"displayName":"default", "webSocketAddress":"wss://snowflake.torproject.net/", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80A72"}
`
	bridgeListHolder.LoadBridgeInfo(bytes.NewReader([]byte(DefaultBridges)))

	return &BrokerContext{
		snowflakes:                     snowflakes,
		restrictedSnowflakes:           rSnowflakes,
		idToSnowflake:                  make(map[string]*Snowflake),
		proxyPolls:                     make(chan *ProxyPoll),
		metrics:                        metrics,
		bridgeList:                     bridgeListHolder,
		allowedRelayPattern:            allowedRelayPattern,
		presumedPatternForLegacyClient: presumedPatternForLegacyClient,
	}
}

// Proxies may poll for client offers concurrently.
type ProxyPoll struct {
	id           string
	proxyType    string
	natType      string
	clients      int
	offerChannel chan *ClientOffer
}

// Registers a Snowflake and waits for some Client to send an offer,
// as part of the polling logic of the proxy handler.
func (ctx *BrokerContext) RequestOffer(id string, proxyType string, natType string, clients int) *ClientOffer {
	request := new(ProxyPoll)
	request.id = id
	request.proxyType = proxyType
	request.natType = natType
	request.clients = clients
	request.offerChannel = make(chan *ClientOffer)
	ctx.proxyPolls <- request
	// Block until an offer is available, or timeout which sends a nil offer.
	offer := <-request.offerChannel
	return offer
}

// goroutine which matches clients to proxies and sends SDP offers along.
// Safely processes proxy requests, responding to them with either an available
// client offer or nil on timeout / none are available.
func (ctx *BrokerContext) Broker() {
	for request := range ctx.proxyPolls {
		snowflake := ctx.AddSnowflake(request.id, request.proxyType, request.natType, request.clients)
		// Wait for a client to make an offer available to the snowflake.
		go func(request *ProxyPoll) {
			select {
			case offer := <-snowflake.offerChannel:
				request.offerChannel <- offer
			case <-time.After(time.Second * ProxyTimeout):
				// This snowflake is no longer available to serve clients.
				ctx.snowflakeLock.Lock()
				defer ctx.snowflakeLock.Unlock()
				if snowflake.index != -1 {
					if request.natType == NATUnrestricted {
						heap.Remove(ctx.snowflakes, snowflake.index)
					} else {
						heap.Remove(ctx.restrictedSnowflakes, snowflake.index)
					}
					ctx.metrics.promMetrics.AvailableProxies.With(prometheus.Labels{"nat": request.natType, "type": request.proxyType}).Dec()
					delete(ctx.idToSnowflake, snowflake.id)
					close(request.offerChannel)
				}
			}
		}(request)
	}
}

// Create and add a Snowflake to the heap.
// Required to keep track of proxies between providing them
// with an offer and awaiting their second POST with an answer.
func (ctx *BrokerContext) AddSnowflake(id string, proxyType string, natType string, clients int) *Snowflake {
	snowflake := new(Snowflake)
	snowflake.id = id
	snowflake.clients = clients
	snowflake.proxyType = proxyType
	snowflake.natType = natType
	snowflake.offerChannel = make(chan *ClientOffer)
	snowflake.answerChannel = make(chan string)
	ctx.snowflakeLock.Lock()
	if natType == NATUnrestricted {
		heap.Push(ctx.snowflakes, snowflake)
	} else {
		heap.Push(ctx.restrictedSnowflakes, snowflake)
	}
	ctx.metrics.promMetrics.AvailableProxies.With(prometheus.Labels{"nat": natType, "type": proxyType}).Inc()
	ctx.idToSnowflake[id] = snowflake
	ctx.snowflakeLock.Unlock()
	return snowflake
}

func (ctx *BrokerContext) InstallBridgeListProfile(reader io.Reader) error {
	if err := ctx.bridgeList.LoadBridgeInfo(reader); err != nil {
		return err
	}
	return nil
}

func (ctx *BrokerContext) CheckProxyRelayPattern(pattern string, nonSupported bool) bool {
	if nonSupported {
		pattern = ctx.presumedPatternForLegacyClient
	}
	proxyPattern := namematcher.NewNameMatcher(pattern)
	brokerPattern := namematcher.NewNameMatcher(ctx.allowedRelayPattern)
	return proxyPattern.IsSupersetOf(brokerPattern)
}

// Client offer contains an SDP, bridge fingerprint and the NAT type of the client
type ClientOffer struct {
	natType     string
	sdp         []byte
	fingerprint []byte
}

func main() {
	var acmeEmail string
	var acmeHostnamesCommas string
	var acmeCertCacheDir string
	var addr string
	var geoipDatabase string
	var geoip6Database string
	var bridgeListFilePath, allowedRelayPattern, presumedPatternForLegacyClient string
	var brokerSQSQueueName, brokerSQSQueueRegion string
	var disableTLS bool
	var certFilename, keyFilename string
	var disableGeoip bool
	var metricsFilename string
	var unsafeLogging bool

	flag.StringVar(&acmeEmail, "acme-email", "", "optional contact email for Let's Encrypt notifications")
	flag.StringVar(&acmeHostnamesCommas, "acme-hostnames", "", "comma-separated hostnames for TLS certificate")
	flag.StringVar(&certFilename, "cert", "", "TLS certificate file")
	flag.StringVar(&keyFilename, "key", "", "TLS private key file")
	flag.StringVar(&acmeCertCacheDir, "acme-cert-cache", "acme-cert-cache", "directory in which certificates should be cached")
	flag.StringVar(&addr, "addr", ":443", "address to listen on")
	flag.StringVar(&geoipDatabase, "geoipdb", "/usr/share/tor/geoip", "path to correctly formatted geoip database mapping IPv4 address ranges to country codes")
	flag.StringVar(&geoip6Database, "geoip6db", "/usr/share/tor/geoip6", "path to correctly formatted geoip database mapping IPv6 address ranges to country codes")
	flag.StringVar(&bridgeListFilePath, "bridge-list-path", "", "file path for bridgeListFile")
	flag.StringVar(&allowedRelayPattern, "allowed-relay-pattern", "", "allowed pattern for relay host name. The broker will reject proxies whose AcceptedRelayPattern is more restrictive than this")
	flag.StringVar(&presumedPatternForLegacyClient, "default-relay-pattern", "", "presumed pattern for legacy client")
	flag.StringVar(&brokerSQSQueueName, "broker-sqs-name", "", "name of broker SQS queue to listen for incoming messages on")
	flag.StringVar(&brokerSQSQueueRegion, "broker-sqs-region", "", "name of AWS region of broker SQS queue")
	flag.BoolVar(&disableTLS, "disable-tls", false, "don't use HTTPS")
	flag.BoolVar(&disableGeoip, "disable-geoip", false, "don't use geoip for stats collection")
	flag.StringVar(&metricsFilename, "metrics-log", "", "path to metrics logging output")
	flag.BoolVar(&unsafeLogging, "unsafe-logging", false, "prevent logs from being scrubbed")
	flag.Parse()

	var metricsFile io.Writer
	var logOutput io.Writer = os.Stderr
	if unsafeLogging {
		log.SetOutput(logOutput)
	} else {
		// We want to send the log output through our scrubber first
		log.SetOutput(&safelog.LogScrubber{Output: logOutput})
	}

	log.SetFlags(log.LstdFlags | log.LUTC)

	if metricsFilename != "" {
		var err error
		metricsFile, err = os.OpenFile(metricsFilename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)

		if err != nil {
			log.Fatal(err.Error())
		}
	} else {
		metricsFile = os.Stdout
	}

	metricsLogger := log.New(metricsFile, "", 0)

	ctx := NewBrokerContext(metricsLogger, allowedRelayPattern, presumedPatternForLegacyClient)

	if bridgeListFilePath != "" {
		bridgeListFile, err := os.Open(bridgeListFilePath)
		if err != nil {
			log.Fatal(err.Error())
		}
		err = ctx.InstallBridgeListProfile(bridgeListFile)
		if err != nil {
			log.Fatal(err.Error())
		}
	}

	if !disableGeoip {
		err := ctx.metrics.LoadGeoipDatabases(geoipDatabase, geoip6Database)
		if err != nil {
			log.Fatal(err.Error())
		}
	}

	go ctx.Broker()

	i := &IPC{ctx}

	http.HandleFunc("/robots.txt", robotsTxtHandler)

	http.Handle("/proxy", SnowflakeHandler{i, proxyPolls})
	http.Handle("/client", SnowflakeHandler{i, clientOffers})
	http.Handle("/answer", SnowflakeHandler{i, proxyAnswers})
	http.Handle("/debug", SnowflakeHandler{i, debugHandler})
	http.Handle("/metrics", MetricsHandler{metricsFilename, metricsHandler})
	http.Handle("/prometheus", promhttp.HandlerFor(ctx.metrics.promMetrics.registry, promhttp.HandlerOpts{}))

	http.Handle("/amp/client/", SnowflakeHandler{i, ampClientOffers})

	server := http.Server{
		Addr: addr,
	}

	// Run SQS Handler to continuously poll and process messages from SQS
	if brokerSQSQueueName != "" && brokerSQSQueueRegion != "" {
		log.Printf("Loading SQSHandler using SQS Queue %s in region %s\n", brokerSQSQueueName, brokerSQSQueueRegion)
		sqsHandlerContext := context.Background()
		cfg, err := config.LoadDefaultConfig(sqsHandlerContext, config.WithRegion(brokerSQSQueueRegion))
		if err != nil {
			log.Fatal(err)
		}
		client := sqs.NewFromConfig(cfg)
		sqsHandler, err := newSQSHandler(sqsHandlerContext, client, brokerSQSQueueName, brokerSQSQueueRegion, i)
		if err != nil {
			log.Fatal(err)
		}
		go sqsHandler.PollAndHandleMessages(sqsHandlerContext)
	}

	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGHUP)

	// Goroutine to handle SIGHUP, so that the broker operator can signal the
	// broker to reload the geoip database files when they are updated, without
	// requiring a restart.
	go func() {
		for {
			sig := <-sigChan
			log.Printf("Received signal: %s. Reloading geoip databases.", sig)
			if err := ctx.metrics.LoadGeoipDatabases(geoipDatabase, geoip6Database); err != nil {
				log.Fatalf("reload of geoip databases on signal %s returned error: %v", sig, err)
			}
		}
	}()

	// Handle the various ways of setting up TLS. The legal configurations
	// are:
	//   --acme-hostnames (with optional --acme-email and/or --acme-cert-cache)
	//   --cert and --key together
	//   --disable-tls
	// This block starts the server in the selected mode and leaves any
	// resulting error in err.
	var err error
	if acmeHostnamesCommas != "" {
		acmeHostnames := strings.Split(acmeHostnamesCommas, ",")
		log.Printf("ACME hostnames: %q", acmeHostnames)

		var cache autocert.Cache
		if err := os.MkdirAll(acmeCertCacheDir, 0700); err != nil {
			log.Printf("Warning: Couldn't create cache directory %q (reason: %s) so we're *not* using our certificate cache.", acmeCertCacheDir, err)
		} else {
			cache = autocert.DirCache(acmeCertCacheDir)
		}

		certManager := autocert.Manager{
			Cache:      cache,
			Prompt:     autocert.AcceptTOS,
			HostPolicy: autocert.HostWhitelist(acmeHostnames...),
			Email:      acmeEmail,
		}
		go func() {
			log.Printf("Starting HTTP-01 listener")
			log.Fatal(http.ListenAndServe(":80", certManager.HTTPHandler(nil)))
		}()

		server.TLSConfig = &tls.Config{GetCertificate: certManager.GetCertificate}
		err = server.ListenAndServeTLS("", "")
	} else if certFilename != "" && keyFilename != "" {
		if acmeEmail != "" || acmeHostnamesCommas != "" {
			log.Fatalf("The --cert and --key options are not allowed with --acme-email or --acme-hostnames.")
		}
		err = server.ListenAndServeTLS(certFilename, keyFilename)
	} else if disableTLS {
		err = server.ListenAndServe()
	} else {
		log.Fatal("the --acme-hostnames, --cert and --key, or --disable-tls option is required")
	}

	if err != nil {
		log.Fatal(err)
	}
}
07070100000010000081A400000000000000000000000167D9BD4E0000196C000000000000000000000000000000000000002000000000snowflake-2.11.0/broker/http.gopackage main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"

	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/messages"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/util"
)

const (
	readLimit = 100000 // Maximum number of bytes to be read from an HTTP request
)

// Implements the http.Handler interface
type SnowflakeHandler struct {
	*IPC
	handle func(*IPC, http.ResponseWriter, *http.Request)
}

func (sh SnowflakeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Access-Control-Allow-Origin", "*")
	w.Header().Set("Access-Control-Allow-Headers", "Origin, X-Session-ID")
	// Return early if it's CORS preflight.
	if "OPTIONS" == r.Method {
		return
	}
	sh.handle(sh.IPC, w, r)
}

// Implements the http.Handler interface
type MetricsHandler struct {
	logFilename string
	handle      func(string, http.ResponseWriter, *http.Request)
}

func (mh MetricsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Access-Control-Allow-Origin", "*")
	w.Header().Set("Access-Control-Allow-Headers", "Origin, X-Session-ID")
	// Return early if it's CORS preflight.
	if "OPTIONS" == r.Method {
		return
	}
	mh.handle(mh.logFilename, w, r)
}

func robotsTxtHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	if _, err := w.Write([]byte("User-agent: *\nDisallow: /\n")); err != nil {
		log.Printf("robotsTxtHandler unable to write, with this error: %v", err)
	}
}

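// metricsHandler serves the current contents of the metrics log file, or a
// 404 if metrics logging to a file is not configured.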
func metricsHandler(metricsFilename string, w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")

	if metricsFilename == "" {
		http.NotFound(w, r)
		return
	}
	metricsFile, err := os.OpenFile(metricsFilename, os.O_RDONLY, 0644)
	if err != nil {
		log.Println("Error opening metrics file for reading")
		http.NotFound(w, r)
		return
	}
	defer metricsFile.Close()

	if _, err := io.Copy(w, metricsFile); err != nil {
		log.Printf("copying metricsFile returned error: %v", err)
	}
}

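// debugHandler writes a plain-text summary of the currently available
// proxies, as reported by IPC.Debug.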
func debugHandler(i *IPC, w http.ResponseWriter, r *http.Request) {
	var response string

	err := i.Debug(new(interface{}), &response)
	if err != nil {
		log.Println(err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	if _, err := w.Write([]byte(response)); err != nil {
		log.Printf("writing proxy information returned error: %v ", err)
	}
}

/*
For snowflake proxies to request a client from the Broker.
*/
func proxyPolls(i *IPC, w http.ResponseWriter, r *http.Request) {
	body, err := io.ReadAll(http.MaxBytesReader(w, r.Body, readLimit))
	if err != nil {
		log.Println("Invalid data.", err.Error())
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	arg := messages.Arg{
		Body:       body,
		RemoteAddr: util.GetClientIp(r),
	}

	var response []byte
	err = i.ProxyPolls(arg, &response)
	switch {
	case err == nil:
	case errors.Is(err, messages.ErrBadRequest):
		w.WriteHeader(http.StatusBadRequest)
		return
	case errors.Is(err, messages.ErrInternal):
		fallthrough
	default:
		log.Println(err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	if _, err := w.Write(response); err != nil {
		log.Printf("proxyPolls unable to write offer with error: %v", err)
	}
}

/*
Expects a WebRTC SDP offer in the Request to give to an assigned
snowflake proxy, which responds with the SDP answer to be sent in
the HTTP response back to the client.
*/
func clientOffers(i *IPC, w http.ResponseWriter, r *http.Request) {
	body, err := io.ReadAll(http.MaxBytesReader(w, r.Body, readLimit))
	if err != nil {
		log.Printf("Error reading client request: %s", err.Error())
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	// Handle the legacy version
	//
	// We support two client message formats. The legacy format is for backwards
	// compatibility and relies heavily on HTTP headers and status codes to convey
	// information.
	isLegacy := false
	if len(body) > 0 && body[0] == '{' {
		isLegacy = true
		req := messages.ClientPollRequest{
			Offer: string(body),
			NAT:   r.Header.Get("Snowflake-NAT-Type"),
		}
		body, err = req.EncodeClientPollRequest()
		if err != nil {
			log.Printf("Error shimming the legacy request: %s", err.Error())
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
	}

	arg := messages.Arg{
		Body:             body,
		RemoteAddr:       util.GetClientIp(r),
		RendezvousMethod: messages.RendezvousHttp,
	}

	var response []byte
	err = i.ClientOffers(arg, &response)
	if err != nil {
		log.Println(err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	if isLegacy {
		resp, err := messages.DecodeClientPollResponse(response)
		if err != nil {
			log.Println(err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		switch resp.Error {
		case "":
			response = []byte(resp.Answer)
		case messages.StrNoProxies:
			w.WriteHeader(http.StatusServiceUnavailable)
			return
		case messages.StrTimedOut:
			w.WriteHeader(http.StatusGatewayTimeout)
			return
		default:
			panic("unknown error")
		}
	}

	if _, err := w.Write(response); err != nil {
		log.Printf("clientOffers unable to write answer with error: %v", err)
	}
}

/*
Expects snowflake proxies which have previously successfully received
an offer from proxyHandler to respond with an answer in an HTTP POST,
which the broker will pass back to the original client.
*/
func proxyAnswers(i *IPC, w http.ResponseWriter, r *http.Request) {
	body, err := io.ReadAll(http.MaxBytesReader(w, r.Body, readLimit))
	if err != nil {
		log.Println("Invalid data.", err.Error())
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	err = validateSDP(body)
	if err != nil {
		log.Println("Error proxy SDP: ", err.Error())
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	arg := messages.Arg{
		Body:       body,
		RemoteAddr: util.GetClientIp(r),
	}

	var response []byte
	err = i.ProxyAnswers(arg, &response)
	switch {
	case err == nil:
	case errors.Is(err, messages.ErrBadRequest):
		w.WriteHeader(http.StatusBadRequest)
		return
	case errors.Is(err, messages.ErrInternal):
		fallthrough
	default:
		log.Println(err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	if _, err := w.Write(response); err != nil {
		log.Printf("proxyAnswers unable to write answer response with error: %v", err)
	}
}

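// validateSDP performs a minimal sanity check on a proxy's SDP answer,
// requiring that it contain at least one ICE candidate.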
func validateSDP(SDP []byte) error {
	// TODO: more validation likely needed
	if !bytes.Contains(SDP, []byte("a=candidate")) {
		return fmt.Errorf("SDP contains no candidate")
	}

	return nil
}
07070100000011000081A400000000000000000000000167D9BD4E00002066000000000000000000000000000000000000001F00000000snowflake-2.11.0/broker/ipc.gopackage main

import (
	"container/heap"
	"encoding/hex"
	"fmt"
	"log"
	"time"

	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/bridgefingerprint"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/constants"

	"github.com/prometheus/client_golang/prometheus"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/messages"
)

const (
	ClientTimeout = constants.BrokerClientTimeout
	ProxyTimeout  = 10

	NATUnknown      = "unknown"
	NATRestricted   = "restricted"
	NATUnrestricted = "unrestricted"
)

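// IPC wraps the BrokerContext and implements the rendezvous operations
// (Debug, ProxyPolls, ClientOffers, ProxyAnswers) shared by the HTTP, AMP
// cache, and SQS front ends.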
type IPC struct {
	ctx *BrokerContext
}

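// Debug returns a human-readable summary of the currently available proxies,
// broken down by proxy type and NAT type.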
func (i *IPC) Debug(_ interface{}, response *string) error {
	var unknowns int
	var natRestricted, natUnrestricted, natUnknown int
	proxyTypes := make(map[string]int)

	i.ctx.snowflakeLock.Lock()
	s := fmt.Sprintf("current snowflakes available: %d\n", len(i.ctx.idToSnowflake))
	for _, snowflake := range i.ctx.idToSnowflake {
		if messages.KnownProxyTypes[snowflake.proxyType] {
			proxyTypes[snowflake.proxyType]++
		} else {
			unknowns++
		}

		switch snowflake.natType {
		case NATRestricted:
			natRestricted++
		case NATUnrestricted:
			natUnrestricted++
		default:
			natUnknown++
		}

	}
	i.ctx.snowflakeLock.Unlock()

	for pType, num := range proxyTypes {
		s += fmt.Sprintf("\t%s proxies: %d\n", pType, num)
	}
	s += fmt.Sprintf("\tunknown proxies: %d", unknowns)

	s += fmt.Sprintf("\nNAT Types available:")
	s += fmt.Sprintf("\n\trestricted: %d", natRestricted)
	s += fmt.Sprintf("\n\tunrestricted: %d", natUnrestricted)
	s += fmt.Sprintf("\n\tunknown: %d", natUnknown)

	*response = s
	return nil
}

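// ProxyPolls handles a poll from a proxy: it records relay URL extension and
// geoip metrics, rejects proxies whose accepted relay pattern is too
// restrictive, and otherwise blocks in RequestOffer until a client offer is
// matched or the poll times out with an empty poll response.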
func (i *IPC) ProxyPolls(arg messages.Arg, response *[]byte) error {
	sid, proxyType, natType, clients, relayPattern, relayPatternSupported, err := messages.DecodeProxyPollRequestWithRelayPrefix(arg.Body)
	if err != nil {
		return messages.ErrBadRequest
	}

	if !relayPatternSupported {
		i.ctx.metrics.lock.Lock()
		i.ctx.metrics.proxyPollWithoutRelayURLExtension++
		i.ctx.metrics.promMetrics.ProxyPollWithoutRelayURLExtensionTotal.With(prometheus.Labels{"nat": natType, "type": proxyType}).Inc()
		i.ctx.metrics.lock.Unlock()
	} else {
		i.ctx.metrics.lock.Lock()
		i.ctx.metrics.proxyPollWithRelayURLExtension++
		i.ctx.metrics.promMetrics.ProxyPollWithRelayURLExtensionTotal.With(prometheus.Labels{"nat": natType, "type": proxyType}).Inc()
		i.ctx.metrics.lock.Unlock()
	}

	if !i.ctx.CheckProxyRelayPattern(relayPattern, !relayPatternSupported) {
		i.ctx.metrics.lock.Lock()
		i.ctx.metrics.proxyPollRejectedWithRelayURLExtension++
		i.ctx.metrics.promMetrics.ProxyPollRejectedForRelayURLExtensionTotal.With(prometheus.Labels{"nat": natType, "type": proxyType}).Inc()
		i.ctx.metrics.lock.Unlock()

		log.Printf("bad request: rejected relay pattern from proxy = %v", messages.ErrBadRequest)
		b, err := messages.EncodePollResponseWithRelayURL("", false, "", "", "incorrect relay pattern")
		*response = b
		if err != nil {
			return messages.ErrInternal
		}
		return nil
	}

	// Log geoip stats
	remoteIP := arg.RemoteAddr
	if err != nil {
		log.Println("Warning: cannot process proxy IP: ", err.Error())
	} else {
		i.ctx.metrics.lock.Lock()
		i.ctx.metrics.UpdateCountryStats(remoteIP, proxyType, natType)
		i.ctx.metrics.lock.Unlock()
	}

	var b []byte

	// Wait for a client to make an offer available to this snowflake, or time out (nil offer).
	offer := i.ctx.RequestOffer(sid, proxyType, natType, clients)

	if offer == nil {
		i.ctx.metrics.lock.Lock()
		i.ctx.metrics.proxyIdleCount++
		i.ctx.metrics.promMetrics.ProxyPollTotal.With(prometheus.Labels{"nat": natType, "status": "idle"}).Inc()
		i.ctx.metrics.lock.Unlock()

		b, err = messages.EncodePollResponse("", false, "")
		if err != nil {
			return messages.ErrInternal
		}

		*response = b
		return nil
	}

	i.ctx.metrics.promMetrics.ProxyPollTotal.With(prometheus.Labels{"nat": natType, "status": "matched"}).Inc()
	var relayURL string
	bridgeFingerprint, err := bridgefingerprint.FingerprintFromBytes(offer.fingerprint)
	if err != nil {
		return messages.ErrBadRequest
	}
	if info, err := i.ctx.bridgeList.GetBridgeInfo(bridgeFingerprint); err != nil {
		return err
	} else {
		relayURL = info.WebSocketAddress
	}
	b, err = messages.EncodePollResponseWithRelayURL(string(offer.sdp), true, offer.natType, relayURL, "")
	if err != nil {
		return messages.ErrInternal
	}
	*response = b

	return nil
}

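// sendClientResponse encodes a client poll response into the outgoing
// response buffer.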
func sendClientResponse(resp *messages.ClientPollResponse, response *[]byte) error {
	data, err := resp.EncodePollResponse()
	if err != nil {
		log.Printf("error encoding answer")
		return messages.ErrInternal
	} else {
		*response = []byte(data)
		return nil
	}
}

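// ClientOffers handles a client poll: it validates the requested bridge
// fingerprint, matches the client with an available snowflake proxy, forwards
// the offer, and waits for the proxy's answer or a timeout, updating
// rendezvous metrics along the way.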
func (i *IPC) ClientOffers(arg messages.Arg, response *[]byte) error {

	startTime := time.Now()

	req, err := messages.DecodeClientPollRequest(arg.Body)
	if err != nil {
		return sendClientResponse(&messages.ClientPollResponse{Error: err.Error()}, response)
	}

	offer := &ClientOffer{
		natType: req.NAT,
		sdp:     []byte(req.Offer),
	}

	fingerprint, err := hex.DecodeString(req.Fingerprint)
	if err != nil {
		return sendClientResponse(&messages.ClientPollResponse{Error: err.Error()}, response)
	}

	BridgeFingerprint, err := bridgefingerprint.FingerprintFromBytes(fingerprint)
	if err != nil {
		return sendClientResponse(&messages.ClientPollResponse{Error: err.Error()}, response)
	}

	if _, err := i.ctx.GetBridgeInfo(BridgeFingerprint); err != nil {
		return sendClientResponse(
			&messages.ClientPollResponse{Error: err.Error()},
			response,
		)
	}

	offer.fingerprint = BridgeFingerprint.ToBytes()

	snowflake := i.matchSnowflake(offer.natType)
	if snowflake != nil {
		snowflake.offerChannel <- offer
	} else {
		i.ctx.metrics.lock.Lock()
		i.ctx.metrics.UpdateRendezvousStats(arg.RemoteAddr, arg.RendezvousMethod, offer.natType, "denied")
		i.ctx.metrics.lock.Unlock()
		resp := &messages.ClientPollResponse{Error: messages.StrNoProxies}
		return sendClientResponse(resp, response)
	}

	// Wait for the answer to be returned on the channel or timeout.
	select {
	case answer := <-snowflake.answerChannel:
		i.ctx.metrics.lock.Lock()
		i.ctx.metrics.UpdateRendezvousStats(arg.RemoteAddr, arg.RendezvousMethod, offer.natType, "matched")
		i.ctx.metrics.lock.Unlock()
		resp := &messages.ClientPollResponse{Answer: answer}
		err = sendClientResponse(resp, response)
		// Initial tracking of elapsed time.
		i.ctx.metrics.lock.Lock()
		i.ctx.metrics.clientRoundtripEstimate = time.Since(startTime) / time.Millisecond
		i.ctx.metrics.lock.Unlock()
	case <-time.After(time.Second * ClientTimeout):
		i.ctx.metrics.lock.Lock()
		i.ctx.metrics.UpdateRendezvousStats(arg.RemoteAddr, arg.RendezvousMethod, offer.natType, "timeout")
		i.ctx.metrics.lock.Unlock()
		resp := &messages.ClientPollResponse{Error: messages.StrTimedOut}
		err = sendClientResponse(resp, response)
	}

	i.ctx.snowflakeLock.Lock()
	i.ctx.metrics.promMetrics.AvailableProxies.With(prometheus.Labels{"nat": snowflake.natType, "type": snowflake.proxyType}).Dec()
	delete(i.ctx.idToSnowflake, snowflake.id)
	i.ctx.snowflakeLock.Unlock()

	return err
}

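// matchSnowflake pops an available proxy off the heaps, preferring restricted
// proxies for unrestricted clients so that unrestricted proxies remain
// available for restricted clients. It returns nil if no proxy is available.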
func (i *IPC) matchSnowflake(natType string) *Snowflake {
	i.ctx.snowflakeLock.Lock()
	defer i.ctx.snowflakeLock.Unlock()

	// Prioritize known restricted snowflakes for unrestricted clients
	if natType == NATUnrestricted && i.ctx.restrictedSnowflakes.Len() > 0 {
		return heap.Pop(i.ctx.restrictedSnowflakes).(*Snowflake)
	}

	if i.ctx.snowflakes.Len() > 0 {
		return heap.Pop(i.ctx.snowflakes).(*Snowflake)
	}

	return nil
}

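// ProxyAnswers forwards a proxy's SDP answer to the client waiting on the
// proxy's ID, or reports "client gone" if the ID is no longer known to the
// broker.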
func (i *IPC) ProxyAnswers(arg messages.Arg, response *[]byte) error {
	answer, id, err := messages.DecodeAnswerRequest(arg.Body)
	if err != nil || answer == "" {
		return messages.ErrBadRequest
	}

	var success = true
	i.ctx.snowflakeLock.Lock()
	snowflake, ok := i.ctx.idToSnowflake[id]
	i.ctx.snowflakeLock.Unlock()
	if !ok || snowflake == nil {
		// The snowflake took too long to respond with an answer, so its client
		// disappeared / the snowflake is no longer recognized by the Broker.
		success = false
	}

	b, err := messages.EncodeAnswerResponse(success)
	if err != nil {
		log.Printf("Error encoding answer: %s", err.Error())
		return messages.ErrInternal
	}
	*response = b

	if success {
		snowflake.answerChannel <- answer
	}

	return nil
}
07070100000012000081A400000000000000000000000167D9BD4E0000337A000000000000000000000000000000000000002300000000snowflake-2.11.0/broker/metrics.go/*
We export metrics in the format specified in our broker spec:
https://gitweb.torproject.org/pluggable-transports/snowflake.git/tree/doc/broker-spec.txt
*/

package main

import (
	"fmt"
	"log"
	"math"
	"net"
	"sort"
	"sync"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"gitlab.torproject.org/tpo/anti-censorship/geoip"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/ptutil/safeprom"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/messages"
)

const (
	prometheusNamespace = "snowflake"
	metricsResolution   = 60 * 60 * 24 * time.Second //86400 seconds
)

var rendezvoudMethodList = [...]messages.RendezvousMethod{
	messages.RendezvousHttp,
	messages.RendezvousAmpCache,
	messages.RendezvousSqs,
}

type CountryStats struct {
	// map[proxyType][address]bool
	proxies map[string]map[string]bool
	unknown map[string]bool

	natRestricted   map[string]bool
	natUnrestricted map[string]bool
	natUnknown      map[string]bool

	counts map[string]int
}

// Implements Observable
type Metrics struct {
	logger  *log.Logger
	geoipdb *geoip.Geoip

	countryStats                  CountryStats
	clientRoundtripEstimate       time.Duration
	proxyIdleCount                uint
	clientDeniedCount             map[messages.RendezvousMethod]uint
	clientRestrictedDeniedCount   map[messages.RendezvousMethod]uint
	clientUnrestrictedDeniedCount map[messages.RendezvousMethod]uint
	clientProxyMatchCount         map[messages.RendezvousMethod]uint
	clientProxyTimeoutCount       map[messages.RendezvousMethod]uint

	rendezvousCountryStats map[messages.RendezvousMethod]map[string]int

	proxyPollWithRelayURLExtension         uint
	proxyPollWithoutRelayURLExtension      uint
	proxyPollRejectedWithRelayURLExtension uint

	// synchronization for access to snowflake metrics
	lock sync.Mutex

	promMetrics *PromMetrics
}

type record struct {
	cc    string
	count int
}
type records []record

func (r records) Len() int      { return len(r) }
func (r records) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
func (r records) Less(i, j int) bool {
	if r[i].count == r[j].count {
		return r[i].cc > r[j].cc
	}
	return r[i].count < r[j].count
}

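// Display formats the per-country proxy counts as a comma-separated
// country=count list, sorted by descending count.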
func (s CountryStats) Display() string {
	output := ""

	// Use the records struct to sort our counts map by value.
	rs := records{}
	for cc, count := range s.counts {
		rs = append(rs, record{cc: cc, count: count})
	}
	sort.Sort(sort.Reverse(rs))
	for _, r := range rs {
		output += fmt.Sprintf("%s=%d,", r.cc, r.count)
	}

	// cut off trailing ","
	if len(output) > 0 {
		return output[:len(output)-1]
	}

	return output
}

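// UpdateCountryStats records a unique proxy address against its geoip
// country, proxy type, and NAT type. Repeated polls from an address already
// seen in the current metrics interval are ignored.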
func (m *Metrics) UpdateCountryStats(addr string, proxyType string, natType string) {
	var country string
	var ok bool

	addresses, ok := m.countryStats.proxies[proxyType]
	if !ok {
		if m.countryStats.unknown[addr] {
			return
		}
		m.countryStats.unknown[addr] = true
	} else {
		if addresses[addr] {
			return
		}
		addresses[addr] = true
	}

	ip := net.ParseIP(addr)
	if m.geoipdb == nil {
		return
	}
	country, ok = m.geoipdb.GetCountryByAddr(ip)
	if !ok {
		country = "??"
	}
	m.countryStats.counts[country]++

	m.promMetrics.ProxyTotal.With(prometheus.Labels{
		"nat":  natType,
		"type": proxyType,
		"cc":   country,
	}).Inc()

	switch natType {
	case NATRestricted:
		m.countryStats.natRestricted[addr] = true
	case NATUnrestricted:
		m.countryStats.natUnrestricted[addr] = true
	default:
		m.countryStats.natUnknown[addr] = true
	}
}

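// UpdateRendezvousStats records the outcome (denied, matched, or timeout) of
// a client rendezvous attempt, by rendezvous method and geoip country.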
func (m *Metrics) UpdateRendezvousStats(addr string, rendezvousMethod messages.RendezvousMethod, natType, status string) {
	ip := net.ParseIP(addr)
	country := "??"
	if m.geoipdb != nil {
		countryByAddr, ok := m.geoipdb.GetCountryByAddr(ip)
		if ok {
			country = countryByAddr
		}
	}

	switch status {
	case "denied":
		m.clientDeniedCount[rendezvousMethod]++
		if natType == NATUnrestricted {
			m.clientUnrestrictedDeniedCount[rendezvousMethod]++
		} else {
			m.clientRestrictedDeniedCount[rendezvousMethod]++
		}
	case "matched":
		m.clientProxyMatchCount[rendezvousMethod]++
	case "timeout":
		m.clientProxyTimeoutCount[rendezvousMethod]++
	default:
		log.Printf("Unknown rendezvous status: %s", status)
	}
	m.rendezvousCountryStats[rendezvousMethod][country]++
	m.promMetrics.ClientPollTotal.With(prometheus.Labels{
		"nat":               natType,
		"status":            status,
		"rendezvous_method": string(rendezvousMethod),
		"cc":                country,
	}).Inc()
}

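// DisplayRendezvousStatsByCountry formats the per-country client counts for a
// rendezvous method as a comma-separated country=count list, with counts
// rounded up by binCount.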
func (m *Metrics) DisplayRendezvousStatsByCountry(rendezvoudMethod messages.RendezvousMethod) string {
	output := ""

	// Use the records struct to sort our counts map by value.
	rs := records{}
	for cc, count := range m.rendezvousCountryStats[rendezvoudMethod] {
		rs = append(rs, record{cc: cc, count: count})
	}
	sort.Sort(sort.Reverse(rs))
	for _, r := range rs {
		output += fmt.Sprintf("%s=%d,", r.cc, binCount(uint(r.count)))
	}

	// cut off trailing ","
	if len(output) > 0 {
		return output[:len(output)-1]
	}

	return output
}

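// LoadGeoipDatabases (re)loads the IPv4 and IPv6 geoip databases used for
// country statistics. It is also invoked on SIGHUP so that the databases can
// be refreshed without restarting the broker.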
func (m *Metrics) LoadGeoipDatabases(geoipDB string, geoip6DB string) error {

	// Load geoip databases
	var err error
	log.Println("Loading geoip databases")
	m.geoipdb, err = geoip.New(geoipDB, geoip6DB)
	return err
}

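// NewMetrics initializes the broker metrics, including the Prometheus
// collectors, and starts the goroutine that writes a metrics snapshot to the
// log every metricsResolution.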
func NewMetrics(metricsLogger *log.Logger) (*Metrics, error) {
	m := new(Metrics)

	m.clientDeniedCount = make(map[messages.RendezvousMethod]uint)
	m.clientRestrictedDeniedCount = make(map[messages.RendezvousMethod]uint)
	m.clientUnrestrictedDeniedCount = make(map[messages.RendezvousMethod]uint)
	m.clientProxyMatchCount = make(map[messages.RendezvousMethod]uint)
	m.clientProxyTimeoutCount = make(map[messages.RendezvousMethod]uint)

	m.rendezvousCountryStats = make(map[messages.RendezvousMethod]map[string]int)
	for _, rendezvousMethod := range rendezvoudMethodList {
		m.rendezvousCountryStats[rendezvousMethod] = make(map[string]int)
	}

	m.countryStats = CountryStats{
		counts:          make(map[string]int),
		proxies:         make(map[string]map[string]bool),
		unknown:         make(map[string]bool),
		natRestricted:   make(map[string]bool),
		natUnrestricted: make(map[string]bool),
		natUnknown:      make(map[string]bool),
	}
	for pType := range messages.KnownProxyTypes {
		m.countryStats.proxies[pType] = make(map[string]bool)
	}

	m.logger = metricsLogger
	m.promMetrics = initPrometheus()

	// Write to log file every day with updated metrics
	go m.logMetrics()

	return m, nil
}

// Logs metrics in intervals specified by metricsResolution
func (m *Metrics) logMetrics() {
	heartbeat := time.Tick(metricsResolution)
	for range heartbeat {
		m.printMetrics()
		m.zeroMetrics()
	}
}

func (m *Metrics) printMetrics() {
	m.lock.Lock()
	m.logger.Println(
		"snowflake-stats-end",
		time.Now().UTC().Format("2006-01-02 15:04:05"),
		fmt.Sprintf("(%d s)", int(metricsResolution.Seconds())),
	)
	m.logger.Println("snowflake-ips", m.countryStats.Display())
	total := len(m.countryStats.unknown)
	for pType, addresses := range m.countryStats.proxies {
		m.logger.Printf("snowflake-ips-%s %d\n", pType, len(addresses))
		total += len(addresses)
	}
	m.logger.Println("snowflake-ips-total", total)
	m.logger.Println("snowflake-idle-count", binCount(m.proxyIdleCount))
	m.logger.Println("snowflake-proxy-poll-with-relay-url-count", binCount(m.proxyPollWithRelayURLExtension))
	m.logger.Println("snowflake-proxy-poll-without-relay-url-count", binCount(m.proxyPollWithoutRelayURLExtension))
	m.logger.Println("snowflake-proxy-rejected-for-relay-url-count", binCount(m.proxyPollRejectedWithRelayURLExtension))

	m.logger.Println("client-denied-count", binCount(sumMapValues(&m.clientDeniedCount)))
	m.logger.Println("client-restricted-denied-count", binCount(sumMapValues(&m.clientRestrictedDeniedCount)))
	m.logger.Println("client-unrestricted-denied-count", binCount(sumMapValues(&m.clientUnrestrictedDeniedCount)))
	m.logger.Println("client-snowflake-match-count", binCount(sumMapValues(&m.clientProxyMatchCount)))
	m.logger.Println("client-snowflake-timeout-count", binCount(sumMapValues(&m.clientProxyTimeoutCount)))

	for _, rendezvousMethod := range rendezvoudMethodList {
		m.logger.Printf("client-%s-count %d\n", rendezvousMethod, binCount(
			m.clientDeniedCount[rendezvousMethod]+m.clientProxyMatchCount[rendezvousMethod],
		))
		m.logger.Printf("client-%s-ips %s\n", rendezvousMethod, m.DisplayRendezvousStatsByCountry(rendezvousMethod))
	}

	m.logger.Println("snowflake-ips-nat-restricted", len(m.countryStats.natRestricted))
	m.logger.Println("snowflake-ips-nat-unrestricted", len(m.countryStats.natUnrestricted))
	m.logger.Println("snowflake-ips-nat-unknown", len(m.countryStats.natUnknown))
	m.lock.Unlock()
}

// Restores all metrics to original values
func (m *Metrics) zeroMetrics() {
	m.proxyIdleCount = 0
	m.clientDeniedCount = make(map[messages.RendezvousMethod]uint)
	m.clientRestrictedDeniedCount = make(map[messages.RendezvousMethod]uint)
	m.clientUnrestrictedDeniedCount = make(map[messages.RendezvousMethod]uint)
	m.proxyPollRejectedWithRelayURLExtension = 0
	m.proxyPollWithRelayURLExtension = 0
	m.proxyPollWithoutRelayURLExtension = 0
	m.clientProxyMatchCount = make(map[messages.RendezvousMethod]uint)
	m.clientProxyTimeoutCount = make(map[messages.RendezvousMethod]uint)

	m.rendezvousCountryStats = make(map[messages.RendezvousMethod]map[string]int)
	for _, rendezvousMethod := range rendezvoudMethodList {
		m.rendezvousCountryStats[rendezvousMethod] = make(map[string]int)
	}

	m.countryStats.counts = make(map[string]int)
	for pType := range m.countryStats.proxies {
		m.countryStats.proxies[pType] = make(map[string]bool)
	}
	m.countryStats.unknown = make(map[string]bool)
	m.countryStats.natRestricted = make(map[string]bool)
	m.countryStats.natUnrestricted = make(map[string]bool)
	m.countryStats.natUnknown = make(map[string]bool)
}

// binCount rounds a count up to the nearest multiple of 8.
func binCount(count uint) uint {
	return uint((math.Ceil(float64(count) / 8)) * 8)
}

func sumMapValues(m *map[messages.RendezvousMethod]uint) uint {
	var s uint = 0
	for _, v := range *m {
		s += v
	}
	return s
}

type PromMetrics struct {
	registry         *prometheus.Registry
	ProxyTotal       *prometheus.CounterVec
	ProxyPollTotal   *safeprom.CounterVec
	ClientPollTotal  *safeprom.CounterVec
	AvailableProxies *prometheus.GaugeVec

	ProxyPollWithRelayURLExtensionTotal    *safeprom.CounterVec
	ProxyPollWithoutRelayURLExtensionTotal *safeprom.CounterVec

	ProxyPollRejectedForRelayURLExtensionTotal *safeprom.CounterVec
}

// Initialize metrics for prometheus exporter
func initPrometheus() *PromMetrics {
	promMetrics := &PromMetrics{}

	promMetrics.registry = prometheus.NewRegistry()

	promMetrics.ProxyTotal = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: prometheusNamespace,
			Name:      "proxy_total",
			Help:      "The number of unique snowflake IPs",
		},
		[]string{"type", "nat", "cc"},
	)

	promMetrics.AvailableProxies = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace: prometheusNamespace,
			Name:      "available_proxies",
			Help:      "The number of currently available snowflake proxies",
		},
		[]string{"type", "nat"},
	)

	promMetrics.ProxyPollTotal = safeprom.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: prometheusNamespace,
			Name:      "rounded_proxy_poll_total",
			Help:      "The number of snowflake proxy polls, rounded up to a multiple of 8",
		},
		[]string{"nat", "status"},
	)

	promMetrics.ProxyPollWithRelayURLExtensionTotal = safeprom.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: prometheusNamespace,
			Name:      "rounded_proxy_poll_with_relay_url_extension_total",
			Help:      "The number of snowflake proxy polls with Relay URL Extension, rounded up to a multiple of 8",
		},
		[]string{"nat", "type"},
	)

	promMetrics.ProxyPollWithoutRelayURLExtensionTotal = safeprom.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: prometheusNamespace,
			Name:      "rounded_proxy_poll_without_relay_url_extension_total",
			Help:      "The number of snowflake proxy polls without Relay URL Extension, rounded up to a multiple of 8",
		},
		[]string{"nat", "type"},
	)

	promMetrics.ProxyPollRejectedForRelayURLExtensionTotal = safeprom.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: prometheusNamespace,
			Name:      "rounded_proxy_poll_rejected_relay_url_extension_total",
			Help:      "The number of snowflake proxy polls rejected by Relay URL Extension, rounded up to a multiple of 8",
		},
		[]string{"nat", "type"},
	)

	promMetrics.ClientPollTotal = safeprom.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: prometheusNamespace,
			Name:      "rounded_client_poll_total",
			Help:      "The number of snowflake client polls, rounded up to a multiple of 8",
		},
		[]string{"nat", "status", "cc", "rendezvous_method"},
	)

	// We need to register our metrics so they can be exported.
	promMetrics.registry.MustRegister(
		promMetrics.ClientPollTotal, promMetrics.ProxyPollTotal,
		promMetrics.ProxyTotal, promMetrics.AvailableProxies,
		promMetrics.ProxyPollWithRelayURLExtensionTotal,
		promMetrics.ProxyPollWithoutRelayURLExtensionTotal,
		promMetrics.ProxyPollRejectedForRelayURLExtensionTotal,
	)

	return promMetrics
}
07070100000013000081A400000000000000000000000167D9BD4E00007AC4000000000000000000000000000000000000003100000000snowflake-2.11.0/broker/snowflake-broker_test.gopackage main

import (
	"bytes"
	"container/heap"
	"encoding/hex"
	"fmt"
	"io"
	"log"
	"net/http"
	"net/http/httptest"
	"os"
	"sync"
	"testing"
	"time"

	. "github.com/smartystreets/goconvey/convey"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/amp"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/messages"
)

func NullLogger() *log.Logger {
	logger := log.New(os.Stdout, "", 0)
	logger.SetOutput(io.Discard)
	return logger
}

var promOnce sync.Once

var (
	sdp = "v=0\r\n" +
		"o=- 123456789 987654321 IN IP4 0.0.0.0\r\n" +
		"s=-\r\n" +
		"t=0 0\r\n" +
		"a=fingerprint:sha-256 12:34\r\n" +
		"a=extmap-allow-mixed\r\n" +
		"a=group:BUNDLE 0\r\n" +
		"m=application 9 UDP/DTLS/SCTP webrtc-datachannel\r\n" +
		"c=IN IP4 0.0.0.0\r\n" +
		"a=setup:actpass\r\n" +
		"a=mid:0\r\n" +
		"a=sendrecv\r\n" +
		"a=sctp-port:5000\r\n" +
		"a=ice-ufrag:CoVEaiFXRGVzshXG\r\n" +
		"a=ice-pwd:aOrOZXraTfFKzyeBxIXYYKjSgRVPGhUx\r\n" +
		"a=candidate:1000 1 udp 2000 8.8.8.8 3000 typ host\r\n" +
		"a=end-of-candidates\r\n"

	sid = "ymbcCMto7KHNGYlp"
)

func createClientOffer(sdp, nat, fingerprint string) (*bytes.Reader, error) {
	clientRequest := &messages.ClientPollRequest{
		Offer:       sdp,
		NAT:         nat,
		Fingerprint: fingerprint,
	}
	encOffer, err := clientRequest.EncodeClientPollRequest()
	if err != nil {
		return nil, err
	}
	offer := bytes.NewReader(encOffer)
	return offer, nil
}

func createProxyAnswer(sdp, sid string) (*bytes.Reader, error) {
	proxyRequest, err := messages.EncodeAnswerRequest(sdp, sid)
	if err != nil {
		return nil, err
	}
	answer := bytes.NewReader(proxyRequest)
	return answer, nil
}

func decodeAMPArmorToString(r io.Reader) (string, error) {
	dec, err := amp.NewArmorDecoder(r)
	if err != nil {
		return "", err
	}
	p, err := io.ReadAll(dec)
	return string(p), err
}

func TestBroker(t *testing.T) {

	defaultBridgeValue, _ := hex.DecodeString("2B280B23E1107BB62ABFC40DDCC8824814F80A72")
	var defaultBridge [20]byte
	copy(defaultBridge[:], defaultBridgeValue)

	Convey("Context", t, func() {
		buf := new(bytes.Buffer)
		ctx := NewBrokerContext(log.New(buf, "", 0), "", "")
		i := &IPC{ctx}

		Convey("Adds Snowflake", func() {
			So(ctx.snowflakes.Len(), ShouldEqual, 0)
			So(len(ctx.idToSnowflake), ShouldEqual, 0)
			ctx.AddSnowflake("foo", "", NATUnrestricted, 0)
			So(ctx.snowflakes.Len(), ShouldEqual, 1)
			So(len(ctx.idToSnowflake), ShouldEqual, 1)
		})

		Convey("Broker goroutine matches clients with proxies", func() {
			p := new(ProxyPoll)
			p.id = "test"
			p.natType = "unrestricted"
			p.offerChannel = make(chan *ClientOffer)
			go func(ctx *BrokerContext) {
				ctx.proxyPolls <- p
				close(ctx.proxyPolls)
			}(ctx)
			ctx.Broker()
			So(ctx.snowflakes.Len(), ShouldEqual, 1)
			snowflake := heap.Pop(ctx.snowflakes).(*Snowflake)
			snowflake.offerChannel <- &ClientOffer{sdp: []byte("test offer")}
			offer := <-p.offerChannel
			So(ctx.idToSnowflake["test"], ShouldNotBeNil)
			So(offer.sdp, ShouldResemble, []byte("test offer"))
			So(ctx.snowflakes.Len(), ShouldEqual, 0)
		})

		Convey("Request an offer from the Snowflake Heap", func() {
			done := make(chan *ClientOffer)
			go func() {
				offer := ctx.RequestOffer("test", "", NATUnrestricted, 0)
				done <- offer
			}()
			request := <-ctx.proxyPolls
			request.offerChannel <- &ClientOffer{sdp: []byte("test offer")}
			offer := <-done
			So(offer.sdp, ShouldResemble, []byte("test offer"))
		})

		Convey("Responds to HTTP client offers...", func() {
			w := httptest.NewRecorder()
			data, err := createClientOffer(sdp, NATUnknown, "")
			So(err, ShouldBeNil)
			r, err := http.NewRequest("POST", "snowflake.broker/client", data)
			So(err, ShouldBeNil)

			Convey("with error when no snowflakes are available.", func() {
				clientOffers(i, w, r)
				So(w.Code, ShouldEqual, http.StatusOK)
				So(w.Body.String(), ShouldEqual, `{"error":"no snowflake proxies currently available"}`)

				// Ensure that denial is correctly recorded in metrics
				ctx.metrics.printMetrics()
				So(buf.String(), ShouldContainSubstring, `client-denied-count 8
client-restricted-denied-count 8
client-unrestricted-denied-count 0
client-snowflake-match-count 0
client-snowflake-timeout-count 0
client-http-count 8
client-http-ips ??=8
client-ampcache-count 0
client-ampcache-ips 
client-sqs-count 0
client-sqs-ips 
`)
			})

			Convey("with a proxy answer if available.", func() {
				done := make(chan bool)
				// Prepare a fake proxy to respond with.
				snowflake := ctx.AddSnowflake("test", "", NATUnrestricted, 0)
				go func() {
					clientOffers(i, w, r)
					done <- true
				}()
				offer := <-snowflake.offerChannel
				So(offer.sdp, ShouldResemble, []byte(sdp))
				snowflake.answerChannel <- "test answer"
				<-done
				So(w.Body.String(), ShouldEqual, `{"answer":"test answer"}`)
				So(w.Code, ShouldEqual, http.StatusOK)

				// Ensure that match is correctly recorded in metrics
				ctx.metrics.printMetrics()
				So(buf.String(), ShouldContainSubstring, `client-denied-count 0
client-restricted-denied-count 0
client-unrestricted-denied-count 0
client-snowflake-match-count 8
client-snowflake-timeout-count 0
client-http-count 8
client-http-ips ??=8
client-ampcache-count 0
client-ampcache-ips 
client-sqs-count 0
client-sqs-ips 
`)
			})

			Convey("with unrestricted proxy to unrestricted client if there are no restricted proxies", func() {
				snowflake := ctx.AddSnowflake("test", "", NATUnrestricted, 0)
				offerData, err := createClientOffer(sdp, NATUnrestricted, "")
				So(err, ShouldBeNil)
				r, err := http.NewRequest("POST", "snowflake.broker/client", offerData)
				So(err, ShouldBeNil)

				done := make(chan bool)
				go func() {
					clientOffers(i, w, r)
					done <- true
				}()

				select {
				case <-snowflake.offerChannel:
				case <-time.After(250 * time.Millisecond):
					So(false, ShouldBeTrue)
					return
				}
				snowflake.answerChannel <- "test answer"

				<-done
				So(w.Body.String(), ShouldEqual, `{"answer":"test answer"}`)
			})

			Convey("Times out when no proxy responds.", func() {
				if testing.Short() {
					return
				}
				done := make(chan bool)
				snowflake := ctx.AddSnowflake("fake", "", NATUnrestricted, 0)
				go func() {
					clientOffers(i, w, r)
					// Takes a few seconds here...
					done <- true
				}()
				offer := <-snowflake.offerChannel
				So(offer.sdp, ShouldResemble, []byte(sdp))
				<-done
				So(w.Code, ShouldEqual, http.StatusOK)
				So(w.Body.String(), ShouldEqual, `{"error":"timed out waiting for answer!"}`)
			})
		})

		Convey("Responds to HTTP legacy client offers...", func() {
			w := httptest.NewRecorder()
			// legacy offer starts with {
			offer := bytes.NewReader([]byte(fmt.Sprintf(`{%v}`, sdp)))
			r, err := http.NewRequest("POST", "snowflake.broker/client", offer)
			So(err, ShouldBeNil)
			r.Header.Set("Snowflake-NAT-TYPE", "restricted")

			Convey("with 503 when no snowflakes are available.", func() {
				clientOffers(i, w, r)
				So(w.Code, ShouldEqual, http.StatusServiceUnavailable)
				So(w.Body.String(), ShouldEqual, "")

				// Ensure that denial is correctly recorded in metrics
				ctx.metrics.printMetrics()
				So(buf.String(), ShouldContainSubstring, `client-denied-count 8
client-restricted-denied-count 8
client-unrestricted-denied-count 0
client-snowflake-match-count 0
client-snowflake-timeout-count 0
client-http-count 8
client-http-ips ??=8
client-ampcache-count 0
client-ampcache-ips 
client-sqs-count 0
client-sqs-ips 
`)
			})

			Convey("with a proxy answer if available.", func() {
				done := make(chan bool)
				// Prepare a fake proxy to respond with.
				snowflake := ctx.AddSnowflake("fake", "", NATUnrestricted, 0)
				go func() {
					clientOffers(i, w, r)
					done <- true
				}()
				offer := <-snowflake.offerChannel
				So(offer.sdp, ShouldResemble, []byte(fmt.Sprintf(`{%v}`, sdp)))
				snowflake.answerChannel <- "fake answer"
				<-done
				So(w.Body.String(), ShouldEqual, "fake answer")
				So(w.Code, ShouldEqual, http.StatusOK)

				// Ensure that match is correctly recorded in metrics
				ctx.metrics.printMetrics()
				So(buf.String(), ShouldContainSubstring, `client-denied-count 0
client-restricted-denied-count 0
client-unrestricted-denied-count 0
client-snowflake-match-count 8
client-snowflake-timeout-count 0
client-http-count 8
client-http-ips ??=8
client-ampcache-count 0
client-ampcache-ips 
client-sqs-count 0
client-sqs-ips 
`)
			})

			Convey("Times out when no proxy responds.", func() {
				if testing.Short() {
					return
				}
				done := make(chan bool)
				snowflake := ctx.AddSnowflake("fake", "", NATUnrestricted, 0)
				go func() {
					clientOffers(i, w, r)
					// Takes a few seconds here...
					done <- true
				}()
				offer := <-snowflake.offerChannel
				So(offer.sdp, ShouldResemble, []byte(fmt.Sprintf(`{%v}`, sdp)))
				<-done
				So(w.Code, ShouldEqual, http.StatusGatewayTimeout)
			})

		})

		Convey("Responds to AMP client offers...", func() {
			w := httptest.NewRecorder()
			encPollReq := []byte("1.0\n{\"offer\": \"fake\", \"nat\": \"unknown\"}")
			r, err := http.NewRequest("GET", "/amp/client/"+amp.EncodePath(encPollReq), nil)
			So(err, ShouldBeNil)

			Convey("with status 200 when request is badly formatted.", func() {
				r, err := http.NewRequest("GET", "/amp/client/bad", nil)
				So(err, ShouldBeNil)
				ampClientOffers(i, w, r)
				body, err := decodeAMPArmorToString(w.Body)
				So(err, ShouldBeNil)
				So(body, ShouldEqual, `{"error":"cannot decode URL path"}`)
			})

			Convey("with error when no snowflakes are available.", func() {
				ampClientOffers(i, w, r)
				So(w.Code, ShouldEqual, http.StatusOK)
				body, err := decodeAMPArmorToString(w.Body)
				So(err, ShouldBeNil)
				So(body, ShouldEqual, `{"error":"no snowflake proxies currently available"}`)

				// Ensure that denial is correctly recorded in metrics
				ctx.metrics.printMetrics()
				So(buf.String(), ShouldContainSubstring, `client-denied-count 8
client-restricted-denied-count 8
client-unrestricted-denied-count 0
client-snowflake-match-count 0
client-snowflake-timeout-count 0
client-http-count 0
client-http-ips 
client-ampcache-count 8
client-ampcache-ips ??=8
client-sqs-count 0
client-sqs-ips 
`)
			})

			Convey("with a proxy answer if available.", func() {
				done := make(chan bool)
				// Prepare a fake proxy to respond with.
				snowflake := ctx.AddSnowflake("fake", "", NATUnrestricted, 0)
				go func() {
					ampClientOffers(i, w, r)
					done <- true
				}()
				offer := <-snowflake.offerChannel
				So(offer.sdp, ShouldResemble, []byte("fake"))
				snowflake.answerChannel <- "fake answer"
				<-done
				body, err := decodeAMPArmorToString(w.Body)
				So(err, ShouldBeNil)
				So(body, ShouldEqual, `{"answer":"fake answer"}`)
				So(w.Code, ShouldEqual, http.StatusOK)

				// Ensure that match is correctly recorded in metrics
				ctx.metrics.printMetrics()
				So(buf.String(), ShouldContainSubstring, `client-denied-count 0
client-restricted-denied-count 0
client-unrestricted-denied-count 0
client-snowflake-match-count 8
client-snowflake-timeout-count 0
client-http-count 0
client-http-ips 
client-ampcache-count 8
client-ampcache-ips ??=8
client-sqs-count 0
client-sqs-ips 
`)
			})

			Convey("Times out when no proxy responds.", func() {
				if testing.Short() {
					return
				}
				done := make(chan bool)
				snowflake := ctx.AddSnowflake("fake", "", NATUnrestricted, 0)
				go func() {
					ampClientOffers(i, w, r)
					// Takes a few seconds here...
					done <- true
				}()
				offer := <-snowflake.offerChannel
				So(offer.sdp, ShouldResemble, []byte("fake"))
				<-done
				So(w.Code, ShouldEqual, http.StatusOK)
				body, err := decodeAMPArmorToString(w.Body)
				So(err, ShouldBeNil)
				So(body, ShouldEqual, `{"error":"timed out waiting for answer!"}`)
			})

		})

		Convey("Responds to proxy polls...", func() {
			done := make(chan bool)
			w := httptest.NewRecorder()
			data := bytes.NewReader([]byte(`{"Sid":"ymbcCMto7KHNGYlp","Version":"1.0"}`))
			r, err := http.NewRequest("POST", "snowflake.broker/proxy", data)
			So(err, ShouldBeNil)

			Convey("with a client offer if available.", func() {
				go func(i *IPC) {
					proxyPolls(i, w, r)
					done <- true
				}(i)
				// Pass a fake client offer to this proxy
				p := <-ctx.proxyPolls
				So(p.id, ShouldEqual, "ymbcCMto7KHNGYlp")
				p.offerChannel <- &ClientOffer{sdp: []byte("fake offer"), fingerprint: defaultBridge[:]}
				<-done
				So(w.Code, ShouldEqual, http.StatusOK)
				So(w.Body.String(), ShouldEqual, `{"Status":"client match","Offer":"fake offer","NAT":"","RelayURL":"wss://snowflake.torproject.net/"}`)
			})

			Convey("return empty 200 OK when no client offer is available.", func() {
				go func(i *IPC) {
					proxyPolls(i, w, r)
					done <- true
				}(i)
				p := <-ctx.proxyPolls
				So(p.id, ShouldEqual, "ymbcCMto7KHNGYlp")
				// nil means timeout
				p.offerChannel <- nil
				<-done
				So(w.Body.String(), ShouldEqual, `{"Status":"no match","Offer":"","NAT":"","RelayURL":""}`)
				So(w.Code, ShouldEqual, http.StatusOK)
			})
		})

		Convey("Responds to proxy answers...", func() {
			done := make(chan bool)
			s := ctx.AddSnowflake(sid, "", NATUnrestricted, 0)
			w := httptest.NewRecorder()

			data, err := createProxyAnswer(sdp, sid)
			So(err, ShouldBeNil)

			Convey("by passing to the client if valid.", func() {
				r, err := http.NewRequest("POST", "snowflake.broker/answer", data)
				So(err, ShouldBeNil)
				go func(i *IPC) {
					proxyAnswers(i, w, r)
					done <- true
				}(i)
				answer := <-s.answerChannel
				<-done
				So(w.Code, ShouldEqual, http.StatusOK)
				So(answer, ShouldResemble, sdp)
			})

			Convey("with client gone status if the proxy ID is not recognized", func() {
				data, err := createProxyAnswer(sdp, "invalid")
				So(err, ShouldBeNil)
				r, err := http.NewRequest("POST", "snowflake.broker/answer", data)
				So(err, ShouldBeNil)
				proxyAnswers(i, w, r)
				So(w.Code, ShouldEqual, http.StatusOK)
				b, err := io.ReadAll(w.Body)
				So(err, ShouldBeNil)
				So(b, ShouldResemble, []byte(`{"Status":"client gone"}`))
			})

			Convey("with error if the proxy gives invalid answer", func() {
				data := bytes.NewReader(nil)
				r, err := http.NewRequest("POST", "snowflake.broker/answer", data)
				So(err, ShouldBeNil)
				proxyAnswers(i, w, r)
				So(w.Code, ShouldEqual, http.StatusBadRequest)
			})

			Convey("with error if the proxy writes too much data", func() {
				data := bytes.NewReader(make([]byte, 100001))
				r, err := http.NewRequest("POST", "snowflake.broker/answer", data)
				So(err, ShouldBeNil)
				proxyAnswers(i, w, r)
				So(w.Code, ShouldEqual, http.StatusBadRequest)
			})

		})

	})

	Convey("End-To-End", t, func() {
		ctx := NewBrokerContext(NullLogger(), "", "")
		i := &IPC{ctx}

		Convey("Check for client/proxy data race", func() {
			proxy_done := make(chan bool)
			client_done := make(chan bool)

			go ctx.Broker()

			// Make proxy poll
			wp := httptest.NewRecorder()
			datap := bytes.NewReader([]byte(`{"Sid":"ymbcCMto7KHNGYlp","Version":"1.0"}`))
			rp, err := http.NewRequest("POST", "snowflake.broker/proxy", datap)
			So(err, ShouldBeNil)

			go func(i *IPC) {
				proxyPolls(i, wp, rp)
				proxy_done <- true
			}(i)

			// Client offer
			wc := httptest.NewRecorder()
			datac, err := createClientOffer(sdp, NATUnknown, "")
			So(err, ShouldBeNil)
			rc, err := http.NewRequest("POST", "snowflake.broker/client", datac)
			So(err, ShouldBeNil)

			go func() {
				clientOffers(i, wc, rc)
				client_done <- true
			}()

			<-proxy_done
			So(wp.Code, ShouldEqual, http.StatusOK)

			// Proxy answers
			wp = httptest.NewRecorder()
			datap, err = createProxyAnswer(sdp, sid)
			So(err, ShouldBeNil)
			rp, err = http.NewRequest("POST", "snowflake.broker/answer", datap)
			So(err, ShouldBeNil)
			go func(i *IPC) {
				proxyAnswers(i, wp, rp)
				proxy_done <- true
			}(i)

			<-proxy_done
			<-client_done

		})

		Convey("Ensure correct snowflake brokering", func() {
			done := make(chan bool)
			polled := make(chan bool)

			// Proxy polls with its ID first...
			dataP := bytes.NewReader([]byte(`{"Sid":"ymbcCMto7KHNGYlp","Version":"1.0"}`))
			wP := httptest.NewRecorder()
			rP, err := http.NewRequest("POST", "snowflake.broker/proxy", dataP)
			So(err, ShouldBeNil)
			go func() {
				proxyPolls(i, wP, rP)
				polled <- true
			}()

			// Manually do the Broker goroutine action here for full control.
			p := <-ctx.proxyPolls
			So(p.id, ShouldEqual, "ymbcCMto7KHNGYlp")
			s := ctx.AddSnowflake(p.id, "", NATUnrestricted, 0)
			go func() {
				offer := <-s.offerChannel
				p.offerChannel <- offer
			}()
			So(ctx.idToSnowflake["ymbcCMto7KHNGYlp"], ShouldNotBeNil)

			// Client request blocks until proxy answer arrives.
			wC := httptest.NewRecorder()
			dataC, err := createClientOffer(sdp, NATUnknown, "")
			So(err, ShouldBeNil)
			rC, err := http.NewRequest("POST", "snowflake.broker/client", dataC)
			So(err, ShouldBeNil)
			go func() {
				clientOffers(i, wC, rC)
				done <- true
			}()

			<-polled
			So(wP.Code, ShouldEqual, http.StatusOK)
			So(wP.Body.String(), ShouldResemble, fmt.Sprintf(`{"Status":"client match","Offer":%#q,"NAT":"unknown","RelayURL":"wss://snowflake.torproject.net/"}`, sdp))
			So(ctx.idToSnowflake[sid], ShouldNotBeNil)

			// Follow up with the answer request afterwards
			wA := httptest.NewRecorder()
			dataA, err := createProxyAnswer(sdp, sid)
			So(err, ShouldBeNil)
			rA, err := http.NewRequest("POST", "snowflake.broker/answer", dataA)
			So(err, ShouldBeNil)
			proxyAnswers(i, wA, rA)
			So(wA.Code, ShouldEqual, http.StatusOK)

			<-done
			So(wC.Code, ShouldEqual, http.StatusOK)
			So(wC.Body.String(), ShouldEqual, fmt.Sprintf(`{"answer":%#q}`, sdp))
		})
	})
}

func TestSnowflakeHeap(t *testing.T) {
	Convey("SnowflakeHeap", t, func() {
		h := new(SnowflakeHeap)
		heap.Init(h)
		So(h.Len(), ShouldEqual, 0)
		s1 := new(Snowflake)
		s2 := new(Snowflake)
		s3 := new(Snowflake)
		s4 := new(Snowflake)
		s1.clients = 4
		s2.clients = 5
		s3.clients = 3
		s4.clients = 1

		heap.Push(h, s1)
		So(h.Len(), ShouldEqual, 1)
		heap.Push(h, s2)
		So(h.Len(), ShouldEqual, 2)
		heap.Push(h, s3)
		So(h.Len(), ShouldEqual, 3)
		heap.Push(h, s4)
		So(h.Len(), ShouldEqual, 4)

		heap.Remove(h, 0)
		So(h.Len(), ShouldEqual, 3)

		r := heap.Pop(h).(*Snowflake)
		So(h.Len(), ShouldEqual, 2)
		So(r.clients, ShouldEqual, 3)
		So(r.index, ShouldEqual, -1)

		r = heap.Pop(h).(*Snowflake)
		So(h.Len(), ShouldEqual, 1)
		So(r.clients, ShouldEqual, 4)
		So(r.index, ShouldEqual, -1)

		r = heap.Pop(h).(*Snowflake)
		So(h.Len(), ShouldEqual, 0)
		So(r.clients, ShouldEqual, 5)
		So(r.index, ShouldEqual, -1)
	})
}

func TestInvalidGeoipFile(t *testing.T) {
	Convey("Geoip", t, func() {
		// Make sure things behave properly if geoip file fails to load
		ctx := NewBrokerContext(NullLogger(), "", "")
		if err := ctx.metrics.LoadGeoipDatabases("invalid_filename", "invalid_filename6"); err != nil {
			log.Printf("loading geo ip databases returned error: %v", err)
		}
		ctx.metrics.UpdateCountryStats("127.0.0.1", "", NATUnrestricted)
		So(ctx.metrics.geoipdb, ShouldBeNil)

	})
}

func TestMetrics(t *testing.T) {
	Convey("Test metrics...", t, func() {
		done := make(chan bool)
		buf := new(bytes.Buffer)
		ctx := NewBrokerContext(log.New(buf, "", 0), "", "")
		i := &IPC{ctx}

		err := ctx.metrics.LoadGeoipDatabases("test_geoip", "test_geoip6")
		So(err, ShouldBeNil)

		//Test addition of proxy polls
		Convey("for proxy polls", func() {
			w := httptest.NewRecorder()
			data := bytes.NewReader([]byte("{\"Sid\":\"ymbcCMto7KHNGYlp\",\"Version\":\"1.0\"}"))
			r, err := http.NewRequest("POST", "snowflake.broker/proxy", data)
			r.RemoteAddr = "129.97.208.23:8888" //CA geoip
			So(err, ShouldBeNil)
			go func(i *IPC) {
				proxyPolls(i, w, r)
				done <- true
			}(i)
			p := <-ctx.proxyPolls //manually unblock poll
			p.offerChannel <- nil
			<-done

			w = httptest.NewRecorder()
			data = bytes.NewReader([]byte(`{"Sid":"ymbcCMto7KHNGYlp","Version":"1.0","Type":"standalone"}`))
			r, err = http.NewRequest("POST", "snowflake.broker/proxy", data)
			r.RemoteAddr = "129.97.208.23:8888" //CA geoip
			So(err, ShouldBeNil)
			go func(i *IPC) {
				proxyPolls(i, w, r)
				done <- true
			}(i)
			p = <-ctx.proxyPolls //manually unblock poll
			p.offerChannel <- nil
			<-done

			w = httptest.NewRecorder()
			data = bytes.NewReader([]byte(`{"Sid":"ymbcCMto7KHNGYlp","Version":"1.0","Type":"badge"}`))
			r, err = http.NewRequest("POST", "snowflake.broker/proxy", data)
			r.RemoteAddr = "129.97.208.23:8888" //CA geoip
			So(err, ShouldBeNil)
			go func(i *IPC) {
				proxyPolls(i, w, r)
				done <- true
			}(i)
			p = <-ctx.proxyPolls //manually unblock poll
			p.offerChannel <- nil
			<-done

			w = httptest.NewRecorder()
			data = bytes.NewReader([]byte(`{"Sid":"ymbcCMto7KHNGYlp","Version":"1.0","Type":"webext"}`))
			r, err = http.NewRequest("POST", "snowflake.broker/proxy", data)
			r.RemoteAddr = "129.97.208.23:8888" //CA geoip
			So(err, ShouldBeNil)
			go func(i *IPC) {
				proxyPolls(i, w, r)
				done <- true
			}(i)
			p = <-ctx.proxyPolls //manually unblock poll
			p.offerChannel <- nil
			<-done
			ctx.metrics.printMetrics()

			metricsStr := buf.String()
			So(metricsStr, ShouldStartWith, "snowflake-stats-end "+time.Now().UTC().Format("2006-01-02 15:04:05")+" (86400 s)\nsnowflake-ips CA=4\n")
			So(metricsStr, ShouldContainSubstring, "\nsnowflake-ips-standalone 1\n")
			So(metricsStr, ShouldContainSubstring, "\nsnowflake-ips-badge 1\n")
			So(metricsStr, ShouldContainSubstring, "\nsnowflake-ips-webext 1\n")
			So(metricsStr, ShouldEndWith, `snowflake-ips-total 4
snowflake-idle-count 8
snowflake-proxy-poll-with-relay-url-count 0
snowflake-proxy-poll-without-relay-url-count 8
snowflake-proxy-rejected-for-relay-url-count 0
client-denied-count 0
client-restricted-denied-count 0
client-unrestricted-denied-count 0
client-snowflake-match-count 0
client-snowflake-timeout-count 0
client-http-count 0
client-http-ips 
client-ampcache-count 0
client-ampcache-ips 
client-sqs-count 0
client-sqs-ips 
snowflake-ips-nat-restricted 0
snowflake-ips-nat-unrestricted 0
snowflake-ips-nat-unknown 1
`)
		})

		//Test addition of client failures
		Convey("for no proxies available", func() {
			w := httptest.NewRecorder()
			data, err := createClientOffer(sdp, NATUnknown, "")
			So(err, ShouldBeNil)
			r, err := http.NewRequest("POST", "snowflake.broker/client", data)
			r.RemoteAddr = "129.97.208.23:8888" //CA geoip
			So(err, ShouldBeNil)

			clientOffers(i, w, r)

			ctx.metrics.printMetrics()
			So(buf.String(), ShouldContainSubstring, `client-denied-count 8
client-restricted-denied-count 8
client-unrestricted-denied-count 0
client-snowflake-match-count 0
client-snowflake-timeout-count 0
client-http-count 8
client-http-ips CA=8
client-ampcache-count 0
client-ampcache-ips 
client-sqs-count 0
client-sqs-ips `)

			// Test reset
			buf.Reset()
			ctx.metrics.zeroMetrics()
			ctx.metrics.printMetrics()
			So(buf.String(), ShouldContainSubstring, "\nsnowflake-ips \n")
			So(buf.String(), ShouldContainSubstring, "\nsnowflake-ips-standalone 0\n")
			So(buf.String(), ShouldContainSubstring, "\nsnowflake-ips-badge 0\n")
			So(buf.String(), ShouldContainSubstring, "\nsnowflake-ips-webext 0\n")
			So(buf.String(), ShouldContainSubstring, `snowflake-ips-total 0
snowflake-idle-count 0
snowflake-proxy-poll-with-relay-url-count 0
snowflake-proxy-poll-without-relay-url-count 0
snowflake-proxy-rejected-for-relay-url-count 0
client-denied-count 0
client-restricted-denied-count 0
client-unrestricted-denied-count 0
client-snowflake-match-count 0
client-snowflake-timeout-count 0
client-http-count 0
client-http-ips 
client-ampcache-count 0
client-ampcache-ips 
client-sqs-count 0
client-sqs-ips 
snowflake-ips-nat-restricted 0
snowflake-ips-nat-unrestricted 0
snowflake-ips-nat-unknown 0
`)
		})
		//Test addition of client matches
		Convey("for client-proxy match", func() {
			w := httptest.NewRecorder()
			data, err := createClientOffer(sdp, NATUnknown, "")
			So(err, ShouldBeNil)
			r, err := http.NewRequest("POST", "snowflake.broker/client", data)
			So(err, ShouldBeNil)

			// Prepare a fake proxy to respond with.
			snowflake := ctx.AddSnowflake("fake", "", NATUnrestricted, 0)
			go func() {
				clientOffers(i, w, r)
				done <- true
			}()
			offer := <-snowflake.offerChannel
			So(offer.sdp, ShouldResemble, []byte(sdp))
			snowflake.answerChannel <- "fake answer"
			<-done

			ctx.metrics.printMetrics()
			So(buf.String(), ShouldContainSubstring, "client-denied-count 0\nclient-restricted-denied-count 0\nclient-unrestricted-denied-count 0\nclient-snowflake-match-count 8")
		})
		//Test the binning boundary: counts are rounded up to the next multiple of 8
		Convey("binning boundary", func() {
			w := httptest.NewRecorder()
			data, err := createClientOffer(sdp, NATRestricted, "")
			So(err, ShouldBeNil)
			r, err := http.NewRequest("POST", "snowflake.broker/client", data)
			So(err, ShouldBeNil)
			clientOffers(i, w, r)

			w = httptest.NewRecorder()
			data, err = createClientOffer(sdp, NATRestricted, "")
			So(err, ShouldBeNil)
			r, err = http.NewRequest("POST", "snowflake.broker/client", data)
			So(err, ShouldBeNil)
			clientOffers(i, w, r)

			w = httptest.NewRecorder()
			data, err = createClientOffer(sdp, NATRestricted, "")
			So(err, ShouldBeNil)
			r, err = http.NewRequest("POST", "snowflake.broker/client", data)
			So(err, ShouldBeNil)
			clientOffers(i, w, r)

			w = httptest.NewRecorder()
			data, err = createClientOffer(sdp, NATRestricted, "")
			So(err, ShouldBeNil)
			r, err = http.NewRequest("POST", "snowflake.broker/client", data)
			So(err, ShouldBeNil)
			clientOffers(i, w, r)

			w = httptest.NewRecorder()
			data, err = createClientOffer(sdp, NATRestricted, "")
			So(err, ShouldBeNil)
			r, err = http.NewRequest("POST", "snowflake.broker/client", data)
			So(err, ShouldBeNil)
			clientOffers(i, w, r)

			w = httptest.NewRecorder()
			data, err = createClientOffer(sdp, NATRestricted, "")
			So(err, ShouldBeNil)
			r, err = http.NewRequest("POST", "snowflake.broker/client", data)
			So(err, ShouldBeNil)
			clientOffers(i, w, r)

			w = httptest.NewRecorder()
			data, err = createClientOffer(sdp, NATRestricted, "")
			So(err, ShouldBeNil)
			r, err = http.NewRequest("POST", "snowflake.broker/client", data)
			So(err, ShouldBeNil)
			clientOffers(i, w, r)

			w = httptest.NewRecorder()
			data, err = createClientOffer(sdp, NATRestricted, "")
			So(err, ShouldBeNil)
			r, err = http.NewRequest("POST", "snowflake.broker/client", data)
			So(err, ShouldBeNil)
			clientOffers(i, w, r)

			ctx.metrics.printMetrics()
			So(buf.String(), ShouldContainSubstring, "client-denied-count 8\nclient-restricted-denied-count 8\nclient-unrestricted-denied-count 0\n")

			w = httptest.NewRecorder()
			data, err = createClientOffer(sdp, NATRestricted, "")
			So(err, ShouldBeNil)
			r, err = http.NewRequest("POST", "snowflake.broker/client", data)
			So(err, ShouldBeNil)
			clientOffers(i, w, r)

			buf.Reset()
			ctx.metrics.printMetrics()
			So(buf.String(), ShouldContainSubstring, "client-denied-count 16\nclient-restricted-denied-count 16\nclient-unrestricted-denied-count 0\n")
		})

		//Test unique ip
		Convey("proxy counts by unique ip", func() {
			w := httptest.NewRecorder()
			data := bytes.NewReader([]byte(`{"Sid":"ymbcCMto7KHNGYlp","Version":"1.0"}`))
			r, err := http.NewRequest("POST", "snowflake.broker/proxy", data)
			r.RemoteAddr = "129.97.208.23:8888" //CA geoip
			So(err, ShouldBeNil)
			go func(i *IPC) {
				proxyPolls(i, w, r)
				done <- true
			}(i)
			p := <-ctx.proxyPolls //manually unblock poll
			p.offerChannel <- nil
			<-done

			data = bytes.NewReader([]byte(`{"Sid":"ymbcCMto7KHNGYlp","Version":"1.0"}`))
			r, err = http.NewRequest("POST", "snowflake.broker/proxy", data)
			if err != nil {
				log.Printf("unable to get NewRequest with error: %v", err)
			}
			r.RemoteAddr = "129.97.208.23:8888" //CA geoip
			go func(i *IPC) {
				proxyPolls(i, w, r)
				done <- true
			}(i)
			p = <-ctx.proxyPolls //manually unblock poll
			p.offerChannel <- nil
			<-done

			ctx.metrics.printMetrics()
			metricsStr := buf.String()
			So(metricsStr, ShouldContainSubstring, "snowflake-ips CA=1\n")
			So(metricsStr, ShouldContainSubstring, "snowflake-ips-total 1\n")
		})
		//Test NAT types
		Convey("proxy counts by NAT type", func() {
			w := httptest.NewRecorder()
			data := bytes.NewReader([]byte(`{"Sid":"ymbcCMto7KHNGYlp","Version":"1.2","Type":"unknown","NAT":"restricted"}`))
			r, err := http.NewRequest("POST", "snowflake.broker/proxy", data)
			r.RemoteAddr = "129.97.208.23:8888" //CA geoip
			So(err, ShouldBeNil)
			go func(i *IPC) {
				proxyPolls(i, w, r)
				done <- true
			}(i)
			p := <-ctx.proxyPolls //manually unblock poll
			p.offerChannel <- nil
			<-done

			ctx.metrics.printMetrics()
			So(buf.String(), ShouldContainSubstring, "snowflake-ips-nat-restricted 1\nsnowflake-ips-nat-unrestricted 0\nsnowflake-ips-nat-unknown 0")

			data = bytes.NewReader([]byte(`{"Sid":"ymbcCMto7KHNGYlp","Version":"1.2","Type":"unknown","NAT":"unrestricted"}`))
			r, err = http.NewRequest("POST", "snowflake.broker/proxy", data)
			if err != nil {
				log.Printf("unable to get NewRequest with error: %v", err)
			}
			r.RemoteAddr = "129.97.208.24:8888" //CA geoip
			go func(i *IPC) {
				proxyPolls(i, w, r)
				done <- true
			}(i)
			p = <-ctx.proxyPolls //manually unblock poll
			p.offerChannel <- nil
			<-done

			ctx.metrics.printMetrics()
			So(buf.String(), ShouldContainSubstring, "snowflake-ips-nat-restricted 1\nsnowflake-ips-nat-unrestricted 1\nsnowflake-ips-nat-unknown 0")
		})

		Convey("client failures by NAT type", func() {
			w := httptest.NewRecorder()
			data, err := createClientOffer(sdp, NATRestricted, "")
			So(err, ShouldBeNil)
			r, err := http.NewRequest("POST", "snowflake.broker/client", data)
			So(err, ShouldBeNil)

			clientOffers(i, w, r)

			ctx.metrics.printMetrics()
			So(buf.String(), ShouldContainSubstring, "client-denied-count 8\nclient-restricted-denied-count 8\nclient-unrestricted-denied-count 0\nclient-snowflake-match-count 0")

			buf.Reset()
			ctx.metrics.zeroMetrics()

			data, err = createClientOffer(sdp, NATUnrestricted, "")
			So(err, ShouldBeNil)
			r, err = http.NewRequest("POST", "snowflake.broker/client", data)
			So(err, ShouldBeNil)

			clientOffers(i, w, r)

			ctx.metrics.printMetrics()
			So(buf.String(), ShouldContainSubstring, "client-denied-count 8\nclient-restricted-denied-count 0\nclient-unrestricted-denied-count 8\nclient-snowflake-match-count 0")

			buf.Reset()
			ctx.metrics.zeroMetrics()

			data, err = createClientOffer(sdp, NATUnknown, "")
			So(err, ShouldBeNil)
			r, err = http.NewRequest("POST", "snowflake.broker/client", data)
			So(err, ShouldBeNil)

			clientOffers(i, w, r)

			ctx.metrics.printMetrics()
			So(buf.String(), ShouldContainSubstring, "client-denied-count 8\nclient-restricted-denied-count 8\nclient-unrestricted-denied-count 0\nclient-snowflake-match-count 0")
		})
		Convey("for country stats order", func() {

			stats := map[string]int{
				"IT": 50,
				"FR": 200,
				"TZ": 100,
				"CN": 250,
				"RU": 150,
				"CA": 1,
				"BE": 1,
				"PH": 1,
			}
			ctx.metrics.countryStats.counts = stats
			So(ctx.metrics.countryStats.Display(), ShouldEqual, "CN=250,FR=200,RU=150,TZ=100,IT=50,BE=1,CA=1,PH=1")
		})
	})
}
07070100000014000081A400000000000000000000000167D9BD4E00000445000000000000000000000000000000000000002A00000000snowflake-2.11.0/broker/snowflake-heap.go/*
Keeping track of pending available snowflake proxies.
*/

package main

/*
The Snowflake struct contains a single interaction
over the offer and answer channels.
*/
type Snowflake struct {
	id            string
	proxyType     string
	natType       string
	offerChannel  chan *ClientOffer
	answerChannel chan string
	clients       int
	index         int
}

// Implements heap.Interface, and holds Snowflakes.
type SnowflakeHeap []*Snowflake

func (sh SnowflakeHeap) Len() int { return len(sh) }

func (sh SnowflakeHeap) Less(i, j int) bool {
	// Snowflakes serving fewer clients should sort earlier.
	return sh[i].clients < sh[j].clients
}

func (sh SnowflakeHeap) Swap(i, j int) {
	sh[i], sh[j] = sh[j], sh[i]
	sh[i].index = i
	sh[j].index = j
}

func (sh *SnowflakeHeap) Push(s interface{}) {
	n := len(*sh)
	snowflake := s.(*Snowflake)
	snowflake.index = n
	*sh = append(*sh, snowflake)
}

// Only valid when Len() > 0.
func (sh *SnowflakeHeap) Pop() interface{} {
	flakes := *sh
	n := len(flakes)
	snowflake := flakes[n-1]
	snowflake.index = -1
	*sh = flakes[0 : n-1]
	return snowflake
}
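
// Usage sketch (illustrative, not part of the broker itself): SnowflakeHeap is
// driven through the standard container/heap package, which calls the
// Len/Less/Swap/Push/Pop methods defined above. Assuming "container/heap" is
// imported, a minimal example looks like:
//
//	h := new(SnowflakeHeap)
//	heap.Init(h)
//	heap.Push(h, &Snowflake{id: "a", clients: 2})
//	heap.Push(h, &Snowflake{id: "b", clients: 0})
//	// heap.Pop returns the snowflake serving the fewest clients ("b") and,
//	// via Pop above, marks it as removed by setting its index to -1.
//	least := heap.Pop(h).(*Snowflake)
//	_ = least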
07070100000015000081A400000000000000000000000167D9BD4E00001C35000000000000000000000000000000000000001F00000000snowflake-2.11.0/broker/sqs.gopackage main

import (
	"context"
	"log"
	"strconv"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sqs"
	"github.com/aws/aws-sdk-go-v2/service/sqs/types"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/messages"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/sqsclient"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/util"
)

const (
	cleanupThreshold = -2 * time.Minute
)

type sqsHandler struct {
	SQSClient       sqsclient.SQSClient
	SQSQueueURL     *string
	IPC             *IPC
	cleanupInterval time.Duration
}

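// pollMessages long-polls the broker's SQS queue for client poll requests and
// forwards every received message to chn, until ctx is cancelled.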
func (r *sqsHandler) pollMessages(ctx context.Context, chn chan<- *types.Message) {
	for {
		select {
		case <-ctx.Done():
			// if context is cancelled
			return
		default:
			res, err := r.SQSClient.ReceiveMessage(ctx, &sqs.ReceiveMessageInput{
				QueueUrl:            r.SQSQueueURL,
				MaxNumberOfMessages: 10,
				WaitTimeSeconds:     15,
				MessageAttributeNames: []string{
					string(types.QueueAttributeNameAll),
				},
			})

			if err != nil {
				log.Printf("SQSHandler: encountered error while polling for messages: %v\n", err)
				continue
			}

			for i := range res.Messages {
				// Send the address of the slice element, not of the loop
				// variable, so each message keeps its own value.
				chn <- &res.Messages[i]
			}
		}
	}
}

func (r *sqsHandler) cleanupClientQueues(ctx context.Context) {
	for range time.NewTicker(r.cleanupInterval).C {
		// Runs at fixed intervals to clean up any client queues that were last changed more than 2 minutes ago
		select {
		case <-ctx.Done():
			// if context is cancelled
			return
		default:
			queueURLsList := []string{}
			var nextToken *string
			for {
				res, err := r.SQSClient.ListQueues(ctx, &sqs.ListQueuesInput{
					QueueNamePrefix: aws.String("snowflake-client-"),
					MaxResults:      aws.Int32(1000),
					NextToken:       nextToken,
				})
				if err != nil {
					log.Printf("SQSHandler: encountered error while retrieving client queues to clean up: %v\n", err)
					// client queues will be cleaned up the next time the cleanup operation is triggered automatically
					break
				}
				queueURLsList = append(queueURLsList, res.QueueUrls...)
				if res.NextToken == nil {
					break
				} else {
					nextToken = res.NextToken
				}
			}

			numDeleted := 0
			cleanupCutoff := time.Now().Add(cleanupThreshold)
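			// cleanupThreshold is -2 minutes, so cleanupCutoff lies two minutes in
			// the past: any client queue whose LastModifiedTimestamp is older than
			// that is treated as abandoned and deleted below.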
			for _, queueURL := range queueURLsList {
				if !strings.Contains(queueURL, "snowflake-client-") {
					continue
				}
				res, err := r.SQSClient.GetQueueAttributes(ctx, &sqs.GetQueueAttributesInput{
					QueueUrl:       aws.String(queueURL),
					AttributeNames: []types.QueueAttributeName{types.QueueAttributeNameLastModifiedTimestamp},
				})
				if err != nil {
					// According to the AWS SQS docs, the deletion process for a queue can take up to 60 seconds. So the queue
					// can be in the process of being deleted, but will still be returned by the ListQueues operation, but
					// fail when we try to GetQueueAttributes for the queue
					log.Printf("SQSHandler: encountered error while getting attribute of client queue %s. queue may already be deleted.\n", queueURL)
					continue
				}
				lastModifiedInt64, err := strconv.ParseInt(res.Attributes[string(types.QueueAttributeNameLastModifiedTimestamp)], 10, 64)
				if err != nil {
					log.Printf("SQSHandler: encountered invalid lastModifiedTimetamp value from client queue %s: %v\n", queueURL, err)
					continue
				}
				lastModified := time.Unix(lastModifiedInt64, 0)
				if lastModified.Before(cleanupCutoff) {
					_, err := r.SQSClient.DeleteQueue(ctx, &sqs.DeleteQueueInput{
						QueueUrl: aws.String(queueURL),
					})
					if err != nil {
						log.Printf("SQSHandler: encountered error when deleting client queue %s: %v\n", queueURL, err)
						continue
					} else {
						numDeleted += 1
					}

				}
			}
			log.Printf("SQSHandler: finished running iteration of client queue cleanup. found and deleted %d client queues.\n", numDeleted)
		}
	}
}

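// handleMessage creates (or reuses) a per-client answer queue named
// "snowflake-client-<ClientID>", forwards the encoded client poll request to
// the broker over IPC, and sends the broker's answer back on that queue.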
func (r *sqsHandler) handleMessage(context context.Context, message *types.Message) {
	var encPollReq []byte
	var response []byte
	var err error

	clientID := message.MessageAttributes["ClientID"].StringValue
	if clientID == nil {
		log.Println("SQSHandler: got SDP offer in SQS message with no client ID. ignoring this message.")
		return
	}

	res, err := r.SQSClient.CreateQueue(context, &sqs.CreateQueueInput{
		QueueName: aws.String("snowflake-client-" + *clientID),
	})
	if err != nil {
		log.Printf("SQSHandler: error encountered when creating answer queue for client %s: %v\n", *clientID, err)
		return
	}
	answerSQSURL := res.QueueUrl

	encPollReq = []byte(*message.Body)

	// Get best guess Client IP for geolocating
	remoteAddr := ""
	req, err := messages.DecodeClientPollRequest(encPollReq)
	if err != nil {
		log.Printf("SQSHandler: error encounted when decoding client poll request %s: %v\n", *clientID, err)
	} else {
		sdp, err := util.DeserializeSessionDescription(req.Offer)
		if err != nil {
			log.Printf("SQSHandler: error encounted when deserializing session desc %s: %v\n", *clientID, err)
		} else {
			candidateAddrs := util.GetCandidateAddrs(sdp.SDP)
			if len(candidateAddrs) > 0 {
				remoteAddr = candidateAddrs[0].String()
			}
		}
	}

	arg := messages.Arg{
		Body:             encPollReq,
		RemoteAddr:       remoteAddr,
		RendezvousMethod: messages.RendezvousSqs,
	}
	err = r.IPC.ClientOffers(arg, &response)

	if err != nil {
		log.Printf("SQSHandler: error encountered when handling message: %v\n", err)
		return
	}

	_, err = r.SQSClient.SendMessage(context, &sqs.SendMessageInput{
		QueueUrl:    answerSQSURL,
		MessageBody: aws.String(string(response)),
	})
	if err != nil {
		log.Printf("SQSHandler: error encountered when sending answer to client %s: %v\n", *clientID, err)
	}
}

func (r *sqsHandler) deleteMessage(context context.Context, message *types.Message) {
	r.SQSClient.DeleteMessage(context, &sqs.DeleteMessageInput{
		QueueUrl:      r.SQSQueueURL,
		ReceiptHandle: message.ReceiptHandle,
	})
}

func newSQSHandler(context context.Context, client sqsclient.SQSClient, sqsQueueName string, region string, i *IPC) (*sqsHandler, error) {
	// Creates the queue if a queue with the same name doesn't exist. If a queue with the same name and attributes
	// already exists, nothing happens. If a queue with the same name but different attributes exists, an error is
	// returned.
	res, err := client.CreateQueue(context, &sqs.CreateQueueInput{
		QueueName: aws.String(sqsQueueName),
		Attributes: map[string]string{
			"MessageRetentionPeriod": strconv.FormatInt(int64((5 * time.Minute).Seconds()), 10),
		},
	})

	if err != nil {
		return nil, err
	}

	return &sqsHandler{
		SQSClient:       client,
		SQSQueueURL:     res.QueueUrl,
		IPC:             i,
		cleanupInterval: time.Second * 30,
	}, nil
}

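// PollAndHandleMessages starts the long-poll loop and the periodic client
// queue cleanup, then handles each received message in its own goroutine,
// deleting the message from the broker queue once it has been handled.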
func (r *sqsHandler) PollAndHandleMessages(ctx context.Context) {
	log.Println("SQSHandler: Starting to poll for messages at: " + *r.SQSQueueURL)
	messagesChn := make(chan *types.Message, 20)
	go r.pollMessages(ctx, messagesChn)
	go r.cleanupClientQueues(ctx)

	for message := range messagesChn {
		select {
		case <-ctx.Done():
			// if context is cancelled
			return
		default:
			go func(msg *types.Message) {
				r.handleMessage(ctx, msg)
				r.deleteMessage(ctx, msg)
			}(message)
		}
	}
}
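
// Wiring sketch (illustrative; assumes the AWS SDK v2 *sqs.Client satisfies
// the sqsclient.SQSClient interface and that queueName, region and ipc are
// supplied by the caller):
//
//	ctx := context.Background()
//	cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(region))
//	if err != nil {
//		log.Fatal(err)
//	}
//	handler, err := newSQSHandler(ctx, sqs.NewFromConfig(cfg), queueName, region, ipc)
//	if err != nil {
//		log.Fatal(err)
//	}
//	go handler.PollAndHandleMessages(ctx)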
07070100000016000081A400000000000000000000000167D9BD4E00002D8A000000000000000000000000000000000000002400000000snowflake-2.11.0/broker/sqs_test.gopackage main

import (
	"bytes"
	"context"
	"errors"
	"log"
	"strconv"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sqs"
	"github.com/aws/aws-sdk-go-v2/service/sqs/types"
	"github.com/golang/mock/gomock"
	. "github.com/smartystreets/goconvey/convey"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/sqsclient"
)

func TestSQS(t *testing.T) {

	Convey("Context", t, func() {
		buf := new(bytes.Buffer)
		ipcCtx := NewBrokerContext(log.New(buf, "", 0), "", "")
		i := &IPC{ipcCtx}

		Convey("Responds to SQS client offers...", func() {
			ctrl := gomock.NewController(t)
			mockSQSClient := sqsclient.NewMockSQSClient(ctrl)

			brokerSQSQueueName := "example-name"
			responseQueueURL := aws.String("https://sqs.us-east-1.amazonaws.com/testing")

			runSQSHandler := func(sqsHandlerContext context.Context) {
				mockSQSClient.EXPECT().CreateQueue(sqsHandlerContext, &sqs.CreateQueueInput{
					QueueName: aws.String(brokerSQSQueueName),
					Attributes: map[string]string{
						"MessageRetentionPeriod": strconv.FormatInt(int64((5 * time.Minute).Seconds()), 10),
					},
				}).Return(&sqs.CreateQueueOutput{
					QueueUrl: responseQueueURL,
				}, nil).Times(1)
				sqsHandler, err := newSQSHandler(sqsHandlerContext, mockSQSClient, brokerSQSQueueName, "example-region", i)
				So(err, ShouldBeNil)
				go sqsHandler.PollAndHandleMessages(sqsHandlerContext)
			}

			messageBody := aws.String("1.0\n{\"offer\": \"fake\", \"nat\": \"unknown\"}")
			receiptHandle := "fake-receipt-handle"
			sqsReceiveMessageInput := sqs.ReceiveMessageInput{
				QueueUrl:            responseQueueURL,
				MaxNumberOfMessages: 10,
				WaitTimeSeconds:     15,
				MessageAttributeNames: []string{
					string(types.QueueAttributeNameAll),
				},
			}
			sqsDeleteMessageInput := sqs.DeleteMessageInput{
				QueueUrl:      responseQueueURL,
				ReceiptHandle: &receiptHandle,
			}

			Convey("by ignoring it if no client id specified", func(c C) {
				sqsHandlerContext, sqsCancelFunc := context.WithCancel(context.Background())
				mockSQSClient.EXPECT().ReceiveMessage(sqsHandlerContext, &sqsReceiveMessageInput).MinTimes(1).DoAndReturn(
					func(ctx context.Context, input *sqs.ReceiveMessageInput, optFns ...func(*sqs.Options)) (*sqs.ReceiveMessageOutput, error) {
						return &sqs.ReceiveMessageOutput{
							Messages: []types.Message{
								{
									Body:          messageBody,
									ReceiptHandle: &receiptHandle,
								},
							},
						}, nil
					},
				)
				mockSQSClient.EXPECT().DeleteMessage(sqsHandlerContext, &sqsDeleteMessageInput).MinTimes(1).Do(
					func(ctx context.Context, input *sqs.DeleteMessageInput, optFns ...func(*sqs.Options)) {
						sqsCancelFunc()
					},
				)
				// We expect no queues to be created
				mockSQSClient.EXPECT().CreateQueue(gomock.Any(), gomock.Any()).Times(0)
				runSQSHandler(sqsHandlerContext)
				<-sqsHandlerContext.Done()
			})

			Convey("by doing nothing if an error occurs upon receipt of the message", func(c C) {

				sqsHandlerContext, sqsCancelFunc := context.WithCancel(context.Background())

				mockSQSClient.EXPECT().ReceiveMessage(sqsHandlerContext, &sqsReceiveMessageInput).MinTimes(1).DoAndReturn(
					func(ctx context.Context, input *sqs.ReceiveMessageInput, optFns ...func(*sqs.Options)) (*sqs.ReceiveMessageOutput, error) {
						sqsCancelFunc()
						return nil, errors.New("error")
					},
				)
				// We expect no queues to be created or deleted
				mockSQSClient.EXPECT().CreateQueue(gomock.Any(), gomock.Any()).Times(0)
				mockSQSClient.EXPECT().DeleteMessage(gomock.Any(), gomock.Any()).Times(0)
				runSQSHandler(sqsHandlerContext)
				<-sqsHandlerContext.Done()
			})

			Convey("by attempting to create a new sqs queue...", func() {
				clientId := "fake-id"
				sqsCreateQueueInput := sqs.CreateQueueInput{
					QueueName: aws.String("snowflake-client-fake-id"),
				}
				validMessage := &sqs.ReceiveMessageOutput{
					Messages: []types.Message{
						{
							Body: messageBody,
							MessageAttributes: map[string]types.MessageAttributeValue{
								"ClientID": {StringValue: &clientId},
							},
							ReceiptHandle: &receiptHandle,
						},
					},
				}
				Convey("and does not attempt to send a message via SQS if queue creation fails.", func(c C) {
					sqsHandlerContext, sqsCancelFunc := context.WithCancel(context.Background())

					mockSQSClient.EXPECT().ReceiveMessage(sqsHandlerContext, &sqsReceiveMessageInput).AnyTimes().DoAndReturn(
						func(ctx context.Context, input *sqs.ReceiveMessageInput, optFns ...func(*sqs.Options)) (*sqs.ReceiveMessageOutput, error) {
							sqsCancelFunc()
							return validMessage, nil
						})
					mockSQSClient.EXPECT().CreateQueue(sqsHandlerContext, &sqsCreateQueueInput).Return(nil, errors.New("error")).AnyTimes()
					mockSQSClient.EXPECT().DeleteMessage(sqsHandlerContext, &sqsDeleteMessageInput).AnyTimes()
					runSQSHandler(sqsHandlerContext)
					<-sqsHandlerContext.Done()
				})

				Convey("and responds with a proxy answer if available.", func(c C) {
					sqsHandlerContext, sqsCancelFunc := context.WithCancel(context.Background())
					var numTimes atomic.Uint32

					mockSQSClient.EXPECT().ReceiveMessage(sqsHandlerContext, &sqsReceiveMessageInput).AnyTimes().DoAndReturn(
						func(ctx context.Context, input *sqs.ReceiveMessageInput, optFns ...func(*sqs.Options)) (*sqs.ReceiveMessageOutput, error) {

							n := numTimes.Add(1)
							if n == 1 {
								snowflake := ipcCtx.AddSnowflake("fake", "", NATUnrestricted, 0)
								go func(c C) {
									<-snowflake.offerChannel
									snowflake.answerChannel <- "fake answer"
								}(c)
								return validMessage, nil
							}
							return nil, errors.New("error")

						})
					mockSQSClient.EXPECT().CreateQueue(sqsHandlerContext, &sqsCreateQueueInput).Return(&sqs.CreateQueueOutput{
						QueueUrl: responseQueueURL,
					}, nil).AnyTimes()
					mockSQSClient.EXPECT().DeleteMessage(gomock.Any(), gomock.Any()).AnyTimes()
					mockSQSClient.EXPECT().SendMessage(sqsHandlerContext, gomock.Any()).Times(1).DoAndReturn(
						func(ctx context.Context, input *sqs.SendMessageInput, optFns ...func(*sqs.Options)) (*sqs.SendMessageOutput, error) {
							c.So(input.MessageBody, ShouldEqual, aws.String("{\"answer\":\"fake answer\"}"))
							// Ensure that match is correctly recorded in metrics
							ipcCtx.metrics.printMetrics()
							c.So(buf.String(), ShouldContainSubstring, `client-denied-count 0
client-restricted-denied-count 0
client-unrestricted-denied-count 0
client-snowflake-match-count 8
client-snowflake-timeout-count 0
client-http-count 0
client-http-ips 
client-ampcache-count 0
client-ampcache-ips 
client-sqs-count 8
client-sqs-ips ??=8
`)
							sqsCancelFunc()
							return &sqs.SendMessageOutput{}, nil
						},
					)
					runSQSHandler(sqsHandlerContext)

					<-sqsHandlerContext.Done()
				})
			})
		})

		Convey("Cleans up SQS client queues...", func() {
			brokerSQSQueueName := "example-name"
			responseQueueURL := aws.String("https://sqs.us-east-1.amazonaws.com/testing")

			ctrl := gomock.NewController(t)
			mockSQSClient := sqsclient.NewMockSQSClient(ctrl)

			runSQSHandler := func(sqsHandlerContext context.Context) {

				mockSQSClient.EXPECT().CreateQueue(sqsHandlerContext, &sqs.CreateQueueInput{
					QueueName: aws.String(brokerSQSQueueName),
					Attributes: map[string]string{
						"MessageRetentionPeriod": strconv.FormatInt(int64((5 * time.Minute).Seconds()), 10),
					},
				}).Return(&sqs.CreateQueueOutput{
					QueueUrl: responseQueueURL,
				}, nil).Times(1)

				mockSQSClient.EXPECT().ReceiveMessage(sqsHandlerContext, gomock.Any()).AnyTimes().Return(
					&sqs.ReceiveMessageOutput{
						Messages: []types.Message{},
					}, nil,
				)

				sqsHandler, err := newSQSHandler(sqsHandlerContext, mockSQSClient, brokerSQSQueueName, "example-region", i)
				So(err, ShouldBeNil)
				// Set the cleanup interval to 1 ns so we can immediately test the cleanup logic
				sqsHandler.cleanupInterval = time.Nanosecond

				go sqsHandler.PollAndHandleMessages(sqsHandlerContext)
			}

			Convey("does nothing if there are no open queues.", func() {
				var wg sync.WaitGroup
				wg.Add(1)
				sqsHandlerContext, sqsCancelFunc := context.WithCancel(context.Background())
				defer wg.Wait()

				mockSQSClient.EXPECT().ListQueues(sqsHandlerContext, &sqs.ListQueuesInput{
					QueueNamePrefix: aws.String("snowflake-client-"),
					MaxResults:      aws.Int32(1000),
					NextToken:       nil,
				}).DoAndReturn(func(ctx context.Context, input *sqs.ListQueuesInput, optFns ...func(*sqs.Options)) (*sqs.ListQueuesOutput, error) {
					wg.Done()
					// Cancel the handler context since we are only interested in testing one iteration of the cleanup
					sqsCancelFunc()
					return &sqs.ListQueuesOutput{
						QueueUrls: []string{},
					}, nil
				})

				runSQSHandler(sqsHandlerContext)
			})

			Convey("deletes open queue when there is one open queue.", func(c C) {
				var wg sync.WaitGroup
				wg.Add(1)
				sqsHandlerContext, sqsCancelFunc := context.WithCancel(context.Background())

				clientQueueUrl1 := "https://sqs.us-east-1.amazonaws.com/snowflake-client-1"
				clientQueueUrl2 := "https://sqs.us-east-1.amazonaws.com/snowflake-client-2"

				gomock.InOrder(
					mockSQSClient.EXPECT().ListQueues(sqsHandlerContext, &sqs.ListQueuesInput{
						QueueNamePrefix: aws.String("snowflake-client-"),
						MaxResults:      aws.Int32(1000),
						NextToken:       nil,
					}).Times(1).Return(&sqs.ListQueuesOutput{
						QueueUrls: []string{
							clientQueueUrl1,
							clientQueueUrl2,
						},
					}, nil),
					mockSQSClient.EXPECT().ListQueues(sqsHandlerContext, &sqs.ListQueuesInput{
						QueueNamePrefix: aws.String("snowflake-client-"),
						MaxResults:      aws.Int32(1000),
						NextToken:       nil,
					}).Times(1).DoAndReturn(func(ctx context.Context, input *sqs.ListQueuesInput, optFns ...func(*sqs.Options)) (*sqs.ListQueuesOutput, error) {
						// Executed on second iteration of cleanupClientQueues loop. This means that one full iteration has completed and we can verify the results of that iteration
						wg.Done()
						sqsCancelFunc()
						return &sqs.ListQueuesOutput{
							QueueUrls: []string{},
						}, nil
					}),
				)

				gomock.InOrder(
					mockSQSClient.EXPECT().GetQueueAttributes(sqsHandlerContext, &sqs.GetQueueAttributesInput{
						QueueUrl:       aws.String(clientQueueUrl1),
						AttributeNames: []types.QueueAttributeName{types.QueueAttributeNameLastModifiedTimestamp},
					}).Times(1).Return(&sqs.GetQueueAttributesOutput{
						Attributes: map[string]string{
							string(types.QueueAttributeNameLastModifiedTimestamp): "0",
						}}, nil),

					mockSQSClient.EXPECT().GetQueueAttributes(sqsHandlerContext, &sqs.GetQueueAttributesInput{
						QueueUrl:       aws.String(clientQueueUrl2),
						AttributeNames: []types.QueueAttributeName{types.QueueAttributeNameLastModifiedTimestamp},
					}).Times(1).Return(&sqs.GetQueueAttributesOutput{
						Attributes: map[string]string{
							string(types.QueueAttributeNameLastModifiedTimestamp): "0",
						}}, nil),
				)

				gomock.InOrder(
					mockSQSClient.EXPECT().DeleteQueue(sqsHandlerContext, &sqs.DeleteQueueInput{
						QueueUrl: aws.String(clientQueueUrl1),
					}).Return(&sqs.DeleteQueueOutput{}, nil),
					mockSQSClient.EXPECT().DeleteQueue(sqsHandlerContext, &sqs.DeleteQueueInput{
						QueueUrl: aws.String(clientQueueUrl2),
					}).Return(&sqs.DeleteQueueOutput{}, nil),
				)

				runSQSHandler(sqsHandlerContext)
				wg.Wait()
			})
		})
	})
}
07070100000017000081A400000000000000000000000167D9BD4E00000113000000000000000000000000000000000000002C00000000snowflake-2.11.0/broker/test_bridgeList.txt{"displayName":"flakey", "webSocketAddress":"wss://snowflake.torproject.net", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80A72"}
{"displayName":"second", "webSocketAddress":"wss://02.snowflake.torproject.net", "fingerprint":"8838024498816A039FCBBAB14E6F40A0843051FA"}
07070100000018000081A400000000000000000000000167D9BD4E000078EA000000000000000000000000000000000000002300000000snowflake-2.11.0/broker/test_geoip# Last updated based on February 7 2018 Maxmind GeoLite2 Country
# wget https://geolite.maxmind.com/download/geoip/database/GeoLite2-Country.mmdb.gz
# gunzip GeoLite2-Country.mmdb.gz
# python mmdb-convert.py GeoLite2-Country.mmdb
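# Each line below is an inclusive IPv4 range encoded as decimal 32-bit
# integers (start,end), followed by an ISO 3166-1 alpha-2 country code.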
16777216,16777471,AU
16777472,16778239,CN
16778240,16779263,AU
16779264,16781311,CN
16781312,16785407,JP
16785408,16793599,CN
16793600,16809983,JP
16809984,16842751,TH
16842752,16843007,CN
16843008,16843263,AU
16843264,16859135,CN
16859136,16875519,JP
16875520,16908287,TH
16908288,16909055,CN
16909056,16909311,US
16909312,16941055,CN
16941056,16973823,TH
16973824,17039359,CN
17039360,17039615,AU
2111307776,2111832063,CN
2111832064,2112487423,TW
2112487424,2112618495,VN
2112618496,2112880639,NZ
2112880640,2113560063,KR
2113560064,2113560319,SG
2113560320,2113683455,KR
2113683456,2113684607,JP
2113684608,2113684671,TW
2113684672,2113685663,JP
2113685664,2113685695,SG
2113685696,2113687999,JP
2113688000,2113688031,AU
2113688032,2113688959,JP
2113688960,2113688991,SG
2113688992,2113691135,JP
2113691136,2113691391,SG
2113691392,2113692415,JP
2113692416,2113692671,HK
2113692672,2113693599,JP
2113693600,2113693615,HK
2113693616,2113693879,JP
2113693880,2113693887,AU
2113693888,2113693951,JP
2113693952,2113694207,HK
2113694208,2113695279,JP
2113695280,2113695287,SG
2113695288,2113716223,JP
2113716224,2113724927,SG
2113724928,2113725183,IN
2113725184,2113728511,SG
2113728512,2113732607,JP
2113732608,2113761279,AU
2113761280,2113765375,VN
2113765376,2113798143,HK
2113798144,2113811455,AU
2113811456,2113812479,GB
2113812480,2113813503,JP
2113813504,2113830911,AU
2113830912,2113863679,CN
2113863680,2113929215,AU
2113929216,2130706431,JP
2147483648,2147483903,NL
2147483904,2147484671,RO
2147484672,2147485695,TR
2147485696,2147487743,DK
2147487744,2147489791,NO
2147489792,2147491839,RU
2147491840,2147494911,DE
2147494912,2147495167,RO
2147495168,2147495423,DE
2147495424,2147496959,RO
2147496960,2147497215,ES
2147497216,2147497471,RO
2147497472,2147497727,PL
2147497728,2147498239,DE
2147498240,2147498495,RO
2147498496,2147500031,DE
2147500032,2147501055,NL
2147501056,2147501311,SK
2147501312,2147501567,NL
2147501568,2147501823,GL
2147501824,2147502079,US
2147502080,2147504127,DK
2147504128,2147508223,RU
2147508224,2147510271,DE
2147510272,2147510783,UA
2147510784,2147511039,RU
2147511040,2147512319,CY
2147512320,2147514879,DE
2147514880,2147516415,IT
2147516416,2147520511,RU
2147520512,2147524607,DE
2147524608,2147526655,RU
2147526656,2147528703,UA
2147528704,2147532799,CZ
2147532800,2147534847,DE
2147534848,2147549183,CY
2147549184,2147557375,US
2147557376,2147557631,TW
2147557632,2147557887,SG
2147557888,2147558143,DE
2147558144,2147558399,TH
2147558400,2147558655,KR
2147558656,2147558911,TW
2147558912,2147559167,SG
2147559168,2147559423,TH
2147559424,2147559679,SG
2147559680,2147559935,US
2147559936,2147560191,DE
2147560192,2147560447,RU
2147560448,2147560703,TH
2147560704,2147560959,TW
2147560960,2147562239,US
2147562240,2147562495,RU
2147562496,2147563263,US
2147563264,2147563519,RU
2147563520,2147564287,US
2147564288,2147564543,AE
2147564544,2147564799,US
2147564800,2147565055,SG
2147565056,2147565311,HK
2147565312,2147565999,TW
2147566000,2147566047,JP
2147566048,2147566079,TW
2147566080,2147569407,US
2147569408,2147569663,TH
2147569664,2147570431,US
2147570432,2147570687,JP
2147570688,2147571455,US
2147571456,2147571711,SG
2147571712,2147573503,US
2147573504,2147573759,SG
2147573760,2147575039,US
2147575040,2147575551,TW
2147575552,2147575807,SG
2147575808,2147576575,US
2147576576,2147576831,TW
2147576832,2147577087,TH
2147577088,2147577599,ID
2147577600,2147579647,US
2147579648,2147579903,ID
2147579904,2147580927,US
2147580928,2147581183,ID
2147581184,2147581439,TH
2147581440,2147592703,US
2147592704,2147592959,HK
2147592960,2147600127,US
2147600128,2147600383,SG
2147600384,2147603711,US
2147603712,2147603967,IN
2147603968,2147942399,US
2147942400,2148007935,DE
2148007936,2148220515,US
2148220516,2148220535,AU
2148220536,2148229151,US
2148229152,2148229183,CA
2148229184,2148459007,US
2148459008,2148459519,TW
2148459520,2148532223,US
2148532224,2148597759,GB
2148597760,2148925439,US
2148925440,2148990975,JP
2148990976,2149253119,US
2149253120,2149384191,JP
2149384192,2150039551,US
2150039552,2150105087,NO
2150105088,2150203391,GB
2150203392,2150236159,AF
2150236160,2150301695,US
2150301696,2150367231,CA
2150367232,2150432767,US
2150432768,2150498303,IT
2150498304,2150957055,US
2150957056,2151022591,JP
2151022592,2151743487,US
2151743488,2151759871,BY
2151759872,2151768063,US
2151768064,2151770111,GB
2151770112,2151772159,BA
2151772160,2151776255,IT
2151776256,2151778303,AT
2151778304,2151780351,RU
2151780352,2151782399,DE
2151782400,2151784447,ES
2151784448,2151792639,IR
2151792640,2151794687,CH
2151794688,2151796735,IT
2151796736,2151800831,DE
2151800832,2151809023,PT
2151809024,2151940095,IT
2151940096,2152464383,RU
2152464384,2152529919,DK
2152529920,2152562687,NO
2152562688,2152595455,DK
2152595456,2152726527,FR
2152726528,2153119743,US
2153119744,2153185279,GB
2153185280,2153250815,SE
2153250816,2153381887,US
2153381888,2153382143,JP
2153382144,2153383679,US
2153383680,2153383935,HK
2153383936,2153384447,US
2153384448,2153385471,GB
2153385472,2153385599,AT
2153385600,2153385663,CZ
2153385664,2153385727,FI
2153385728,2153385791,PL
2153385792,2153385855,PT
2153385856,2153385919,TR
2153385920,2153385983,US
2153385984,2153387007,GB
2153387008,2153387263,CH
2153387264,2153387519,IS
2153387520,2153387775,IE
2153387776,2153388031,CH
2153388032,2153388287,ES
2153388288,2153388543,PL
2153388544,2153391615,US
2153391616,2153391871,HK
2153391872,2153394431,US
2153394432,2153394943,SG
2153394944,2153395455,US
2153395456,2153395711,VN
2153395712,2153396991,US
2153396992,2153397247,IL
2153397248,2153397503,IN
2153397504,2153397759,SA
2153397760,2153398015,QA
2153398016,2153398271,BH
2153398272,2153398783,JP
2153398784,2153399551,US
2153399552,2153399807,KR
2153399808,2153400319,HK
2153400320,2153401087,TW
2153401088,2153401599,MO
2153401600,2153402111,VN
2153402112,2153402367,PH
2153402368,2153403135,KR
2153403136,2153406463,US
2153406464,2153407487,JP
2153407488,2153407743,HK
2153407744,2153407999,AE
2153408000,2153408511,BR
2153408512,2153408767,AU
2153408768,2153409023,PA
2153409024,2153409279,AR
2153409280,2153409535,CR
2153409536,2153409791,CO
2153409792,2153410047,MX
2153410048,2153410303,CA
2153410304,2153410559,TW
2153410560,2153410815,PA
2153410816,2153411071,AR
2153411072,2153411327,CR
2153411328,2153411583,CO
2153411584,2153411839,MX
2153411840,2153412095,SV
2153412096,2153412351,TW
2153412352,2153412607,UY
2153412608,2153413119,AU
2153413120,2153413631,BR
2153413632,2153578495,US
2153578496,2153644031,FR
2153644032,2153906175,US
2153906176,2153971711,GB
2153971712,2154037247,US
2154037248,2154102783,CA
2154102784,2154430463,US
2154430464,2154495999,SG
2154496000,2154561535,US
2154561536,2154627071,CN
2154627072,2155610111,US
2155610112,2155675647,UA
2155675648,2155806719,US
2155806720,2155808767,IT
2155810816,2155812863,FR
2155812864,2155814911,GB
2155814912,2155819007,NL
2155819008,2155819519,DE
2155819520,2155821055,CH
2155821056,2155823103,IT
2155823104,2155825151,DE
2155825152,2155827199,AE
2155827200,2155831295,PL
2155831296,2155833343,RU
2155833344,2155833855,SE
2155833856,2155834623,NL
2155834624,2155834879,LU
2155834880,2155835391,NL
2155835392,2155839487,RO
2155839488,2155843583,FR
2155843584,2155845631,RU
2155845632,2155847679,DE
2155847680,2155849727,ES
2155849728,2155851775,TR
2155853824,2155855871,SE
2155855872,2155872255,SA
2155872256,2156003327,US
2156003328,2156134399,AT
2156134400,2156265471,US
2156265472,2156331007,KR
2156331008,2156593151,US
2156593152,2156658687,IL
2156658688,2156691455,IR
2156691456,2156697599,FR
2156697600,2156699647,GR
2156699648,2156703743,RU
2156703744,2156707839,BG
2156707840,2156709887,RU
2156709888,2156711935,ES
2156711936,2156713983,DE
2156713984,2156716031,NL
2156716032,2156718079,RO
2156718080,2156720127,IS
2156720128,2156724223,BY
2156724224,2156855295,CH
2156855296,2156920831,US
2156920832,2156986367,CA
2156986368,2159017983,US
2159017984,2159083519,DE
2159083520,2159149055,US
2159149056,2159280127,CH
2159280128,2159542271,US
2159542272,2159607807,AU
2159607808,2159673343,IN
2159673344,2159869951,US
2159869952,2159935487,CA
2159935488,2160525311,US
2160525312,2160533503,SG
2160533504,2160541695,NL
2160541696,2160590847,SG
2160590848,2160656383,US
2160656384,2160657407,BR
2160657408,2160658431,HN
2160658432,2160661503,BR
2160661504,2160662527,AR
2160662528,2160664575,BR
2160664576,2160666623,CL
2160666624,2160676863,BR
2160676864,2160677887,AR
2160677888,2160678911,BR
2160678912,2160679935,GF
2160679936,2160684031,BR
2160684032,2160685055,AR
2160685056,2160686079,DO
2160686080,2160687103,CL
2160687104,2160690175,BR
2160690176,2160691199,AR
2160691200,2160693247,BR
2160693248,2160694271,CR
2160694272,2160697343,BR
2160697344,2160698367,EC
2160698368,2160699391,BR
2160699392,2160700415,AR
2160700416,2160713727,BR
2160713728,2160714751,CL
2160714752,2160716799,BR
2160716800,2160717823,AR
2160717824,2160721919,BR
2160721920,2160852991,US
2160852992,2160885759,RU
2160885760,2160893951,AT
2160893952,2160902143,RU
2160902144,2160906239,NL
2160906240,2160908287,FR
2160908288,2160910335,PL
2160910336,2160914431,NL
2160914432,2160918527,SA
2160918528,2161508351,US
2161508352,2161573887,FI
2161573888,2162687999,US
2162688000,2162753535,GB
2162753536,2162819071,CA
2162819072,2162884607,SA
2162884608,2163212287,US
2163212288,2163277823,GB
2163277824,2163408895,US
2163408896,2163474431,GB
2163474432,2163605503,US
2163605504,2163638271,DE
2163638272,2163638527,US
2163638528,2163671039,DE
2163671040,2163867647,US
2163867648,2163933183,AU
2163933184,2164260863,US
2164260864,2164326399,CM
2164326400,2164981759,US
2164981760,2165112831,GB
2165112832,2165178367,DE
2165178368,2165309439,US
2165309440,2165374975,SE
2165374976,2165440511,US
2165440512,2165506047,NG
2165506048,2165571583,US
2165571584,2165637119,FR
2165637120,2165964799,US
2165964800,2166030335,DE
2166030336,2166095871,AT
2166095872,2166161407,CN
2166161408,2166292479,US
2166292480,2166358015,GB
2166358016,2166562559,US
2166562560,2166562815,FI
2166562816,2166571007,US
2166571008,2166575103,GB
2166575104,2166594559,US
2166594560,2166594815,PL
2166594816,2166729471,US
2166729472,2166729727,CA
2166729728,2167209983,US
2167209984,2167242751,DZ
2167242752,2167275519,BF
2167275520,2167930879,US
2167930880,2167996415,NG
2167996416,2168193023,US
2168193024,2168258559,JP
2168258560,2168651775,US
2168651776,2168717311,GB
2168717312,2168782847,US
2168782848,2168913919,DE
2168913920,2169372671,US
2169372672,2169438207,AU
2169438208,2170028031,US
2170028032,2170093567,FR
2170093568,2170159103,US
2170159104,2170224639,VE
2170224640,2170421247,US
2170421248,2170486783,AU
2170486784,2170552319,US
2170552320,2170617855,AU
2170617856,2170683391,CA
2170683392,2170814463,US
2170814464,2170879999,CA
2170880000,2170945535,US
2170945536,2171011071,FR
3652593408,3652593471,ES
3652593472,3652593511,FR
3652593512,3652593519,ES
3652593520,3652593631,FR
3652593632,3652593663,PT
3652593664,3652593943,FR
3652593944,3652593951,ES
3652593952,3652595007,FR
3652595008,3652595071,DE
3652595072,3652595167,FR
3652595168,3652595183,ES
3652595184,3652595871,FR
3652595872,3652595935,PL
3652595936,3652596351,FR
3652596352,3652596415,IT
3652596416,3652596479,FR
3652596480,3652596543,ES
3652596544,3652596799,FR
3652596800,3652596831,CZ
3652596832,3652597183,FR
3652597184,3652597247,DE
3652597248,3652597375,FR
3652597376,3652597383,ES
3652597384,3652597407,FR
3652597408,3652597439,PL
3652597440,3652597887,FR
3652597888,3652597903,GB
3652597904,3652599569,FR
3652599570,3652599570,PT
3652599571,3652599679,FR
3652599680,3652599743,IT
3652599744,3652601855,FR
3652601856,3652603903,PL
3652603904,3652608191,FR
3652608192,3652608223,PT
3652608224,3652608255,FR
3652608256,3652608511,GB
3652608512,3652608639,FR
3652608640,3652608767,GB
3652608768,3652609023,FR
3652609024,3652609279,GB
3652609280,3652609503,FR
3652609504,3652609535,FI
3652609536,3652609727,FR
3652609728,3652609759,PL
3652609760,3652609791,CZ
3652609792,3652609823,FR
3652609824,3652609855,CZ
3652609856,3652609919,FR
3652609920,3652609983,ES
3652609984,3652610047,BE
3652610048,3652611135,FR
3652611136,3652611199,ES
3652611200,3652611231,FR
3652611232,3652611263,PT
3652611264,3652611679,FR
3652611680,3652611711,PT
3652611712,3652611775,NL
3652611776,3652612223,FR
3652612224,3652612287,ES
3652612288,3652612351,FR
3652612352,3652612479,GB
3652612480,3652612543,IE
3652612544,3652612607,NL
3652612608,3652613335,FR
3652613336,3652613343,ES
3652613344,3652613375,FR
3652613376,3652613407,FI
3652613408,3652613615,FR
3652613616,3652613623,ES
3652613624,3652613679,FR
3652613680,3652613695,LT
3652613696,3652614015,FR
3652614016,3652614079,BE
3652614080,3652615871,FR
3652615872,3652615935,DE
3652615936,3652620639,FR
3652620640,3652620671,CZ
3652620672,3652620735,PT
3652620736,3652620799,FR
3652620800,3652620831,PT
3652620832,3652621247,FR
3652621248,3652621311,DE
3652621312,3652621375,FR
3652621376,3652621439,ES
3652621440,3652621503,FR
3652621504,3652621567,IT
3652621568,3652621631,FR
3652621632,3652621663,PT
3652621664,3652621823,FR
3652621824,3652621951,IE
3652621952,3652622271,FR
3652622272,3652622335,GB
3652622336,3652622879,FR
3652622880,3652622911,CZ
3652622912,3652623679,FR
3652623680,3652623807,NL
3652623808,3652624191,FR
3652624192,3652624319,IT
3652624320,3652628479,FR
3652628480,3652628543,IT
3652628544,3652628607,FR
3652628608,3652628639,PL
3652628640,3652628855,FR
3652628856,3652628863,ES
3652628864,3652629743,FR
3652629744,3652629759,ES
3652629760,3652630015,FR
3652630016,3652630031,ES
3652630032,3652630079,FR
3652630080,3652630111,PL
3652630112,3652631295,FR
3652631296,3652631359,BE
3652631360,3652631391,FR
3652631392,3652631407,CH
3652631408,3652631423,FR
3652631424,3652631455,PL
3652631456,3652631551,FR
3652631552,3652631583,CZ
3652631584,3652631807,FR
3652631808,3652631823,GB
3652631824,3652632031,FR
3652632032,3652632063,PT
3652632064,3652632303,FR
3652632304,3652632311,ES
3652632312,3652633599,FR
3652633600,3652634623,DE
3652634624,3652635647,PL
3652635648,3652638655,FR
3652638656,3652638719,ES
3652638720,3652638815,FR
3652638816,3652638847,FI
3652638848,3652638975,GB
3652638976,3652639359,FR
3652639360,3652639423,DE
3652639424,3652639679,FR
3652639680,3652639807,NL
3652639808,3652640575,FR
3652640576,3652640703,GB
3652640704,3652640711,FR
3652640712,3652640719,ES
3652640720,3652640767,FR
3652640768,3652640831,ES
3652640832,3652641727,FR
3652641728,3652641791,GB
3652641792,3652642111,FR
3652642112,3652642175,IE
3652642176,3652642239,FR
3652642240,3652642303,DE
3652642304,3652642367,FR
3652642368,3652642431,GB
3652642432,3652642719,FR
3652642720,3652642751,PT
3652642752,3652642975,FR
3652642976,3652643007,IE
3652643008,3652643375,FR
3652643376,3652643379,ES
3652643380,3652643519,FR
3652643520,3652643583,NL
3652643584,3652643647,ES
3652643648,3652644031,FR
3652644032,3652644063,BE
3652644064,3652644199,FR
3652644200,3652644215,ES
3652644216,3652644223,FR
3652644224,3652644239,NL
3652644240,3652644247,FR
3652644248,3652644255,ES
3652644256,3652644351,FR
3652644352,3652644383,FI
3652644384,3652644415,PL
3652644416,3652644575,FR
3652644576,3652644607,DE
3652644608,3652645119,FR
3652645120,3652645503,GB
3652645504,3652645663,FR
3652645664,3652645695,FI
3652645696,3652645887,FR
3652645888,3652646015,NL
3652646016,3652646079,ES
3652646080,3652646111,FR
3652646112,3652646143,CZ
3652646144,3652646271,NL
3652646272,3652646655,FR
3652646656,3652646719,ES
3652646720,3652646799,FR
3652646800,3652646815,PL
3652646816,3652646847,FR
3652646848,3652646863,FI
3652646864,3652648847,FR
3652648848,3652648863,LT
3652648864,3652648895,FI
3652648896,3652648959,DE
3652648960,3652714495,IE
3652714496,3653238783,DE
3653238784,3653369855,CH
3653369856,3653373951,IT
3653373952,3653378047,NL
3653378048,3653382143,DE
3653382144,3653386239,CH
3653386240,3653390335,DE
3653390336,3653394431,FR
3653394432,3653402623,NL
3653402624,3653406557,GB
3653406558,3653406558,GN
3653406559,3653406617,GB
3653406618,3653406618,GN
3653406619,3653407103,GB
3653407104,3653407111,UG
3653407112,3653408071,GB
3653408072,3653408079,NG
3653408080,3653408231,GB
3653408232,3653408239,KE
3653408240,3653410815,GB
3653410816,3653414911,CZ
3653414912,3653419007,IT
3653419008,3653423103,IL
3653423104,3653427199,GB
3653427200,3653431295,DE
3653431296,3653435391,RU
3653435392,3653439487,DE
3653439488,3653443583,FR
3653443584,3653447679,DE
3653447680,3653451775,LV
3653451776,3653464063,RU
3653464064,3653468159,NL
3653468160,3653472255,GR
3653476352,3653480447,CZ
3653480448,3653484543,DK
3653484544,3653488639,TR
3653488640,3653492735,RU
3653492736,3653500927,NL
3653500928,3653505023,GB
3653505024,3653509119,KZ
3653509120,3653513215,NL
3653513216,3653517311,NO
3653517312,3653525503,AT
3653525504,3653529599,RU
3653529600,3653533695,CZ
3653533696,3653537791,IT
3653537792,3653541887,AT
3653541888,3653545983,UA
3653545984,3653550079,CH
3653550080,3653554175,MK
3653554176,3653558271,CZ
3653558272,3653566463,GB
3653566464,3653570559,RU
3653570560,3653574655,ES
3653574656,3653578751,CZ
3653578752,3653582847,SE
3653582848,3653586943,PL
3653586944,3653591039,DE
3653591040,3653595135,LU
3653595136,3653599231,RU
3653599232,3653601279,CH
3653601280,3653603327,BA
3653603328,3653607423,CZ
3653611520,3653615615,HU
3653615616,3653619711,RU
3653619712,3653623807,CH
3653623808,3653636095,RU
3653636096,3653640191,NL
3653640192,3653648383,GB
3653648384,3653652479,SE
3653652480,3653656575,RU
3653656576,3653660671,GB
3653660672,3653664767,CZ
3653664768,3653668863,DE
3653668864,3653672959,SE
3653672960,3653681151,RU
3653681152,3653685247,ES
3653685248,3653689343,DK
3653689344,3653693439,LV
3653693440,3653697535,DE
3653697536,3653705727,IT
3653705728,3653708331,NO
3653708332,3653708332,FI
3653708333,3653713919,NO
3653713920,3653718015,DE
3653718016,3653722111,AT
3653722112,3653730303,LV
3653730304,3653734399,BA
3653734400,3653738495,KE
3653738496,3653746687,GB
3653746688,3653750783,DE
3653750784,3653754879,RU
3653754880,3653758975,UA
3653758976,3653763071,RU
3653763072,3654025215,IT
3654025216,3654287359,GB
3654287360,3654608404,SE
3654608405,3654608405,NO
3654608406,3654608895,SE
3654608896,3654609919,NO
3654609920,3654610431,SE
3654610432,3654610943,FR
3654610944,3654610951,SE
3654610952,3654610959,DE
3654610960,3654612231,SE
3654612232,3654612239,AT
3654612240,3654612271,SE
3654612272,3654612287,AT
3654612288,3654614047,SE
3654614048,3654614063,GB
3654614064,3654614079,SE
3654614080,3654614271,FI
3654614272,3654811647,SE
3654811648,3654942719,ES
3654942720,3655073791,IR
3655073792,3655335935,IT
3655335936,3657433087,DE
3657433088,3659415455,CN
3659415456,3659415487,SG
3659415488,3659530239,CN
3659530240,3659595775,TW
3659595776,3659628543,ID
3659628544,3659661311,JP
3659661312,3659792383,TW
3659792384,3660054527,KR
3660054528,3660578815,JP
3660578816,3661103103,KR
3661103104,3663986687,CN
3663986688,3663987711,AU
3663987712,3663987967,ID
3663987968,3663989247,JP
3663989248,3663989503,VN
3663989504,3663989759,ID
3663989760,3663990271,AU
3663990272,3663990527,VN
3663990528,3663990783,JP
3663990784,3663991295,HK
3663991296,3663991551,MY
3663991552,3663991807,AU
3663992064,3663992319,NZ
3663992320,3663992575,MY
3663992576,3663993599,NZ
3663993600,3663996159,ID
3663996160,3663996415,AU
3663996416,3663996671,TH
3663996672,3663997183,AU
3663997184,3663997439,ID
3663997440,3663997695,JP
3663997696,3663997951,AU
3663997952,3663998207,MY
3663998208,3663998463,JP
3663998464,3663998975,TH
3663998976,3663999487,IN
3663999488,3663999743,AU
3664000000,3664000767,AU
3664000768,3664001023,ID
3664001024,3664001279,NZ
3664001280,3664001535,LK
3664001536,3664001791,MY
3664002048,3664002303,VN
3664002304,3664002559,LK
3664002560,3664003327,ID
3664003328,3664003583,NZ
3664003584,3664003839,TH
3664003840,3664004095,JP
3664004352,3664004607,MY
3664004864,3664005119,KH
3664005120,3664005887,ID
3664005888,3664006143,MY
3664006144,3664006399,AU
3664006400,3664006655,PF
3664006656,3664006911,AU
3664007168,3664008191,AU
3664008192,3664008447,MN
3664008448,3664008703,PK
3664008960,3664010239,AU
3664010240,3664052223,CN
3664052224,3664084991,NZ
3664084992,3664117759,KR
3664117760,3664248831,HK
3664248832,3664642047,CN
3664642048,3664707583,JP
3664707584,3664773119,MY
3664773120,3666870271,JP
3666870272,3666960455,KR
3666960456,3666960456,US
3666960457,3667918847,KR
3667918848,3668967423,TW
3668967424,3669491711,JP
3669491712,3669557247,TW
3669557248,3669590015,AU
3669590016,3669606399,JP
3669606400,3669614591,CN
3669614592,3669616639,NZ
3669616640,3669618687,AU
3669618688,3669620735,CN
3669620736,3669622783,IN
3669622784,3669688319,SG
3669688320,3669753855,TW
3669753856,3670015999,HK
3670016000,3671064575,CN
3671064576,3671130111,MY
3671130112,3671195647,KR
3671195648,3671326719,TW
3671326720,3671392255,SG
3671392256,3671457791,HK
3671457792,3671588863,AU
3671588864,3672637439,JP
3672637440,3673161727,KR
3673161728,3673686015,CN
3673686016,3673751551,IN
3673751552,3673817087,CN
3673817088,3673882623,HK
3673882624,3673948159,JP
3673948160,3674210303,HK
3674210304,3678404607,JP
3678404608,3678535679,IN
3678535680,3678666751,JP
3678666752,3678928895,TW
3678928896,3678994431,CN
3678994432,3679027199,HK
3679027200,3679059967,JP
3679059968,3679158271,SG
3679158272,3679191039,JP
3679191040,3679453183,HK
3679453184,3679584255,TW
3679584256,3679649791,CN
3679649792,3679682559,ID
3679682560,3679715327,CN
3679715328,3679977471,TW
3679977472,3680108543,NZ
3680108544,3680124927,TW
3680124928,3680125951,IN
3680125952,3680129023,CN
3680129024,3680133119,PH
3680133120,3680137215,IN
3680137216,3680141311,HK
3680141312,3680174079,AU
3680174080,3680206847,TW
3680206848,3680239615,IN
3680239616,3680403455,MY
3680403456,3680436223,JP
3680436224,3680501759,MY
3680501760,3682598911,JP
3682598912,3684575268,CN
3684575269,3684575269,HK
3684575270,3684696063,CN
3684696064,3688366079,JP
3688366080,3689938943,CN
3689938944,3690070015,KR
3690070016,3690463231,CN
3690463232,3690987519,KR
3690987520,3695181823,JP
3695181824,3697278975,KR
3697278976,3697573887,JP
3697573888,3697582079,GB
3697582080,3697586175,SG
3697586176,3697606655,JP
3697606656,3697655807,AU
3697655808,3697672191,CN
3697672192,3697737727,JP
3697737728,3697803263,KR
3697803264,3698327551,JP
3698327552,3698589695,CN
3698589696,3699376127,KR
3699376128,3700424703,TW
3700424704,3700752383,JP
3700752384,3700817919,KR
3700817920,3700981759,JP
3700981760,3701014527,CN
3701014528,3701080063,JP
3701080064,3701211135,CN
3701211136,3701252095,JP
3701252096,3701256191,NC
3701256192,3701258239,SG
3701258240,3701260287,IN
3701260288,3701293055,JP
3701293056,3701301247,AU
3701301248,3701305343,ID
3701305344,3701309439,TW
3701309440,3701374975,JP
3701374976,3701375999,IN
3701376000,3701377023,HK
3701377024,3701380095,IN
3701380096,3701381119,KH
3701381120,3701390335,IN
3701390336,3701391359,AU
3701391360,3701392383,IN
3701392384,3701393407,HK
3701393408,3701394431,MY
3701394432,3701395455,BD
3701395456,3701396479,MY
3701396480,3701397247,NZ
3701397248,3701397503,AU
3701397504,3701398527,JP
3701398528,3701399551,MV
3701399552,3701400575,HK
3701400576,3701401599,TW
3701401600,3701402623,BD
3701402624,3701403647,BT
3701403648,3701404671,CN
3701404672,3701405695,HK
3701405696,3701406719,JP
3701406720,3701407743,HK
3701407744,3701473279,JP
3701473280,3704619007,CN
3704619008,3705667583,JP
3705667584,3705929727,IN
3705929728,3706060799,TW
3706060800,3706126335,KR
3706126336,3706142719,CN
3706142720,3706159103,VN
3706159104,3706191871,CN
3706191872,3706207107,SG
3706207108,3706207108,US
3706207109,3706208255,SG
3706208256,3706224639,CN
3706224640,3706225663,HK
3706225664,3706226687,JP
3706226688,3706231807,HK
3706231808,3706232831,JP
3706232832,3706233343,HK
3706233344,3706234367,JP
3706234368,3706237951,HK
3706237952,3706238975,JP
3706238976,3706244095,HK
3706244096,3706244863,JP
3706244864,3706245887,HK
3706245888,3706246143,JP
3706246144,3706253823,HK
3706253824,3706254335,JP
3706254336,3706256895,HK
3706256896,3706257151,JP
3706257152,3706257407,HK
3706257408,3706322943,AU
3706322944,3706388479,CN
3706388480,3706781695,AU
3706781696,3706847231,HK
3706847232,3706978303,CN
3706978304,3707109375,AU
3707109376,3707174911,HK
3707174912,3707207679,JP
3707207680,3707208703,BD
3707208704,3707209727,NZ
3707209728,3707211775,CN
3707211776,3707215871,NP
3707215872,3707217919,BD
3707217920,3707219967,ID
3707219968,3707222015,AU
3707222016,3707224063,JP
3707224064,3707240447,LK
3707240448,3707568127,CN
3707568128,3707633663,AU
3707633664,3707699199,JP
3707699200,3707764735,SG
3707764736,3708600319,CN
3708600320,3708616703,JP
3708616704,3708813311,CN
3708813312,3715629055,JP
3715629056,3715653631,TW
3715653632,3715655679,BD
3715655680,3715657727,IN
3715657728,3715661823,SG
3715661824,3715670015,AU
3715670016,3715671039,KH
3715671040,3715672063,AU
3715672064,3715674111,JP
3715674112,3715678207,HK
3715678208,3715694591,PK
3715694592,3715710975,VN
3715710976,3715719167,AU
3715719168,3715727359,PH
3715727360,3715729151,AU
3715729152,3715729407,NZ
3715729408,3715735551,AU
3715735552,3715741695,JP
3715741696,3715743743,PH
3715743744,3715760127,JP
3715760128,3715891199,CN
3715891200,3716153343,HK
3716153344,3716170239,SG
3716170240,3716170494,TH
3716170495,3716171519,SG
3716171520,3716171775,JP
3716171776,3716172031,SG
3716172032,3716172287,JP
3716172288,3716173055,SG
3716173056,3716173311,JP
3716173312,3716173567,SG
3716173568,3716173823,JP
3716173824,3716174079,SG
3716174080,3716174083,TH
3716174084,3716174335,JP
3716174336,3716175615,SG
3716175616,3716176895,JP
3716176896,3716178175,SG
3716178176,3716178943,JP
3716178944,3716179967,SG
3716179968,3716181759,JP
3716181760,3716182783,SG
3716182784,3716183295,JP
3716183296,3716183551,SG
3716183552,3716184063,JP
3716184064,3716184319,SG
3716184320,3716184575,JP
3716184576,3716184831,SG
3716184832,3716185087,JP
3716185088,3716186111,SG
3716186112,3716415487,CN
3716415488,3716431871,VN
3716431872,3716440063,KR
3716440064,3716444159,JP
3716444160,3716446207,PK
3716446208,3716464639,JP
3716464640,3716481023,ID
3716481024,3716489215,VN
3716489216,3716493311,MY
3716493312,3716497407,KR
3716497408,3716513791,JP
3716513792,3716530175,KR
3716530176,3716538367,AU
3716538368,3716546559,CN
3716546560,3716677631,IN
3716677632,3716808703,CN
3716808704,3718840319,KR
3718840320,3718905855,TW
3718905856,3719036927,JP
3719036928,3719823359,CN
3719823360,3720347647,JP
3720347648,3720859647,CN
3720859648,3720863743,AU
3720863744,3723493375,CN
3723493376,3725590527,JP
3725590528,3730833407,CN
3730833408,3732602879,KR
3732602880,3732668415,TH
3732668416,3732733951,ID
3732733952,3732799487,CN
3732799488,3732832255,PH
3732832256,3732865023,CN
3732865024,3732930559,PH
3732930560,3733979135,CN
3733979136,3734503423,JP
3734503424,3734765567,NZ
3734765568,3734896639,TW
3734896640,3735027711,JP
3735027712,3735289855,CN
3735289856,3735388159,SG
3735388160,3735404543,LK
3735404544,3735420927,ID
3735420928,3735551999,HK
3735552000,3739222015,CN
3739222016,3739570175,JP
3739570176,3739572223,ID
3739572224,3739574271,AU
3739574272,3739680767,JP
3739680768,3739697151,KR
3739697152,3739746303,JP
3739746304,3740270591,KR
3740270592,3740925951,CN
3740925952,3741024255,TW
3741024256,3741057023,KR
3741057024,3741319167,VN
3741319168,3742367743,CN
3742367744,3742629887,HK
3742629888,3742760959,CN
3742760960,3742892031,TW
3742892032,3742957567,TH
3742957568,3742973951,PH
3742973952,3742982143,SG
3742982144,3742986239,ID
3742986240,3742988287,AU
3742988288,3742990335,VU
3742990336,3743006719,JP
3743006720,3743014911,TH
3743014912,3743016959,AU
3743016960,3743019007,SG
3743019008,3743022079,MY
3743022080,3743023103,BD
3743023104,3743027199,TW
3743027200,3743028223,IN
3743028224,3743029247,AF
3743029248,3743030271,NZ
3743030272,3743035391,IN
3743035392,3743039487,HK
3743039488,3743055871,TW
3743055872,3743088639,KR
3743088640,3743093647,AU
3743093648,3743093648,NZ
3743093649,3743096831,AU
3743096832,3743105023,TW
3743105024,3743106047,AU
3743106048,3743109119,JP
3743109120,3743113215,BD
3743113216,3743115263,AU
3743115264,3743117311,VN
3743117312,3743118335,BD
3743118336,3743119359,JP
3743119360,3743120383,IN
3743120384,3743121407,JP
3743121408,3743125503,MY
3743125504,3743129599,ID
3743129600,3743130623,HK
3743130624,3743130879,SG
3743130880,3743131135,HK
3743131136,3743133695,SG
3743133696,3743134719,AU
3743134720,3743135743,JP
3743135744,3743136767,CN
3743136768,3743137791,MY
3743137792,3743154175,TH
3743154176,3743186943,MY
3743186944,3743219711,KR
3743219712,3743252479,JP
3743252480,3743264767,NC
3743264768,3743268863,JP
3743268864,3743272959,IN
3743272960,3743273983,CN
3743273984,3743275007,BD
3743275008,3743276031,HK
3743276032,3743277055,IN
3743277056,3743281151,PK
3743281152,3743282175,AU
3743282176,3743283199,JP
3743283200,3743284223,HK
3743284224,3743285247,CN
3743285248,3743416319,IN
3743416320,3745513471,KR
3745513472,3749052415,CN
3749052416,3749183487,HK
3749183488,3749838847,CN
3749838848,3749839871,SG
3749839872,3749840895,IN
3749840896,3749841919,CN
3749841920,3749842943,AU
3749842944,3749843967,PH
3749843968,3749844991,ID
3749844992,3749846015,AU
3749846016,3749847039,IN
3749847040,3749855231,HK
3749855232,3749969919,KR
3749969920,3750232063,JP
3750232064,3750756351,TW
3750756352,3752067071,CN
3752067072,3752132607,ID
3752132608,3752133631,BD
3752133632,3752134655,ID
3752134656,3752136703,TW
3752136704,3752137727,NZ
3752137728,3752138751,JP
3752138752,3752140799,IN
3752140800,3752148991,JP
3752148992,3752153087,NZ
3752153088,3752157183,JP
3752157184,3752165375,AU
3752165376,3752198143,KR
3752198144,3752329215,CN
3752329216,3752853503,KR
3752853504,3753902079,IN
3753902080,3754033151,CN
3754033152,3754164223,KR
3754164224,3754229759,IN
3754229760,3754295295,HK
3754295296,3754426367,CN
3754426368,3754491903,TW
3754491904,3754688511,CN
3754688512,3754950655,TH
3754950656,3755474943,CN
3755474944,3755737087,JP
3755737088,3755868159,CN
3755868160,3755933695,KR
3755933696,3755966463,JP
3755966464,3755974655,IN
3755974656,3755976703,JP
3755976704,3755978751,KH
3755978752,3755986943,CN
3755986944,3755988991,JP
3755988992,3755990015,HK
3755990016,3755991039,SG
3755991040,3755999231,JP
3755999232,3757047807,IN
3757047808,3757834239,CN
3757834240,3757850623,AU
3757850624,3757858815,JP
3757858816,3757862911,AU
3757862912,3757867007,JP
3757867008,3757875519,CN
3757875520,3757875583,HK
3757875584,3757899775,CN
3757899776,3757965311,KR
3757965312,3758063615,CN
3758063616,3758079999,HK
3758080000,3758088191,KR
3758088192,3758090239,ID
3758090240,3758091263,AU
3758091264,3758092287,CN
3758092288,3758093311,HK
3758093312,3758094335,IN
3758094336,3758095359,HK
3758095360,3758095871,CN
3758095872,3758096127,SG
3758096128,3758096383,AU
07070100000019000081A400000000000000000000000167D9BD4E000094D8000000000000000000000000000000000000002400000000snowflake-2.11.0/broker/test_geoip6# Last updated based on February 7 2018 Maxmind GeoLite2 Country
# wget https://geolite.maxmind.com/download/geoip/database/GeoLite2-Country.mmdb.gz
# gunzip GeoLite2-Country.mmdb.gz
# python mmdb-convert.py GeoLite2-Country.mmdb
600:8801:9400:5a1:948b:ab15:dde3:61a3,600:8801:9400:5a1:948b:ab15:dde3:61a3,US
2001:200::,2001:200:ffff:ffff:ffff:ffff:ffff:ffff,JP
2001:208::,2001:208:ffff:ffff:ffff:ffff:ffff:ffff,SG
2001:218::,2001:218:ffff:ffff:ffff:ffff:ffff:ffff,JP
2001:220::,2001:220:ffff:ffff:ffff:ffff:ffff:ffff,KR
2001:230::,2001:230:ffff:ffff:ffff:ffff:ffff:ffff,KR
2001:238::,2001:238:ffff:ffff:ffff:ffff:ffff:ffff,TW
2001:240::,2001:240:ffff:ffff:ffff:ffff:ffff:ffff,JP
2620:21:2000::,2620:21:2000:ffff:ffff:ffff:ffff:ffff,US
2620:21:4000::,2620:21:4000:ffff:ffff:ffff:ffff:ffff,US
2620:21:6000::,2620:21:600f:ffff:ffff:ffff:ffff:ffff,US
2620:21:8000::,2620:21:8000:ffff:ffff:ffff:ffff:ffff,US
2620:21:a000::,2620:21:a000:ffff:ffff:ffff:ffff:ffff,US
2620:21:c000::,2620:21:c000:ffff:ffff:ffff:ffff:ffff,CA
2620:21:e000::,2620:21:e000:ffff:ffff:ffff:ffff:ffff,US
2620:22::,2620:22::ffff:ffff:ffff:ffff:ffff,US
2620:22:2000::,2620:22:2000:ffff:ffff:ffff:ffff:ffff,US
2620:22:4000::,2620:22:4000:ffff:ffff:ffff:ffff:ffff,CA
2620:22:6000::,2620:22:6000:ffff:ffff:ffff:ffff:ffff,US
2620:c2:8000::,2620:c2:8000:ffff:ffff:ffff:ffff:ffff,US
2620:c2:c000::,2620:c2:c000:ffff:ffff:ffff:ffff:ffff,US
2620:c3::,2620:c3::ffff:ffff:ffff:ffff:ffff,US
2620:c3:4000::,2620:c3:4000:ffff:ffff:ffff:ffff:ffff,US
2620:c3:8000::,2620:c3:8000:ffff:ffff:ffff:ffff:ffff,US
2620:c3:c000::,2620:c3:c00f:ffff:ffff:ffff:ffff:ffff,US
2620:c4::,2620:c4::ffff:ffff:ffff:ffff:ffff,US
2620:c4:4000::,2620:c4:4000:ffff:ffff:ffff:ffff:ffff,US
2620:c4:8000::,2620:c4:8000:ffff:ffff:ffff:ffff:ffff,US
2620:c4:c000::,2620:c4:c000:ffff:ffff:ffff:ffff:ffff,CA
2620:c5::,2620:c5::ffff:ffff:ffff:ffff:ffff,US
2620:c5:4000::,2620:c5:4000:ffff:ffff:ffff:ffff:ffff,US
2620:c5:c000::,2620:c5:c000:ffff:ffff:ffff:ffff:ffff,US
2620:c6::,2620:c6::ffff:ffff:ffff:ffff:ffff,US
2620:c6:4000::,2620:c6:4000:ffff:ffff:ffff:ffff:ffff,US
2620:c6:8000::,2620:c6:8000:ffff:ffff:ffff:ffff:ffff,US
2620:c6:c000::,2620:c6:c000:ffff:ffff:ffff:ffff:ffff,US
2620:c7::,2620:c7::ffff:ffff:ffff:ffff:ffff,US
2620:c7:4000::,2620:c7:4000:ffff:ffff:ffff:ffff:ffff,US
2620:c7:8000::,2620:c7:8000:ffff:ffff:ffff:ffff:ffff,US
2620:c7:c000::,2620:c7:c000:ffff:ffff:ffff:ffff:ffff,US
2620:c8::,2620:c8::ffff:ffff:ffff:ffff:ffff,US
2620:c8:4000::,2620:c8:4000:ffff:ffff:ffff:ffff:ffff,US
2620:c8:c000::,2620:c8:c00f:ffff:ffff:ffff:ffff:ffff,US
2620:c9::,2620:c9::ffff:ffff:ffff:ffff:ffff,US
2620:c9:4000::,2620:c9:4000:ffff:ffff:ffff:ffff:ffff,US
2620:c9:8000::,2620:c9:8000:ffff:ffff:ffff:ffff:ffff,US
2620:c9:c000::,2620:c9:c000:ffff:ffff:ffff:ffff:ffff,US
2620:ca::,2620:ca::ffff:ffff:ffff:ffff:ffff,US
2620:ca:4000::,2620:ca:4000:ffff:ffff:ffff:ffff:ffff,US
2620:ca:8000::,2620:ca:8000:ffff:ffff:ffff:ffff:ffff,US
2620:ca:c000::,2620:ca:c000:ffff:ffff:ffff:ffff:ffff,US
2620:cb::,2620:cb:f:ffff:ffff:ffff:ffff:ffff,US
2620:cb:4000::,2620:cb:4000:ffff:ffff:ffff:ffff:ffff,US
2620:cb:8000::,2620:cb:8000:ffff:ffff:ffff:ffff:ffff,US
2620:cb:c000::,2620:cb:c000:ffff:ffff:ffff:ffff:ffff,US
2620:cc::,2620:cc::ffff:ffff:ffff:ffff:ffff,US
2620:cc:4000::,2620:cc:4000:ffff:ffff:ffff:ffff:ffff,US
2620:cc:8000::,2620:cc:8000:ffff:ffff:ffff:ffff:ffff,US
2620:cc:c000::,2620:cc:c000:ffff:ffff:ffff:ffff:ffff,US
2620:cd::,2620:cd::ffff:ffff:ffff:ffff:ffff,US
2620:cd:4000::,2620:cd:4000:ffff:ffff:ffff:ffff:ffff,US
2620:cd:8000::,2620:cd:8000:ffff:ffff:ffff:ffff:ffff,US
2620:cd:c000::,2620:cd:c000:ffff:ffff:ffff:ffff:ffff,US
2620:ce::,2620:ce::ffff:ffff:ffff:ffff:ffff,US
2620:ce:4000::,2620:ce:4000:ffff:ffff:ffff:ffff:ffff,US
2620:ce:8000::,2620:ce:8000:ffff:ffff:ffff:ffff:ffff,US
2620:ce:c000::,2620:ce:c000:ffff:ffff:ffff:ffff:ffff,US
2620:cf:4000::,2620:cf:4000:ffff:ffff:ffff:ffff:ffff,US
2620:cf:8000::,2620:cf:8000:ffff:ffff:ffff:ffff:ffff,US
2620:cf:c000::,2620:cf:c00f:ffff:ffff:ffff:ffff:ffff,US
2620:d0::,2620:d0::ffff:ffff:ffff:ffff:ffff,US
2620:d0:4000::,2620:d0:4000:ffff:ffff:ffff:ffff:ffff,US
2620:d0:8000::,2620:d0:8000:ffff:ffff:ffff:ffff:ffff,US
2620:d0:c000::,2620:d0:c000:ffff:ffff:ffff:ffff:ffff,US
2620:d1::,2620:d1::ffff:ffff:ffff:ffff:ffff,US
2620:d1:4000::,2620:d1:4000:ffff:ffff:ffff:ffff:ffff,US
2620:d1:8000::,2620:d1:8000:ffff:ffff:ffff:ffff:ffff,US
2620:d1:c000::,2620:d1:c000:ffff:ffff:ffff:ffff:ffff,US
2620:d2::,2620:d2::ffff:ffff:ffff:ffff:ffff,US
2620:d2:4000::,2620:d2:4000:ffff:ffff:ffff:ffff:ffff,US
2620:d2:8000::,2620:d2:8000:ffff:ffff:ffff:ffff:ffff,US
2620:d2:c000::,2620:d2:c000:ffff:ffff:ffff:ffff:ffff,CA
2620:d3:4000::,2620:d3:4000:ffff:ffff:ffff:ffff:ffff,US
2620:d3:8000::,2620:d3:8000:ffff:ffff:ffff:ffff:ffff,US
2620:d3:c000::,2620:d3:c000:ffff:ffff:ffff:ffff:ffff,US
2620:d4::,2620:d4::ffff:ffff:ffff:ffff:ffff,US
2620:d4:4000::,2620:d4:4000:ffff:ffff:ffff:ffff:ffff,US
2620:d4:8000::,2620:d4:8000:ffff:ffff:ffff:ffff:ffff,US
2620:d5::,2620:d5::ffff:ffff:ffff:ffff:ffff,US
2620:d5:4000::,2620:d5:4000:ffff:ffff:ffff:ffff:ffff,US
2620:d5:8000::,2620:d5:8000:ffff:ffff:ffff:ffff:ffff,US
2620:d5:c000::,2620:d5:c000:ffff:ffff:ffff:ffff:ffff,US
2620:d6::,2620:d6::ffff:ffff:ffff:ffff:ffff,US
2620:d6:4000::,2620:d6:4000:ffff:ffff:ffff:ffff:ffff,US
2620:d6:8000::,2620:d6:8000:ffff:ffff:ffff:ffff:ffff,US
2620:d6:c000::,2620:d6:c000:ffff:ffff:ffff:ffff:ffff,US
2620:d7::,2620:d7::ffff:ffff:ffff:ffff:ffff,US
2620:d7:4000::,2620:d7:4000:ffff:ffff:ffff:ffff:ffff,CA
2620:d7:8000::,2620:d7:8000:ffff:ffff:ffff:ffff:ffff,US
2620:d7:c000::,2620:d7:c000:ffff:ffff:ffff:ffff:ffff,US
2620:d8::,2620:d8::ffff:ffff:ffff:ffff:ffff,US
2620:d8:4000::,2620:d8:4000:ffff:ffff:ffff:ffff:ffff,US
2620:d8:8000::,2620:d8:8000:ffff:ffff:ffff:ffff:ffff,US
2620:d8:c000::,2620:d8:c000:ffff:ffff:ffff:ffff:ffff,US
2620:d9::,2620:d9::ffff:ffff:ffff:ffff:ffff,US
2620:d9:4000::,2620:d9:4000:ffff:ffff:ffff:ffff:ffff,US
2620:d9:8000::,2620:d9:8000:ffff:ffff:ffff:ffff:ffff,US
2620:d9:c000::,2620:d9:c000:ffff:ffff:ffff:ffff:ffff,US
2620:da::,2620:da::ffff:ffff:ffff:ffff:ffff,US
2620:da:4000::,2620:da:4000:ffff:ffff:ffff:ffff:ffff,US
2620:da:c000::,2620:da:c000:ffff:ffff:ffff:ffff:ffff,US
2620:db::,2620:db::ffff:ffff:ffff:ffff:ffff,US
2620:db:4000::,2620:db:4000:ffff:ffff:ffff:ffff:ffff,CA
2620:db:8000::,2620:db:8000:ffff:ffff:ffff:ffff:ffff,US
2620:db:c000::,2620:db:c000:ffff:ffff:ffff:ffff:ffff,US
2620:dc::,2620:dc::ffff:ffff:ffff:ffff:ffff,US
2620:dc:8::,2620:dc:8:ffff:ffff:ffff:ffff:ffff,US
2620:dc:4000::,2620:dc:40ff:ffff:ffff:ffff:ffff:ffff,US
2620:dc:8000::,2620:dc:8000:ffff:ffff:ffff:ffff:ffff,CA
2620:dc:c000::,2620:dc:c000:ffff:ffff:ffff:ffff:ffff,US
2620:dd::,2620:dd::ffff:ffff:ffff:ffff:ffff,CA
2620:dd:4000::,2620:dd:4000:ffff:ffff:ffff:ffff:ffff,US
2620:dd:8000::,2620:dd:8000:ffff:ffff:ffff:ffff:ffff,US
2620:dd:c000::,2620:dd:c000:ffff:ffff:ffff:ffff:ffff,US
2620:de::,2620:de::ffff:ffff:ffff:ffff:ffff,US
2620:de:4000::,2620:de:4000:ffff:ffff:ffff:ffff:ffff,US
2620:de:8000::,2620:de:8000:ffff:ffff:ffff:ffff:ffff,US
2620:de:c000::,2620:de:c000:ffff:ffff:ffff:ffff:ffff,US
2620:df::,2620:df::ffff:ffff:ffff:ffff:ffff,US
2620:df:4000::,2620:df:400f:ffff:ffff:ffff:ffff:ffff,US
2620:df:8000::,2620:df:8000:ffff:ffff:ffff:ffff:ffff,US
2620:df:c000::,2620:df:c000:ffff:ffff:ffff:ffff:ffff,US
2620:e0::,2620:e0::ffff:ffff:ffff:ffff:ffff,US
2620:e0:4000::,2620:e0:4000:ffff:ffff:ffff:ffff:ffff,US
2620:e0:8000::,2620:e0:8000:ffff:ffff:ffff:ffff:ffff,US
2620:e0:c000::,2620:e0:c000:ffff:ffff:ffff:ffff:ffff,US
2620:e1::,2620:e1::ffff:ffff:ffff:ffff:ffff,US
2620:e1:4000::,2620:e1:4000:ffff:ffff:ffff:ffff:ffff,US
2620:e1:8000::,2620:e1:8000:ffff:ffff:ffff:ffff:ffff,US
2620:e1:c000::,2620:e1:c000:ffff:ffff:ffff:ffff:ffff,VG
2620:e2::,2620:e2::ffff:ffff:ffff:ffff:ffff,US
2620:e2:4000::,2620:e2:4000:ffff:ffff:ffff:ffff:ffff,US
2620:e2:8000::,2620:e2:8000:ffff:ffff:ffff:ffff:ffff,US
2620:e2:c000::,2620:e2:c000:ffff:ffff:ffff:ffff:ffff,US
2620:e3::,2620:e3::ffff:ffff:ffff:ffff:ffff,US
2620:e3:4000::,2620:e3:4000:ffff:ffff:ffff:ffff:ffff,US
2620:e3:8000::,2620:e3:8000:ffff:ffff:ffff:ffff:ffff,US
2620:e3:c000::,2620:e3:c000:ffff:ffff:ffff:ffff:ffff,US
2620:e4::,2620:e4::ffff:ffff:ffff:ffff:ffff,US
2620:e4:4000::,2620:e4:4000:ffff:ffff:ffff:ffff:ffff,US
2620:e4:8000::,2620:e4:8000:ffff:ffff:ffff:ffff:ffff,US
2620:e4:c000::,2620:e4:c000:ffff:ffff:ffff:ffff:ffff,US
2620:e5::,2620:e5::ffff:ffff:ffff:ffff:ffff,US
2620:e5:4000::,2620:e5:4000:ffff:ffff:ffff:ffff:ffff,US
2620:e5:8000::,2620:e5:8000:ffff:ffff:ffff:ffff:ffff,US
2620:e5:c000::,2620:e5:c000:ffff:ffff:ffff:ffff:ffff,US
2620:e6::,2620:e6::ffff:ffff:ffff:ffff:ffff,US
2620:e6:4000::,2620:e6:4000:ffff:ffff:ffff:ffff:ffff,US
2620:e6:8000::,2620:e6:8000:ffff:ffff:ffff:ffff:ffff,US
2620:e6:c000::,2620:e6:c000:ffff:ffff:ffff:ffff:ffff,US
2620:e7::,2620:e7::ffff:ffff:ffff:ffff:ffff,US
2620:e7:4000::,2620:e7:4000:ffff:ffff:ffff:ffff:ffff,US
2620:e7:8000::,2620:e7:8000:ffff:ffff:ffff:ffff:ffff,CA
2620:e7:c000::,2620:e7:c000:ffff:ffff:ffff:ffff:ffff,US
2620:e8::,2620:e8::ffff:ffff:ffff:ffff:ffff,US
2620:e8:4000::,2620:e8:4000:ffff:ffff:ffff:ffff:ffff,US
2620:e8:8000::,2620:e8:8000:ffff:ffff:ffff:ffff:ffff,US
2620:e8:c000::,2620:e8:c000:ffff:ffff:ffff:ffff:ffff,US
2620:e9::,2620:e9::ffff:ffff:ffff:ffff:ffff,US
2620:e9:4000::,2620:e9:4000:ffff:ffff:ffff:ffff:ffff,US
2620:e9:8000::,2620:e9:8000:ffff:ffff:ffff:ffff:ffff,US
2620:e9:c000::,2620:e9:c000:ffff:ffff:ffff:ffff:ffff,US
2620:ea::,2620:ea:f:ffff:ffff:ffff:ffff:ffff,US
2620:ea:4000::,2620:ea:4000:ffff:ffff:ffff:ffff:ffff,US
2620:ea:8000::,2620:ea:8000:ffff:ffff:ffff:ffff:ffff,US
2620:eb::,2620:eb::ffff:ffff:ffff:ffff:ffff,US
2620:eb:4000::,2620:eb:4000:ffff:ffff:ffff:ffff:ffff,US
2620:eb:8000::,2620:eb:8000:ffff:ffff:ffff:ffff:ffff,US
2620:eb:c000::,2620:eb:c000:ffff:ffff:ffff:ffff:ffff,US
2620:ec::,2620:ec::ffff:ffff:ffff:ffff:ffff,US
2620:ec:4000::,2620:ec:4000:ffff:ffff:ffff:ffff:ffff,US
2620:ec:8000::,2620:ec:8000:ffff:ffff:ffff:ffff:ffff,US
2620:ec:c000::,2620:ec:c000:ffff:ffff:ffff:ffff:ffff,US
2620:ed::,2620:ed::ffff:ffff:ffff:ffff:ffff,US
2620:ed:4000::,2620:ed:4000:ffff:ffff:ffff:ffff:ffff,CA
2620:ed:8000::,2620:ed:8000:ffff:ffff:ffff:ffff:ffff,US
2620:ed:c000::,2620:ed:c000:ffff:ffff:ffff:ffff:ffff,US
2620:ee::,2620:ee::ffff:ffff:ffff:ffff:ffff,US
2620:ee:4000::,2620:ee:4000:ffff:ffff:ffff:ffff:ffff,US
2620:ee:8000::,2620:ee:8000:ffff:ffff:ffff:ffff:ffff,US
2620:ee:c000::,2620:ee:c00f:ffff:ffff:ffff:ffff:ffff,US
2620:ef:4000::,2620:ef:4000:ffff:ffff:ffff:ffff:ffff,US
2620:ef:8000::,2620:ef:8000:ffff:ffff:ffff:ffff:ffff,US
2620:ef:c000::,2620:ef:c000:ffff:ffff:ffff:ffff:ffff,US
2620:f0::,2620:f0::ffff:ffff:ffff:ffff:ffff,US
2620:f0:4000::,2620:f0:400f:ffff:ffff:ffff:ffff:ffff,US
2620:f0:8000::,2620:f0:8000:ffff:ffff:ffff:ffff:ffff,US
2620:f0:c000::,2620:f0:c002:ffff:ffff:ffff:ffff:ffff,US
2620:f0:c003::,2620:f0:c003:ffff:ffff:ffff:ffff:ffff,NL
2620:f0:c004::,2620:f0:c004:ffff:ffff:ffff:ffff:ffff,US
2620:f0:c005::,2620:f0:c005:ffff:ffff:ffff:ffff:ffff,SG
2620:f0:c006::,2620:f0:c009:ffff:ffff:ffff:ffff:ffff,US
2620:f0:c00a::,2620:f0:c00a:ffff:ffff:ffff:ffff:ffff,CA
2620:f0:c00b::,2620:f0:c00f:ffff:ffff:ffff:ffff:ffff,US
2620:f1:4000::,2620:f1:4000:ffff:ffff:ffff:ffff:ffff,CA
2620:f1:8000::,2620:f1:8000:ffff:ffff:ffff:ffff:ffff,US
2620:f1:c000::,2620:f1:c000:ffff:ffff:ffff:ffff:ffff,US
2620:f2::,2620:f2::ffff:ffff:ffff:ffff:ffff,CA
2620:f2:4000::,2620:f2:4000:ffff:ffff:ffff:ffff:ffff,US
2620:f2:8000::,2620:f2:8000:ffff:ffff:ffff:ffff:ffff,US
2620:f2:c000::,2620:f2:c000:ffff:ffff:ffff:ffff:ffff,US
2620:f3::,2620:f3::ffff:ffff:ffff:ffff:ffff,US
2620:f3:4000::,2620:f3:4000:ffff:ffff:ffff:ffff:ffff,US
2620:f3:8000::,2620:f3:8000:ffff:ffff:ffff:ffff:ffff,US
2620:f3:c000::,2620:f3:c000:ffff:ffff:ffff:ffff:ffff,US
2620:f4::,2620:f4::ffff:ffff:ffff:ffff:ffff,US
2620:f4:4000::,2620:f4:40ff:ffff:ffff:ffff:ffff:ffff,US
2620:f4:8000::,2620:f4:8000:ffff:ffff:ffff:ffff:ffff,CA
2620:f4:c000::,2620:f4:c000:ffff:ffff:ffff:ffff:ffff,US
2620:f5::,2620:f5::ffff:ffff:ffff:ffff:ffff,US
2620:f5:4000::,2620:f5:4000:ffff:ffff:ffff:ffff:ffff,US
2620:f5:8000::,2620:f5:8000:ffff:ffff:ffff:ffff:ffff,US
2620:f5:c000::,2620:f5:c000:ffff:ffff:ffff:ffff:ffff,US
2620:f6::,2620:f6::ffff:ffff:ffff:ffff:ffff,CA
2620:f6:4000::,2620:f6:400f:ffff:ffff:ffff:ffff:ffff,US
2620:f6:8000::,2620:f6:8000:ffff:ffff:ffff:ffff:ffff,US
2620:f6:c000::,2620:f6:c000:ffff:ffff:ffff:ffff:ffff,CA
2620:f7::,2620:f7::ffff:ffff:ffff:ffff:ffff,US
2620:f7:4000::,2620:f7:4000:ffff:ffff:ffff:ffff:ffff,US
2620:f7:8000::,2620:f7:8000:ffff:ffff:ffff:ffff:ffff,US
2620:f7:c000::,2620:f7:c000:ffff:ffff:ffff:ffff:ffff,US
2620:f8::,2620:f8::ffff:ffff:ffff:ffff:ffff,US
2620:f8:4000::,2620:f8:4000:ffff:ffff:ffff:ffff:ffff,US
2620:f8:8000::,2620:f8:8000:ffff:ffff:ffff:ffff:ffff,US
2620:f8:c000::,2620:f8:c000:ffff:ffff:ffff:ffff:ffff,US
2620:f9::,2620:f9:f:ffff:ffff:ffff:ffff:ffff,US
2620:f9:4000::,2620:f9:4000:ffff:ffff:ffff:ffff:ffff,US
2620:f9:8000::,2620:f9:8000:ffff:ffff:ffff:ffff:ffff,US
2620:f9:c000::,2620:f9:c000:ffff:ffff:ffff:ffff:ffff,US
2620:fa::,2620:fa::ffff:ffff:ffff:ffff:ffff,US
2620:fa:4000::,2620:fa:4000:ffff:ffff:ffff:ffff:ffff,US
2620:fa:8000::,2620:fa:8000:ffff:ffff:ffff:ffff:ffff,CA
2620:fa:c000::,2620:fa:c000:ffff:ffff:ffff:ffff:ffff,US
2620:fb::,2620:fb::ffff:ffff:ffff:ffff:ffff,US
2620:fb:4000::,2620:fb:4000:ffff:ffff:ffff:ffff:ffff,US
2620:fb:8000::,2620:fb:8000:ffff:ffff:ffff:ffff:ffff,US
2620:fc::,2620:fc::ffff:ffff:ffff:ffff:ffff,CA
2620:fc:4000::,2620:fc:4000:ffff:ffff:ffff:ffff:ffff,CA
2620:fc:8000::,2620:fc:8000:ffff:ffff:ffff:ffff:ffff,US
2620:fc:c000::,2620:fc:c000:ffff:ffff:ffff:ffff:ffff,US
2620:fd::,2620:fd::ffff:ffff:ffff:ffff:ffff,CA
2620:fd:4000::,2620:fd:4000:ffff:ffff:ffff:ffff:ffff,US
2620:fd:8000::,2620:fd:8000:ffff:ffff:ffff:ffff:ffff,US
2620:fd:c000::,2620:fd:c000:ffff:ffff:ffff:ffff:ffff,CA
2620:fe::,2620:fe::ffff:ffff:ffff:ffff:ffff,US
2620:fe:2040::,2620:fe:2040:ffff:ffff:ffff:ffff:ffff,US
2620:fe:8000::,2620:fe:8000:ffff:ffff:ffff:ffff:ffff,US
2620:fe:c000::,2620:fe:c000:ffff:ffff:ffff:ffff:ffff,US
2620:ff::,2620:ff::ffff:ffff:ffff:ffff:ffff,US
2620:ff:4000::,2620:ff:4000:ffff:ffff:ffff:ffff:ffff,US
2620:ff:8000::,2620:ff:8000:ffff:ffff:ffff:ffff:ffff,US
2620:ff:c000::,2620:ff:c000:ffff:ffff:ffff:ffff:ffff,US
2620:100::,2620:100:f:ffff:ffff:ffff:ffff:ffff,US
2620:100:3000::,2620:100:3007:ffff:ffff:ffff:ffff:ffff,US
2620:100:4000::,2620:100:403f:ffff:ffff:ffff:ffff:ffff,US
2620:100:5000::,2620:100:5007:ffff:ffff:ffff:ffff:ffff,US
2620:100:6000::,2620:100:60ff:ffff:ffff:ffff:ffff:ffff,US
2620:100:7000::,2620:100:700f:ffff:ffff:ffff:ffff:ffff,US
2620:100:8000::,2620:100:8003:ffff:ffff:ffff:ffff:ffff,US
2620:100:9000::,2620:100:900f:ffff:ffff:ffff:ffff:ffff,US
2620:100:a000::,2620:100:a00f:ffff:ffff:ffff:ffff:ffff,US
2620:100:c000::,2620:100:c03f:ffff:ffff:ffff:ffff:ffff,US
2620:100:d000::,2620:100:d00f:ffff:ffff:ffff:ffff:ffff,US
2620:100:e000::,2620:100:e00f:ffff:ffff:ffff:ffff:ffff,US
2620:100:f000::,2620:100:f00f:ffff:ffff:ffff:ffff:ffff,US
2620:101::,2620:101:3:ffff:ffff:ffff:ffff:ffff,US
2620:101:1000::,2620:101:103f:ffff:ffff:ffff:ffff:ffff,US
2620:101:2000::,2620:101:201f:ffff:ffff:ffff:ffff:ffff,US
2620:101:3000::,2620:101:303f:ffff:ffff:ffff:ffff:ffff,US
2620:101:4000::,2620:101:403f:ffff:ffff:ffff:ffff:ffff,US
2620:101:5000::,2620:101:503f:ffff:ffff:ffff:ffff:ffff,US
2620:101:6000::,2620:101:6001:ffff:ffff:ffff:ffff:ffff,US
2620:101:7000::,2620:101:7001:ffff:ffff:ffff:ffff:ffff,US
2620:101:8000::,2620:101:80f1:ffff:ffff:ffff:ffff:ffff,US
2620:101:80f2::,2620:101:80f2:7fff:ffff:ffff:ffff:ffff,CA
2620:101:80f2:8000::,2620:101:80ff:ffff:ffff:ffff:ffff:ffff,US
2620:101:9000::,2620:101:900f:ffff:ffff:ffff:ffff:ffff,US
2620:101:b000::,2620:101:b07f:ffff:ffff:ffff:ffff:ffff,US
2620:101:c000::,2620:101:c0ff:ffff:ffff:ffff:ffff:ffff,CA
2620:101:d000::,2620:101:d007:ffff:ffff:ffff:ffff:ffff,US
2620:101:e000::,2620:101:e00f:ffff:ffff:ffff:ffff:ffff,US
2620:101:f000::,2620:101:f001:ffff:ffff:ffff:ffff:ffff,CA
2620:102::,2620:102:f:ffff:ffff:ffff:ffff:ffff,US
2620:102:2000::,2620:102:200f:ffff:ffff:ffff:ffff:ffff,US
2620:102:3000::,2620:102:300f:ffff:ffff:ffff:ffff:ffff,US
2620:102:4000::,2620:102:403f:ffff:ffff:ffff:ffff:ffff,US
2a07:14c0::,2a07:14c7:ffff:ffff:ffff:ffff:ffff:ffff,RU
2a07:1500::,2a07:1507:ffff:ffff:ffff:ffff:ffff:ffff,GB
2a07:1540::,2a07:1547:ffff:ffff:ffff:ffff:ffff:ffff,NO
2a07:1580::,2a07:1587:ffff:ffff:ffff:ffff:ffff:ffff,DE
2a07:15c0::,2a07:15c7:ffff:ffff:ffff:ffff:ffff:ffff,TR
2a07:1600::,2a07:1607:ffff:ffff:ffff:ffff:ffff:ffff,NO
2a07:1640::,2a07:1647:ffff:ffff:ffff:ffff:ffff:ffff,NO
2a07:1680::,2a07:1687:ffff:ffff:ffff:ffff:ffff:ffff,NO
2a07:16c0::,2a07:16c7:ffff:ffff:ffff:ffff:ffff:ffff,ES
2a07:1700::,2a07:1707:ffff:ffff:ffff:ffff:ffff:ffff,NO
2a07:1740::,2a07:1747:ffff:ffff:ffff:ffff:ffff:ffff,DE
2a07:1780::,2a07:1787:ffff:ffff:ffff:ffff:ffff:ffff,RU
2a07:17c0::,2a07:17c7:ffff:ffff:ffff:ffff:ffff:ffff,UA
2a07:1800::,2a07:1807:ffff:ffff:ffff:ffff:ffff:ffff,AT
2a07:1840::,2a07:1847:ffff:ffff:ffff:ffff:ffff:ffff,RU
2a07:1880::,2a07:1887:ffff:ffff:ffff:ffff:ffff:ffff,IT
2a07:18c0::,2a07:18c7:ffff:ffff:ffff:ffff:ffff:ffff,FR
2a07:1900::,2a07:1907:ffff:ffff:ffff:ffff:ffff:ffff,FR
2a07:1940::,2a07:1947:ffff:ffff:ffff:ffff:ffff:ffff,IT
2a07:1980::,2a07:1987:ffff:ffff:ffff:ffff:ffff:ffff,IL
2a07:19c0::,2a07:19c7:ffff:ffff:ffff:ffff:ffff:ffff,FR
2a07:1a00::,2a07:1a07:ffff:ffff:ffff:ffff:ffff:ffff,ES
2a07:1a40::,2a07:1a47:ffff:ffff:ffff:ffff:ffff:ffff,PL
2a07:1a80::,2a07:1a80:6fff:ffff:ffff:ffff:ffff:ffff,SE
2a07:1a80:7000::,2a07:1a80:70ff:ffff:ffff:ffff:ffff:ffff,AT
2a07:1a80:7100::,2a07:1a87:ffff:ffff:ffff:ffff:ffff:ffff,SE
2a07:1ac0::,2a07:1ac7:ffff:ffff:ffff:ffff:ffff:ffff,DE
2a07:1b00::,2a07:1b07:ffff:ffff:ffff:ffff:ffff:ffff,GB
2a07:1b40::,2a07:1b47:ffff:ffff:ffff:ffff:ffff:ffff,GB
2a07:1b80::,2a07:1b87:ffff:ffff:ffff:ffff:ffff:ffff,AT
2a07:1bc0::,2a07:1bc7:ffff:ffff:ffff:ffff:ffff:ffff,PL
2a07:1c00::,2a07:1c07:ffff:ffff:ffff:ffff:ffff:ffff,DE
2a07:1c40::,2a07:1c44:3ff:ffff:ffff:ffff:ffff:ffff,AT
2a07:1c44:400::,2a07:1c44:4ff:ffff:ffff:ffff:ffff:ffff,DE
2a07:1c44:500::,2a07:1c44:609:ffff:ffff:ffff:ffff:ffff,AT
2a07:1c44:60a::,2a07:1c44:60a:ffff:ffff:ffff:ffff:ffff,DE
2a07:1c44:60b::,2a07:1c44:619:ffff:ffff:ffff:ffff:ffff,AT
2a07:1c44:61a::,2a07:1c44:61a:ffff:ffff:ffff:ffff:ffff,KR
2a07:1c44:61b::,2a07:1c44:67f:ffff:ffff:ffff:ffff:ffff,AT
2a07:1c44:680::,2a07:1c44:6bf:ffff:ffff:ffff:ffff:ffff,KR
2a07:1c44:6c0::,2a07:1c44:6ff:ffff:ffff:ffff:ffff:ffff,DE
2a07:1c44:700::,2a07:1c44:70f:ffff:ffff:ffff:ffff:ffff,US
2a07:1c44:710::,2a07:1c44:1800:ffff:ffff:ffff:ffff:ffff,AT
2a07:1c44:1801::,2a07:1c44:1802:ffff:ffff:ffff:ffff:ffff,US
2a07:1c44:1803::,2a07:1c44:35ff:ffff:ffff:ffff:ffff:ffff,AT
2a07:1c44:3600::,2a07:1c44:36ff:ffff:ffff:ffff:ffff:ffff,GB
2a07:1c44:3700::,2a07:1c44:3fff:ffff:ffff:ffff:ffff:ffff,AT
2a07:1c44:4000::,2a07:1c44:40ff:ffff:ffff:ffff:ffff:ffff,US
2a07:1c44:4100::,2a07:1c44:42ff:ffff:ffff:ffff:ffff:ffff,AT
2a07:1c44:4300::,2a07:1c44:43ff:ffff:ffff:ffff:ffff:ffff,HR
2a07:1c44:4400::,2a07:1c44:4fff:ffff:ffff:ffff:ffff:ffff,AT
2a07:1c44:5000::,2a07:1c44:51ff:ffff:ffff:ffff:ffff:ffff,US
2a07:1c44:5200::,2a07:1c47:ffff:ffff:ffff:ffff:ffff:ffff,AT
2a07:1c80::,2a07:1c87:ffff:ffff:ffff:ffff:ffff:ffff,SE
2a07:1cc0::,2a07:1cc7:ffff:ffff:ffff:ffff:ffff:ffff,PL
2a07:1d00::,2a07:1d07:ffff:ffff:ffff:ffff:ffff:ffff,IR
2a07:1d40::,2a07:1d47:ffff:ffff:ffff:ffff:ffff:ffff,RU
2a07:1d80::,2a07:1d87:ffff:ffff:ffff:ffff:ffff:ffff,FR
2a07:1dc0::,2a07:1dc7:ffff:ffff:ffff:ffff:ffff:ffff,PL
2a07:1e00::,2a07:1e07:ffff:ffff:ffff:ffff:ffff:ffff,KZ
2a07:1e40::,2a07:1e47:ffff:ffff:ffff:ffff:ffff:ffff,RU
2a07:1e80::,2a07:1e87:ffff:ffff:ffff:ffff:ffff:ffff,NL
2a07:1ec0::,2a07:1ec7:ffff:ffff:ffff:ffff:ffff:ffff,IT
2a07:1f00::,2a07:1f07:ffff:ffff:ffff:ffff:ffff:ffff,CH
2a07:1f40::,2a07:1f47:ffff:ffff:ffff:ffff:ffff:ffff,CZ
2a07:1f80::,2a07:1f87:ffff:ffff:ffff:ffff:ffff:ffff,US
2a07:1fc0::,2a07:1fc7:ffff:ffff:ffff:ffff:ffff:ffff,FR
2a07:2000::,2a07:2007:ffff:ffff:ffff:ffff:ffff:ffff,IQ
2a07:2040::,2a07:2047:ffff:ffff:ffff:ffff:ffff:ffff,ES
2a07:2080::,2a07:2087:ffff:ffff:ffff:ffff:ffff:ffff,NL
2a07:20c0::,2a07:20c7:ffff:ffff:ffff:ffff:ffff:ffff,CZ
2a07:2100::,2a07:2107:ffff:ffff:ffff:ffff:ffff:ffff,DE
2a07:2140::,2a07:2147:ffff:ffff:ffff:ffff:ffff:ffff,CH
2a07:2180::,2a07:2187:ffff:ffff:ffff:ffff:ffff:ffff,SE
2a07:21c0::,2a07:21c7:ffff:ffff:ffff:ffff:ffff:ffff,TR
2a07:2200::,2a07:2207:ffff:ffff:ffff:ffff:ffff:ffff,IR
2a07:2240::,2a07:2247:ffff:ffff:ffff:ffff:ffff:ffff,NO
2a07:2280::,2a07:2287:ffff:ffff:ffff:ffff:ffff:ffff,RU
2a07:2300::,2a07:2307:ffff:ffff:ffff:ffff:ffff:ffff,NO
2a07:2340::,2a07:2347:ffff:ffff:ffff:ffff:ffff:ffff,SE
2a07:2380::,2a07:2387:ffff:ffff:ffff:ffff:ffff:ffff,ES
2a07:23c0::,2a07:23c7:ffff:ffff:ffff:ffff:ffff:ffff,GB
2a07:2400::,2a07:2407:ffff:ffff:ffff:ffff:ffff:ffff,FR
2a07:2440::,2a07:2447:ffff:ffff:ffff:ffff:ffff:ffff,SE
2a07:2480::,2a07:2487:ffff:ffff:ffff:ffff:ffff:ffff,IR
2a07:24c0::,2a07:24c7:ffff:ffff:ffff:ffff:ffff:ffff,GB
2a07:2500::,2a07:2507:ffff:ffff:ffff:ffff:ffff:ffff,DK
2a07:2540::,2a07:2547:ffff:ffff:ffff:ffff:ffff:ffff,DE
2a07:2580::,2a07:2587:ffff:ffff:ffff:ffff:ffff:ffff,IT
2a07:25c0::,2a07:25c7:ffff:ffff:ffff:ffff:ffff:ffff,GB
2a07:2600::,2a07:2607:ffff:ffff:ffff:ffff:ffff:ffff,NL
2a07:2640::,2a07:2647:ffff:ffff:ffff:ffff:ffff:ffff,GB
2a07:2680::,2a07:2687:ffff:ffff:ffff:ffff:ffff:ffff,DK
2a07:26c0::,2a07:26c7:ffff:ffff:ffff:ffff:ffff:ffff,BE
2a07:2700::,2a07:2707:ffff:ffff:ffff:ffff:ffff:ffff,TR
2a07:2740::,2a07:2747:ffff:ffff:ffff:ffff:ffff:ffff,NL
2a07:2780::,2a07:2787:ffff:ffff:ffff:ffff:ffff:ffff,GB
2a07:27c0::,2a07:27c7:ffff:ffff:ffff:ffff:ffff:ffff,NL
2a07:2800::,2a07:2807:ffff:ffff:ffff:ffff:ffff:ffff,DE
2a07:2840::,2a07:2847:ffff:ffff:ffff:ffff:ffff:ffff,IT
2a07:2880::,2a07:2887:ffff:ffff:ffff:ffff:ffff:ffff,NL
2a07:28c0::,2a07:28c7:ffff:ffff:ffff:ffff:ffff:ffff,FR
2a07:2900::,2a07:291f:ffff:ffff:ffff:ffff:ffff:ffff,CH
2a07:2a00::,2a07:2a07:ffff:ffff:ffff:ffff:ffff:ffff,ES
2a07:2a40::,2a07:2a47:ffff:ffff:ffff:ffff:ffff:ffff,DE
2a07:2a80::,2a07:2a87:ffff:ffff:ffff:ffff:ffff:ffff,SE
2a07:2ac0::,2a07:2ac7:ffff:ffff:ffff:ffff:ffff:ffff,DE
2a07:2b00::,2a07:2b07:ffff:ffff:ffff:ffff:ffff:ffff,FR
2a07:2b40::,2a07:2b47:ffff:ffff:ffff:ffff:ffff:ffff,NO
2a07:2b80::,2a07:2b87:ffff:ffff:ffff:ffff:ffff:ffff,NL
2a07:2bc0::,2a07:2bc7:ffff:ffff:ffff:ffff:ffff:ffff,ES
2a07:2c00::,2a07:2c07:ffff:ffff:ffff:ffff:ffff:ffff,NL
2a07:2c40::,2a07:2c47:ffff:ffff:ffff:ffff:ffff:ffff,DE
2a07:2c80::,2a07:2c87:ffff:ffff:ffff:ffff:ffff:ffff,DE
2a07:2cc0::,2a07:2cc7:ffff:ffff:ffff:ffff:ffff:ffff,IT
2a07:2d00::,2a07:2d07:ffff:ffff:ffff:ffff:ffff:ffff,DE
2a07:2d40::,2a07:2d47:ffff:ffff:ffff:ffff:ffff:ffff,CH
2a07:2d80::,2a07:2d87:ffff:ffff:ffff:ffff:ffff:ffff,FR
2a07:2dc0::,2a07:2dc7:ffff:ffff:ffff:ffff:ffff:ffff,FI
2a07:2e00::,2a07:2e07:ffff:ffff:ffff:ffff:ffff:ffff,CH
2a07:2e40::,2a07:2e47:ffff:ffff:ffff:ffff:ffff:ffff,FR
2a07:2e80::,2a07:2e87:ffff:ffff:ffff:ffff:ffff:ffff,IT
2a07:2ec0::,2a07:2ec7:ffff:ffff:ffff:ffff:ffff:ffff,DE
2a07:2f00::,2a07:2f07:ffff:ffff:ffff:ffff:ffff:ffff,DE
2a07:2f40::,2a07:2f47:ffff:ffff:ffff:ffff:ffff:ffff,UA
2a07:2f80::,2a07:2f87:ffff:ffff:ffff:ffff:ffff:ffff,NO
2a07:2fc0::,2a07:2fc7:ffff:ffff:ffff:ffff:ffff:ffff,RU
2a07:3000::,2a07:3007:ffff:ffff:ffff:ffff:ffff:ffff,ES
2a07:3040::,2a07:3047:ffff:ffff:ffff:ffff:ffff:ffff,PL
2a07:3080::,2a07:3087:ffff:ffff:ffff:ffff:ffff:ffff,RU
2a07:30c0::,2a07:30c7:ffff:ffff:ffff:ffff:ffff:ffff,CZ
2a07:3100::,2a07:3107:ffff:ffff:ffff:ffff:ffff:ffff,RO
2a07:3140::,2a07:3147:ffff:ffff:ffff:ffff:ffff:ffff,BE
2a07:3180::,2a07:3187:ffff:ffff:ffff:ffff:ffff:ffff,NO
2a07:31c0::,2a07:31c7:ffff:ffff:ffff:ffff:ffff:ffff,NL
2a07:3200::,2a07:3207:ffff:ffff:ffff:ffff:ffff:ffff,NL
2a07:3240::,2a07:3247:ffff:ffff:ffff:ffff:ffff:ffff,NO
2a07:3280::,2a07:3287:ffff:ffff:ffff:ffff:ffff:ffff,NO
2a07:32c0::,2a07:32c7:ffff:ffff:ffff:ffff:ffff:ffff,NO
2a07:3300::,2a07:3307:ffff:ffff:ffff:ffff:ffff:ffff,TR
2a07:3340::,2a07:3347:ffff:ffff:ffff:ffff:ffff:ffff,DE
2a07:3380::,2a07:3387:ffff:ffff:ffff:ffff:ffff:ffff,NL
2a07:33c0::,2a07:33c7:ffff:ffff:ffff:ffff:ffff:ffff,DE
2a07:3400::,2a07:3407:ffff:ffff:ffff:ffff:ffff:ffff,UA
2a07:3440::,2a07:3447:ffff:ffff:ffff:ffff:ffff:ffff,NL
2a07:3480::,2a07:3487:ffff:ffff:ffff:ffff:ffff:ffff,RU
2a07:3500::,2a07:3507:ffff:ffff:ffff:ffff:ffff:ffff,NL
2a07:3540::,2a07:3547:ffff:ffff:ffff:ffff:ffff:ffff,IT
2a07:3580::,2a07:3587:ffff:ffff:ffff:ffff:ffff:ffff,ES
2a07:35c0::,2a07:35c7:ffff:ffff:ffff:ffff:ffff:ffff,UA
2a07:3600::,2a07:3607:ffff:ffff:ffff:ffff:ffff:ffff,GB
2a07:3640::,2a07:3647:ffff:ffff:ffff:ffff:ffff:ffff,NL
2a07:3680::,2a07:3687:ffff:ffff:ffff:ffff:ffff:ffff,LB
2a07:36c0::,2a07:36c7:ffff:ffff:ffff:ffff:ffff:ffff,NL
2a07:3700::,2a07:3707:ffff:ffff:ffff:ffff:ffff:ffff,RU
2a07:3740::,2a07:3747:ffff:ffff:ffff:ffff:ffff:ffff,AT
2a07:3780::,2a07:3787:ffff:ffff:ffff:ffff:ffff:ffff,IS
2a07:37c0::,2a07:37c7:ffff:ffff:ffff:ffff:ffff:ffff,BE
2a07:3800::,2a07:3807:ffff:ffff:ffff:ffff:ffff:ffff,FR
2a07:3840::,2a07:3847:ffff:ffff:ffff:ffff:ffff:ffff,HR
2a07:3880::,2a07:3887:ffff:ffff:ffff:ffff:ffff:ffff,NO
2a07:38c0::,2a07:38c7:ffff:ffff:ffff:ffff:ffff:ffff,GB
2a07:3900::,2a07:3907:ffff:ffff:ffff:ffff:ffff:ffff,DE
2a07:3940::,2a07:3947:ffff:ffff:ffff:ffff:ffff:ffff,FR
2a07:3980::,2a07:3987:ffff:ffff:ffff:ffff:ffff:ffff,NL
2a07:39c0::,2a07:39c7:ffff:ffff:ffff:ffff:ffff:ffff,DK
2a07:3a00::,2a07:3a07:ffff:ffff:ffff:ffff:ffff:ffff,ES
2a07:3a80::,2a07:3a87:ffff:ffff:ffff:ffff:ffff:ffff,FR
2a07:3ac0::,2a07:3ac7:ffff:ffff:ffff:ffff:ffff:ffff,DE
2a07:3b00::,2a07:3b07:ffff:ffff:ffff:ffff:ffff:ffff,FR
2a07:3b40::,2a07:3b47:ffff:ffff:ffff:ffff:ffff:ffff,RU
2a07:3b80::,2a07:3b87:ffff:ffff:ffff:ffff:ffff:ffff,GI
2a07:3bc0::,2a07:3bc7:ffff:ffff:ffff:ffff:ffff:ffff,NL
2a07:3c00::,2a07:3c07:ffff:ffff:ffff:ffff:ffff:ffff,NL
2a07:3c40::,2a07:3c47:ffff:ffff:ffff:ffff:ffff:ffff,DE
2a07:3c80::,2a07:3c87:ffff:ffff:ffff:ffff:ffff:ffff,RU
2a07:3d00::,2a07:3d07:ffff:ffff:ffff:ffff:ffff:ffff,IT
2a07:3d80::,2a07:3d87:ffff:ffff:ffff:ffff:ffff:ffff,CZ
2a07:3dc0::,2a07:3dc7:ffff:ffff:ffff:ffff:ffff:ffff,DK
2a07:3e00::,2a07:3e07:ffff:ffff:ffff:ffff:ffff:ffff,CH
2a07:3e40::,2a07:3e47:ffff:ffff:ffff:ffff:ffff:ffff,RU
2a07:3e80::,2a07:3e87:ffff:ffff:ffff:ffff:ffff:ffff,NL
2a07:3ec0::,2a07:3ec7:ffff:ffff:ffff:ffff:ffff:ffff,GB
2a07:3f00::,2a07:3f07:ffff:ffff:ffff:ffff:ffff:ffff,FR
2a07:3f40::,2a07:3f47:ffff:ffff:ffff:ffff:ffff:ffff,IE
2a07:3f80::,2a07:3f87:ffff:ffff:ffff:ffff:ffff:ffff,SK
2a07:3fc0::,2a07:3fc7:ffff:ffff:ffff:ffff:ffff:ffff,SE
2a07:4000::,2a07:4007:ffff:ffff:ffff:ffff:ffff:ffff,NO
2a07:4040::,2a07:4047:ffff:ffff:ffff:ffff:ffff:ffff,SE
2a07:4080::,2a07:4087:ffff:ffff:ffff:ffff:ffff:ffff,AT
2a07:40c0::,2a07:40c7:ffff:ffff:ffff:ffff:ffff:ffff,IL
2a07:4100::,2a07:4107:ffff:ffff:ffff:ffff:ffff:ffff,FR
2a07:4140::,2a07:4147:ffff:ffff:ffff:ffff:ffff:ffff,MD
2a07:4180::,2a07:4187:ffff:ffff:ffff:ffff:ffff:ffff,GB
2a07:41c0::,2a07:41c7:ffff:ffff:ffff:ffff:ffff:ffff,RU
2a07:4200::,2a07:4207:ffff:ffff:ffff:ffff:ffff:ffff,FR
2a07:4240::,2a07:4247:ffff:ffff:ffff:ffff:ffff:ffff,RU
2a07:4280::,2a07:4287:ffff:ffff:ffff:ffff:ffff:ffff,GB
2a07:42c0::,2a07:42c7:ffff:ffff:ffff:ffff:ffff:ffff,DK
2a07:4340::,2a07:4347:ffff:ffff:ffff:ffff:ffff:ffff,AE
2a0c:af80::,2a0c:af87:ffff:ffff:ffff:ffff:ffff:ffff,GB
2c0f:f950::,2c0f:f950:ffff:ffff:ffff:ffff:ffff:ffff,SS
2c0f:f958::,2c0f:f958:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:f960::,2c0f:f960:ffff:ffff:ffff:ffff:ffff:ffff,TZ
2c0f:f968::,2c0f:f968:ffff:ffff:ffff:ffff:ffff:ffff,MZ
2c0f:f970::,2c0f:f970:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:f978::,2c0f:f978:ffff:ffff:ffff:ffff:ffff:ffff,CD
2c0f:f980::,2c0f:f980:ffff:ffff:ffff:ffff:ffff:ffff,NA
2c0f:f988::,2c0f:f988:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:f990::,2c0f:f990:ffff:ffff:ffff:ffff:ffff:ffff,GN
2c0f:f998::,2c0f:f998:ffff:ffff:ffff:ffff:ffff:ffff,MR
2c0f:f9a0::,2c0f:f9a0:ffff:ffff:ffff:ffff:ffff:ffff,MW
2c0f:f9a8::,2c0f:f9a8:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:f9b0::,2c0f:f9b0:ffff:ffff:ffff:ffff:ffff:ffff,GA
2c0f:f9b8::,2c0f:f9b8:1:ffff:ffff:ffff:ffff:ffff,MU
2c0f:f9b8:2::,2c0f:f9b8:2:ffff:ffff:ffff:ffff:ffff,US
2c0f:f9b8:3::,2c0f:f9b8:ffff:ffff:ffff:ffff:ffff:ffff,MU
2c0f:f9c0::,2c0f:f9c0:ffff:ffff:ffff:ffff:ffff:ffff,BW
2c0f:f9c8::,2c0f:f9c8:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:f9d0::,2c0f:f9d0:ffff:ffff:ffff:ffff:ffff:ffff,TZ
2c0f:f9d8::,2c0f:f9d8:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:f9e0::,2c0f:f9e0:ffff:ffff:ffff:ffff:ffff:ffff,NG
2c0f:f9e8::,2c0f:f9e8:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:f9f0::,2c0f:f9f0:ffff:ffff:ffff:ffff:ffff:ffff,MG
2c0f:f9f8::,2c0f:f9f8:ffff:ffff:ffff:ffff:ffff:ffff,BJ
2c0f:fa00::,2c0f:fa00:ffff:ffff:ffff:ffff:ffff:ffff,GH
2c0f:fa08::,2c0f:fa08:ffff:ffff:ffff:ffff:ffff:ffff,CD
2c0f:fa10::,2c0f:fa10:fffc:ffff:ffff:ffff:ffff:ffff,MU
2c0f:fa10:fffd::,2c0f:fa10:fffd:7fff:ffff:ffff:ffff:ffff,ZM
2c0f:fa10:fffd:8000::,2c0f:fa10:ffff:ffff:ffff:ffff:ffff:ffff,MU
2c0f:fa18::,2c0f:fa18:ffff:ffff:ffff:ffff:ffff:ffff,MA
2c0f:fa20::,2c0f:fa20:ffff:ffff:ffff:ffff:ffff:ffff,SS
2c0f:fa28::,2c0f:fa28:ffff:ffff:ffff:ffff:ffff:ffff,MG
2c0f:fa38::,2c0f:fa38:ffff:ffff:ffff:ffff:ffff:ffff,AO
2c0f:fa40::,2c0f:fa40:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fa48::,2c0f:fa48:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fa58::,2c0f:fa58:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fa60::,2c0f:fa60:ffff:ffff:ffff:ffff:ffff:ffff,AO
2c0f:fa68::,2c0f:fa68:ffff:ffff:ffff:ffff:ffff:ffff,GH
2c0f:fa70::,2c0f:fa70:ffff:ffff:ffff:ffff:ffff:ffff,AO
2c0f:fa78::,2c0f:fa78:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fa80::,2c0f:fa80:ffff:ffff:ffff:ffff:ffff:ffff,AO
2c0f:fa88::,2c0f:fa88:ffff:ffff:ffff:ffff:ffff:ffff,ST
2c0f:fa90::,2c0f:fa90:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fa98::,2c0f:fa98:ffff:ffff:ffff:ffff:ffff:ffff,ZW
2c0f:faa0::,2c0f:faa7:ffff:ffff:ffff:ffff:ffff:ffff,SD
2c0f:fab0::,2c0f:fabf:ffff:ffff:ffff:ffff:ffff:ffff,TN
2c0f:fac0::,2c0f:fac0:ffff:ffff:ffff:ffff:ffff:ffff,MW
2c0f:fac8::,2c0f:fac8:ffff:ffff:ffff:ffff:ffff:ffff,BW
2c0f:fad8::,2c0f:fad8:ffff:ffff:ffff:ffff:ffff:ffff,CM
2c0f:fae0::,2c0f:fae0:ffff:ffff:ffff:ffff:ffff:ffff,CM
2c0f:fae8::,2c0f:fae8:ffff:ffff:ffff:ffff:ffff:ffff,KE
2c0f:faf0::,2c0f:faf0:ffff:ffff:ffff:ffff:ffff:ffff,AO
2c0f:faf8::,2c0f:faf8:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fb00::,2c0f:fb00:ffff:ffff:ffff:ffff:ffff:ffff,UG
2c0f:fb08::,2c0f:fb08:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fb10::,2c0f:fb10:ffff:ffff:ffff:ffff:ffff:ffff,LY
2c0f:fb18::,2c0f:fb18:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fb20::,2c0f:fb20:ffff:ffff:ffff:ffff:ffff:ffff,MA
2c0f:fb30::,2c0f:fb30:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fb38::,2c0f:fb38:ffff:ffff:ffff:ffff:ffff:ffff,SO
2c0f:fb40::,2c0f:fb40:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fb48::,2c0f:fb48:ffff:ffff:ffff:ffff:ffff:ffff,MZ
2c0f:fb50::,2c0f:fb50:ffff:ffff:ffff:ffff:ffff:ffff,KE
2c0f:fb58::,2c0f:fb58:ffff:ffff:ffff:ffff:ffff:ffff,AO
2c0f:fb60::,2c0f:fb60:ffff:ffff:ffff:ffff:ffff:ffff,KE
2c0f:fb68::,2c0f:fb68:ffff:ffff:ffff:ffff:ffff:ffff,LS
2c0f:fb70::,2c0f:fb70:ffff:ffff:ffff:ffff:ffff:ffff,AO
2c0f:fb78::,2c0f:fb78:ffff:ffff:ffff:ffff:ffff:ffff,TZ
2c0f:fb80::,2c0f:fb80:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fb88::,2c0f:fb88:ffff:ffff:ffff:ffff:ffff:ffff,TZ
2c0f:fb90::,2c0f:fb90:ffff:ffff:ffff:ffff:ffff:ffff,MZ
2c0f:fb98::,2c0f:fb98:ffff:ffff:ffff:ffff:ffff:ffff,NG
2c0f:fba0::,2c0f:fba0:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fba8::,2c0f:fba8:ffff:ffff:ffff:ffff:ffff:ffff,NG
2c0f:fbb0::,2c0f:fbb0:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fbb8::,2c0f:fbb8:ffff:ffff:ffff:ffff:ffff:ffff,TZ
2c0f:fbc0::,2c0f:fbc0:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fbc8::,2c0f:fbc8:ffff:ffff:ffff:ffff:ffff:ffff,UG
2c0f:fbd0::,2c0f:fbd0:ffff:ffff:ffff:ffff:ffff:ffff,GN
2c0f:fbd8::,2c0f:fbd8:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fbe0::,2c0f:fc1f:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fc40::,2c0f:fc40:ffff:ffff:ffff:ffff:ffff:ffff,EG
2c0f:fc48::,2c0f:fc48:ffff:ffff:ffff:ffff:ffff:ffff,MW
2c0f:fc58::,2c0f:fc58:ffff:ffff:ffff:ffff:ffff:ffff,MW
2c0f:fc60::,2c0f:fc61:ffff:ffff:ffff:ffff:ffff:ffff,NG
2c0f:fc68::,2c0f:fc68:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fc70::,2c0f:fc70:ffff:ffff:ffff:ffff:ffff:ffff,KE
2c0f:fc80::,2c0f:fc80:ffff:ffff:ffff:ffff:ffff:ffff,KE
2c0f:fc88::,2c0f:fc89:ffff:ffff:ffff:ffff:ffff:ffff,EG
2c0f:fc90::,2c0f:fc90:ffff:ffff:ffff:ffff:ffff:ffff,NG
2c0f:fc98::,2c0f:fc98:ffff:ffff:ffff:ffff:ffff:ffff,NG
2c0f:fca0::,2c0f:fca0:ffff:ffff:ffff:ffff:ffff:ffff,GH
2c0f:fca8::,2c0f:fca8:ffff:ffff:ffff:ffff:ffff:ffff,GH
2c0f:fcb0::,2c0f:fcb0:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fcb8::,2c0f:fcb8:ffff:ffff:ffff:ffff:ffff:ffff,GM
2c0f:fcc8::,2c0f:fcc8:ffff:ffff:ffff:ffff:ffff:ffff,ZM
2c0f:fcd0::,2c0f:fcd0:ffff:ffff:ffff:ffff:ffff:ffff,ZM
2c0f:fcd8::,2c0f:fcd8:ffff:ffff:ffff:ffff:ffff:ffff,SO
2c0f:fce0::,2c0f:fce0:ffff:ffff:ffff:ffff:ffff:ffff,KE
2c0f:fce8::,2c0f:fce8:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fcf0::,2c0f:fcf0:ffff:ffff:ffff:ffff:ffff:ffff,TZ
2c0f:fcf8::,2c0f:fcf8:ffff:ffff:ffff:ffff:ffff:ffff,GH
2c0f:fd00::,2c0f:fd00:ffff:ffff:ffff:ffff:ffff:ffff,LS
2c0f:fd08::,2c0f:fd08:ffff:ffff:ffff:ffff:ffff:ffff,GM
2c0f:fd10::,2c0f:fd10:ffff:ffff:ffff:ffff:ffff:ffff,TZ
2c0f:fd18::,2c0f:fd18:ffff:ffff:ffff:ffff:ffff:ffff,SC
2c0f:fd20::,2c0f:fd20:ffff:ffff:ffff:ffff:ffff:ffff,TZ
2c0f:fd28::,2c0f:fd28:ffff:ffff:ffff:ffff:ffff:ffff,NG
2c0f:fd30::,2c0f:fd30:ffff:ffff:ffff:ffff:ffff:ffff,TZ
2c0f:fd38::,2c0f:fd38:ffff:ffff:ffff:ffff:ffff:ffff,NG
2c0f:fd40::,2c0f:fd40:ffff:ffff:ffff:ffff:ffff:ffff,ZM
2c0f:fd48::,2c0f:fd48:ffff:ffff:ffff:ffff:ffff:ffff,ZW
2c0f:fd50::,2c0f:fd50:ffff:ffff:ffff:ffff:ffff:ffff,MW
2c0f:fd58::,2c0f:fd58:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fd60::,2c0f:fd60:ffff:ffff:ffff:ffff:ffff:ffff,UG
2c0f:fd68::,2c0f:fd68:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fd78::,2c0f:fd78:ffff:ffff:ffff:ffff:ffff:ffff,BI
2c0f:fd80::,2c0f:fd80:ffff:ffff:ffff:ffff:ffff:ffff,BF
2c0f:fd88::,2c0f:fd88:ffff:ffff:ffff:ffff:ffff:ffff,GH
2c0f:fd90::,2c0f:fd90:ffff:ffff:ffff:ffff:ffff:ffff,ZM
2c0f:fd98::,2c0f:fd98:ffff:ffff:ffff:ffff:ffff:ffff,ZM
2c0f:fda0::,2c0f:fda0:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fda8::,2c0f:fda8:ffff:ffff:ffff:ffff:ffff:ffff,TZ
2c0f:fdb0::,2c0f:fdb0:ffff:ffff:ffff:ffff:ffff:ffff,TZ
2c0f:fdb8::,2c0f:fdb8:ffff:ffff:ffff:ffff:ffff:ffff,UG
2c0f:fdc0::,2c0f:fdc0:ffff:ffff:ffff:ffff:ffff:ffff,TZ
2c0f:fdc8::,2c0f:fdc8:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fdd0::,2c0f:fdd0:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fdd8::,2c0f:fdd8:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fde8::,2c0f:fde8:ffff:ffff:ffff:ffff:ffff:ffff,MW
2c0f:fdf0::,2c0f:fdf0:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fdf8::,2c0f:fdf8:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fe08::,2c0f:fe08:ffff:ffff:ffff:ffff:ffff:ffff,KE
2c0f:fe10::,2c0f:fe10:ffff:ffff:ffff:ffff:ffff:ffff,UG
2c0f:fe18::,2c0f:fe18:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fe20::,2c0f:fe20:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fe28::,2c0f:fe28:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fe30::,2c0f:fe30:ffff:ffff:ffff:ffff:ffff:ffff,MU
2c0f:fe38::,2c0f:fe38:ffff:ffff:ffff:ffff:ffff:ffff,KE
2c0f:fe40::,2c0f:fe40:8001:f:ffff:ffff:ffff:ffff,MU
2c0f:fe40:8001:10::,2c0f:fe40:8001:10:ffff:ffff:ffff:ffff,KE
2c0f:fe40:8001:11::,2c0f:fe40:80fe:ffff:ffff:ffff:ffff:ffff,MU
2c0f:fe40:80ff::,2c0f:fe40:80ff:7fff:ffff:ffff:ffff:ffff,KE
2c0f:fe40:80ff:8000::,2c0f:fe40:ffff:ffff:ffff:ffff:ffff:ffff,MU
2c0f:fe50::,2c0f:fe50:ffff:ffff:ffff:ffff:ffff:ffff,DZ
2c0f:fe58::,2c0f:fe58:ffff:ffff:ffff:ffff:ffff:ffff,LS
2c0f:fe60::,2c0f:fe60:ffff:ffff:ffff:ffff:ffff:ffff,RW
2c0f:fe68::,2c0f:fe68:ffff:ffff:ffff:ffff:ffff:ffff,MU
2c0f:fe70::,2c0f:fe70:ffff:ffff:ffff:ffff:ffff:ffff,UG
2c0f:fe78::,2c0f:fe78:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fe88::,2c0f:fe88:ffff:ffff:ffff:ffff:ffff:ffff,KE
2c0f:fe90::,2c0f:fe90:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fe98::,2c0f:fe98:ffff:ffff:ffff:ffff:ffff:ffff,TZ
2c0f:fea0::,2c0f:fea0:ffff:ffff:ffff:ffff:ffff:ffff,NG
2c0f:fea8::,2c0f:fea8:ffff:ffff:ffff:ffff:ffff:ffff,NG
2c0f:feb0::,2c0f:feb0:16:ffff:ffff:ffff:ffff:ffff,MU
2c0f:feb0:17::,2c0f:feb0:17:7fff:ffff:ffff:ffff:ffff,KE
2c0f:feb0:17:8000::,2c0f:feb0:1e:ffff:ffff:ffff:ffff:ffff,MU
2c0f:feb0:1f::,2c0f:feb0:1f:7fff:ffff:ffff:ffff:ffff,ZA
2c0f:feb0:1f:8000::,2c0f:feb0:1f:ffff:ffff:ffff:ffff:ffff,MU
2c0f:feb0:20::,2c0f:feb0:20:7fff:ffff:ffff:ffff:ffff,ZA
2c0f:feb0:20:8000::,2c0f:feb0:2f:7fff:ffff:ffff:ffff:ffff,MU
2c0f:feb0:2f:8000::,2c0f:feb0:2f:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:feb0:30::,2c0f:feb1:ffff:ffff:ffff:ffff:ffff:ffff,MU
2c0f:feb8::,2c0f:feb8:ffff:ffff:ffff:ffff:ffff:ffff,ZM
2c0f:fec0::,2c0f:fec0:ffff:ffff:ffff:ffff:ffff:ffff,UG
2c0f:fec8::,2c0f:fec8:ffff:ffff:ffff:ffff:ffff:ffff,SD
2c0f:fed8::,2c0f:fed8:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:fee0::,2c0f:fee0:ffff:ffff:ffff:ffff:ffff:ffff,EG
2c0f:fef0::,2c0f:fef0:ffff:ffff:ffff:ffff:ffff:ffff,SC
2c0f:fef8::,2c0f:fef8:ffff:ffff:ffff:ffff:ffff:ffff,KE
2c0f:ff00::,2c0f:ff00:ffff:ffff:ffff:ffff:ffff:ffff,BW
2c0f:ff08::,2c0f:ff08:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:ff10::,2c0f:ff10:ffff:ffff:ffff:ffff:ffff:ffff,CD
2c0f:ff18::,2c0f:ff18:ffff:ffff:ffff:ffff:ffff:ffff,KE
2c0f:ff20::,2c0f:ff20:ffff:ffff:ffff:ffff:ffff:ffff,NG
2c0f:ff28::,2c0f:ff28:ffff:ffff:ffff:ffff:ffff:ffff,SD
2c0f:ff30::,2c0f:ff30:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:ff40::,2c0f:ff80:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:ff88::,2c0f:ff88:ffff:ffff:ffff:ffff:ffff:ffff,NG
2c0f:ff90::,2c0f:ff90:ffff:ffff:ffff:ffff:ffff:ffff,KE
2c0f:ff98::,2c0f:ff98:ffff:ffff:ffff:ffff:ffff:ffff,UG
2c0f:ffa0::,2c0f:ffa0:ffff:ffff:ffff:ffff:ffff:ffff,UG
2c0f:ffa8::,2c0f:ffa8:ffff:ffff:ffff:ffff:ffff:ffff,LS
2c0f:ffb0::,2c0f:ffb0:ffff:ffff:ffff:ffff:ffff:ffff,NG
2c0f:ffb8::,2c0f:ffb8:ffff:ffff:ffff:ffff:ffff:ffff,SD
2c0f:ffc0::,2c0f:ffc0:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:ffc8::,2c0f:ffc8:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:ffd0::,2c0f:ffd0:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:ffd8::,2c0f:ffd8:ffff:ffff:ffff:ffff:ffff:ffff,ZA
2c0f:ffe8::,2c0f:ffe8:ffff:ffff:ffff:ffff:ffff:ffff,NG
2c0f:fff0::,2c0f:fff0:ffff:ffff:ffff:ffff:ffff:ffff,NG
0707010000001A000041ED00000000000000000000000267D9BD4E00000000000000000000000000000000000000000000001800000000snowflake-2.11.0/client0707010000001B000081A400000000000000000000000167D9BD4E00001442000000000000000000000000000000000000002200000000snowflake-2.11.0/client/README.md<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents**

- [Dependencies](#dependencies)
- [Building the Snowflake client](#building-the-snowflake-client)
- [Running the Snowflake client with Tor](#running-the-snowflake-client-with-tor)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->

This is the Tor client component of Snowflake.

It is based on the [goptlib](https://gitweb.torproject.org/pluggable-transports/goptlib.git/) pluggable transports library for Tor.


### Dependencies

- Go 1.15+
- We use the [pion/webrtc](https://github.com/pion/webrtc) library for WebRTC communication with Snowflake proxies. Note: running `go get` will fetch this dependency automatically during the build process.

### Building the Snowflake client

To build the Snowflake client, make sure you are in the `client/` directory, and then run:

```
go get
go build
```

### Running the Snowflake client with Tor

The Snowflake client can be configured through SOCKS arguments passed on the `Bridge` line. We have a few example `torrc` files in this directory. We recommend the following `torrc` options by default:
```
UseBridges 1

ClientTransportPlugin snowflake exec ./client -log snowflake.log

Bridge snowflake 192.0.2.3:80 2B280B23E1107BB62ABFC40DDCC8824814F80A72 fingerprint=2B280B23E1107BB62ABFC40DDCC8824814F80A72 url=https://snowflake-broker.torproject.net.global.prod.fastly.net/ fronts=foursquare.com,github.githubassets.com ice=stun:stun.l.google.com:19302,stun:stun.antisip.com:3478,stun:stun.bluesip.net:3478,stun:stun.dus.net:3478,stun:stun.epygi.com:3478,stun:stun.sonetel.com:3478,stun:stun.uls.co.za:3478,stun:stun.voipgate.com:3478,stun:stun.voys.nl:3478 utls-imitate=hellorandomizedalpn
```

`fingerprint=` is the fingerprint of the bridge that the client will ultimately connect to.

`url=` is the URL of a broker instance. If you would like to try out Snowflake with your own broker, simply provide the URL of your broker instance with this option.

`fronts=` is an optional, comma-separated list of front domains for the broker request.

`ice=` is a comma-separated list of ICE servers. These must be STUN (over UDP) servers with the form stun:<var>host</var>[:<var>port</var>]. We recommend using servers that have implemented NAT discovery. See our wiki page on [NAT traversal](https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/wikis/NAT-matching) for more information.

`utls-imitate=` instructs the client to use TLS fingerprinting resistance when rendezvousing with the broker.

To bootstrap Tor, run:
```
tor -f torrc
```
This should start the client plugin, bootstrapping to 100% using WebRTC.

### Registration methods

The Snowflake client supports a few different ways of communicating with the broker.
This initial step is sometimes called rendezvous.

#### Domain fronting HTTPS

For domain fronting rendezvous, use the `-url` and `-front` command-line options together.
[Domain fronting](https://www.bamsoftware.com/papers/fronting/)
hides the externally visible domain name from an external observer,
making it appear that the Snowflake client is communicating with some server
other than the Snowflake broker.

* `-url` is the HTTPS URL of a forwarder to the broker, on some service that supports domain fronting, such as a CDN.
* `-front` is the domain name to show externally. It must be another domain on the same service.

Example:
```
-url https://snowflake-broker.torproject.net.global.prod.fastly.net/ \
-front cdn.sstatic.net \
```

#### AMP cache

For AMP cache rendezvous, use the `-url`, `-ampcache`, and `-front` command-line options together.
[AMP](https://amp.dev/documentation/) is a standard for web pages for mobile computers.
An [AMP cache](https://amp.dev/documentation/guides-and-tutorials/learn/amp-caches-and-cors/how_amp_pages_are_cached/)
is a cache and proxy specialized for AMP pages.
The Snowflake broker has the ability to make its client registration responses look like AMP pages,
so it can be accessed through an AMP cache.
When you use AMP cache rendezvous, it appears to an observer that the Snowflake client
is accessing an AMP cache, or some other domain operated by the same organization.
You still need to use the `-front` command-line option, because the
[format of AMP cache URLs](https://amp.dev/documentation/guides-and-tutorials/learn/amp-caches-and-cors/amp-cache-urls/)
would otherwise reveal the domain name of the broker.

There is only one AMP cache that works with this option,
the Google AMP cache at https://cdn.ampproject.org/.

* `-url` is the HTTPS URL of the broker.
* `-ampcache` is `https://cdn.ampproject.org/`.
* `-front` is any Google domain, such as `www.google.com`.

Example:
```
-url https://snowflake-broker.torproject.net/ \
-ampcache https://cdn.ampproject.org/ \
-front www.google.com \
```

#### Direct access

It is also possible to access the broker directly using HTTPS, without domain fronting,
for testing purposes. This mode is not suitable for circumvention, because the
broker is easily blocked by its address.
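
Example (a minimal sketch for testing only; it assumes the broker URL shown above accepts direct requests, and omits `-front`):
```
-url https://snowflake-broker.torproject.net/ \
```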
0707010000001C000041ED00000000000000000000000267D9BD4E00000000000000000000000000000000000000000000001C00000000snowflake-2.11.0/client/lib0707010000001D000081A400000000000000000000000167D9BD4E00000333000000000000000000000000000000000000002A00000000snowflake-2.11.0/client/lib/interfaces.gopackage snowflake_client

// Tongue is an interface for catching Snowflakes. (aka the remote dialer)
type Tongue interface {
	// Catch makes a connection to a new snowflake.
	Catch() (*WebRTCPeer, error)

	// GetMax returns the maximum number of snowflakes a client can have.
	GetMax() int
}

// SnowflakeCollector is an interface for managing a client's collection of snowflakes.
type SnowflakeCollector interface {
	// Collect adds a snowflake to the collection.
	// The implementation of Collect should decide how to connect to and maintain
	// the connection to the WebRTCPeer.
	Collect() (*WebRTCPeer, error)

	// Pop removes and returns the most available snowflake from the collection.
	Pop() *WebRTCPeer

	// Melted returns a channel that will signal when the collector has stopped.
	Melted() <-chan struct{}
}
0707010000001E000081A400000000000000000000000167D9BD4E000013B2000000000000000000000000000000000000002800000000snowflake-2.11.0/client/lib/lib_test.gopackage snowflake_client

import (
	"fmt"
	"net"
	"testing"
	"time"

	. "github.com/smartystreets/goconvey/convey"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/event"
)

type FakeDialer struct {
	max int
}

func (w FakeDialer) Catch() (*WebRTCPeer, error) {
	fmt.Println("Caught a dummy snowflake.")
	return &WebRTCPeer{closed: make(chan struct{})}, nil
}

func (w FakeDialer) GetMax() int {
	return w.max
}

type FakeSocksConn struct {
	net.Conn
	rejected bool
}

func (f FakeSocksConn) Reject() error {
	f.rejected = true
	return nil
}
func (f FakeSocksConn) Grant(addr *net.TCPAddr) error { return nil }

func TestSnowflakeClient(t *testing.T) {

	Convey("Peers", t, func() {
		Convey("Can construct", func() {
			d := &FakeDialer{max: 1}
			p, _ := NewPeers(d)
			So(p.Tongue.GetMax(), ShouldEqual, 1)
			So(p.snowflakeChan, ShouldNotBeNil)
			So(cap(p.snowflakeChan), ShouldEqual, 1)
		})

		Convey("Collecting a Snowflake requires a Tongue.", func() {
			p, err := NewPeers(nil)
			So(err, ShouldNotBeNil)
			// Set the dialer so that collection is possible.
			d := &FakeDialer{max: 1}
			p, err = NewPeers(d)
			_, err = p.Collect()
			So(err, ShouldBeNil)
			So(p.Count(), ShouldEqual, 1)
			// S
			_, err = p.Collect()
		})

		Convey("Collection continues until capacity.", func() {
			c := 5
			p, _ := NewPeers(FakeDialer{max: c})
			// Fill up to capacity.
			for i := 0; i < c; i++ {
				fmt.Println("Adding snowflake ", i)
				_, err := p.Collect()
				So(err, ShouldBeNil)
				So(p.Count(), ShouldEqual, i+1)
			}
			// But adding another gives an error.
			So(p.Count(), ShouldEqual, c)
			_, err := p.Collect()
			So(err, ShouldNotBeNil)
			So(p.Count(), ShouldEqual, c)

			// But popping allows it to continue.
			s := p.Pop()
			s.Close()
			So(s, ShouldNotBeNil)
			So(p.Count(), ShouldEqual, c-1)

			_, err = p.Collect()
			So(err, ShouldBeNil)
			So(p.Count(), ShouldEqual, c)
		})

		Convey("Count correctly purges peers marked for deletion.", func() {
			p, _ := NewPeers(FakeDialer{max: 5})
			p.Collect()
			p.Collect()
			p.Collect()
			p.Collect()
			So(p.Count(), ShouldEqual, 4)
			s := p.Pop()
			s.Close()
			So(p.Count(), ShouldEqual, 3)
			s = p.Pop()
			s.Close()
			So(p.Count(), ShouldEqual, 2)
		})

		Convey("End Closes all peers.", func() {
			cnt := 5
			p, _ := NewPeers(FakeDialer{max: cnt})
			for i := 0; i < cnt; i++ {
				p.activePeers.PushBack(&WebRTCPeer{closed: make(chan struct{})})
			}
			So(p.Count(), ShouldEqual, cnt)
			p.End()
			<-p.Melted()
			So(p.Count(), ShouldEqual, 0)
		})

		Convey("Pop skips over closed peers.", func() {
			p, _ := NewPeers(FakeDialer{max: 4})
			wc1, _ := p.Collect()
			wc2, _ := p.Collect()
			wc3, _ := p.Collect()
			So(wc1, ShouldNotBeNil)
			So(wc2, ShouldNotBeNil)
			So(wc3, ShouldNotBeNil)
			wc1.Close()
			r := p.Pop()
			So(p.Count(), ShouldEqual, 2)
			So(r, ShouldEqual, wc2)
			wc4, _ := p.Collect()
			wc2.Close()
			wc3.Close()
			r = p.Pop()
			So(r, ShouldEqual, wc4)
		})

		Convey("Terminate Connect() loop", func() {
			p, _ := NewPeers(FakeDialer{max: 4})
			go func() {
				for {
					p.Collect()
					select {
					case <-p.Melted():
						return
					default:
					}
				}
			}()
			<-time.After(10 * time.Second)

			p.End()
			<-p.Melted()
			So(p.Count(), ShouldEqual, 0)
		})

	})

	Convey("Dialers", t, func() {
		Convey("Can construct WebRTCDialer.", func() {
			broker := &BrokerChannel{}
			d := NewWebRTCDialer(broker, nil, 1)
			So(d, ShouldNotBeNil)
			So(d.BrokerChannel, ShouldNotBeNil)
		})
		SkipConvey("WebRTCDialer can Catch a snowflake.", func() {
			broker := &BrokerChannel{}
			d := NewWebRTCDialer(broker, nil, 1)
			conn, err := d.Catch()
			So(conn, ShouldBeNil)
			So(err, ShouldNotBeNil)
		})
	})

}

func TestWebRTCPeer(t *testing.T) {
	Convey("WebRTCPeer", t, func(c C) {
		p := &WebRTCPeer{closed: make(chan struct{}),
			eventsLogger: event.NewSnowflakeEventDispatcher()}
		Convey("checks for staleness", func() {
			go p.checkForStaleness(time.Second)
			<-time.After(2 * time.Second)
			So(p.Closed(), ShouldEqual, true)
		})
	})
}

func TestICEServerParser(t *testing.T) {
	Convey("Test parsing of ICE servers", t, func() {
		for _, test := range []struct {
			input  []string
			urls   [][]string
			length int
		}{
			{
				[]string{"stun:stun.l.google.com:19302", "stun:stun.ekiga.net"},
				[][]string{[]string{"stun:stun.l.google.com:19302"}, []string{"stun:stun.ekiga.net:3478"}},
				2,
			},
			{
				[]string{"stun:stun1.l.google.com:19302", "stun.ekiga.net", "stun:stun.example.com:1234/path?query",
					"https://example.com", "turn:relay.metered.ca:80?transport=udp"},
				[][]string{[]string{"stun:stun1.l.google.com:19302"}},
				1,
			},
		} {
			servers := parseIceServers(test.input)

			if test.urls == nil {
				So(servers, ShouldBeNil)
			} else {
				So(servers, ShouldNotBeNil)
			}

			So(len(servers), ShouldEqual, test.length)

			for _, server := range servers {
				So(test.urls, ShouldContain, server.URLs)
			}

		}

	})
}
0707010000001F000081A400000000000000000000000167D9BD4E00000EF8000000000000000000000000000000000000002500000000snowflake-2.11.0/client/lib/peers.gopackage snowflake_client

import (
	"container/list"
	"errors"
	"fmt"
	"log"
	"sync"
)

// Peers is a container that keeps track of multiple WebRTC remote peers.
// Implements |SnowflakeCollector|.
//
// Maintaining a set of pre-connected Peers with fresh but inactive datachannels
// allows rapid recovery when the current WebRTC Peer disconnects.
//
// Note: For now, only one remote can be active at any given moment.
// This is a property of Tor circuits & its current multiplexing constraints,
// but could be updated if that changes.
// (Also, this constraint does not necessarily apply to the more generic PT
// version of Snowflake)
type Peers struct {
	Tongue
	bytesLogger bytesLogger

	snowflakeChan chan *WebRTCPeer
	activePeers   *list.List

	melt chan struct{}

	collectLock sync.Mutex
	closeOnce   sync.Once
}

// NewPeers constructs a fresh container of remote peers.
func NewPeers(tongue Tongue) (*Peers, error) {
	p := &Peers{}
	// Use buffered go channel to pass snowflakes onwards to the SOCKS handler.
	if tongue == nil {
		return nil, errors.New("missing Tongue to catch Snowflakes with")
	}
	p.snowflakeChan = make(chan *WebRTCPeer, tongue.GetMax())
	p.activePeers = list.New()
	p.melt = make(chan struct{})
	p.Tongue = tongue
	return p, nil
}

// Collect connects to and adds a new remote peer as part of |SnowflakeCollector| interface.
func (p *Peers) Collect() (*WebRTCPeer, error) {
	// Engage the Snowflake Catching interface, which must be available.
	p.collectLock.Lock()
	defer p.collectLock.Unlock()
	select {
	case <-p.melt:
		return nil, fmt.Errorf("Snowflakes have melted")
	default:
	}
	if nil == p.Tongue {
		return nil, errors.New("missing Tongue to catch Snowflakes with")
	}
	cnt := p.Count()
	capacity := p.Tongue.GetMax()
	s := fmt.Sprintf("Currently at [%d/%d]", cnt, capacity)
	if cnt >= capacity {
		return nil, fmt.Errorf("At capacity [%d/%d]", cnt, capacity)
	}
	log.Println("WebRTC: Collecting a new Snowflake.", s)
	// BUG: some broker conflict here.
	connection, err := p.Tongue.Catch()
	if nil != err {
		return nil, err
	}
	// Track new valid Snowflake in internal collection and pass along.
	p.activePeers.PushBack(connection)
	p.snowflakeChan <- connection
	return connection, nil
}

// Pop blocks until an available, valid snowflake appears.
// Pop will return nil after End has been called.
func (p *Peers) Pop() *WebRTCPeer {
	for {
		snowflake, ok := <-p.snowflakeChan
		if !ok {
			return nil
		}
		if snowflake.Closed() {
			continue
		}
		// Set to use the same rate-limited traffic logger to keep consistency.
		snowflake.bytesLogger = p.bytesLogger
		return snowflake
	}
}

// Melted returns a channel that will close when peers stop being collected.
// Melted is a necessary part of |SnowflakeCollector| interface.
func (p *Peers) Melted() <-chan struct{} {
	return p.melt
}

// Count returns the total available Snowflakes (including the active ones)
// The count only reduces when connections themselves close, rather than when
// they are popped.
func (p *Peers) Count() int {
	p.purgeClosedPeers()
	return p.activePeers.Len()
}

func (p *Peers) purgeClosedPeers() {
	for e := p.activePeers.Front(); e != nil; {
		next := e.Next()
		conn := e.Value.(*WebRTCPeer)
		// Purge those marked for deletion.
		if conn.Closed() {
			p.activePeers.Remove(e)
		}
		e = next
	}
}

// End closes all active connections to Peers contained here, and stops the
// collection of future Peers.
func (p *Peers) End() {
	p.closeOnce.Do(func() {
		close(p.melt)
		p.collectLock.Lock()
		defer p.collectLock.Unlock()
		close(p.snowflakeChan)
		cnt := p.Count()
		for e := p.activePeers.Front(); e != nil; {
			next := e.Next()
			conn := e.Value.(*WebRTCPeer)
			conn.Close()
			p.activePeers.Remove(e)
			e = next
		}
		log.Printf("WebRTC: melted all %d snowflakes.", cnt)
	})
}
07070100000020000081A400000000000000000000000167D9BD4E00002915000000000000000000000000000000000000002A00000000snowflake-2.11.0/client/lib/rendezvous.go// WebRTC rendezvous requires the exchange of SessionDescriptions between
// peers in order to establish a PeerConnection.

package snowflake_client

import (
	"crypto/tls"
	"errors"
	"fmt"
	"log"
	"net/http"
	"net/url"
	"sync"
	"sync/atomic"
	"time"

	"github.com/pion/webrtc/v4"
	utls "github.com/refraction-networking/utls"

	utlsutil "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/ptutil/utls"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/certs"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/event"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/messages"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/nat"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/util"
)

const (
	brokerErrorUnexpected string = "Unexpected error, no answer."
	rendezvousErrorMsg    string = "One of SQS, AmpCache, or Domain Fronting rendezvous methods must be used."

	readLimit = 100000 // Maximum number of bytes to be read from an HTTP response
)

// RendezvousMethod represents a way of communicating with the broker: sending
// an encoded client poll request (SDP offer) and receiving an encoded client
// poll response (SDP answer) in return. RendezvousMethod is used by
// BrokerChannel, which is in charge of encoding and decoding, and all other
// tasks that are independent of the rendezvous method.
type RendezvousMethod interface {
	Exchange([]byte) ([]byte, error)
}
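// As a sketch of that contract (the type below is hypothetical and not part
// of this package): any value with a matching Exchange method can be used,
// for example as the Rendezvous field of a BrokerChannel or via
// Transport.SetRendezvousMethod.
//
//	type fileRendezvous struct{ dir string }
//
//	func (r *fileRendezvous) Exchange(encPollReq []byte) ([]byte, error) {
//		// Drop the encoded poll request where some out-of-band channel
//		// can pick it up, then read back the encoded poll response.
//		if err := os.WriteFile(filepath.Join(r.dir, "req"), encPollReq, 0600); err != nil {
//			return nil, err
//		}
//		// ... wait for the answer to appear out of band ...
//		return os.ReadFile(filepath.Join(r.dir, "resp"))
//	}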

// BrokerChannel uses a RendezvousMethod to communicate with the Snowflake broker.
// The BrokerChannel is responsible for encoding and decoding SDP offers and answers;
// RendezvousMethod is responsible for the exchange of encoded information.
type BrokerChannel struct {
	Rendezvous         RendezvousMethod
	keepLocalAddresses bool
	natType            string
	lock               sync.Mutex
	BridgeFingerprint  string
}

// createBrokerTransport returns the http.RoundTripper used for broker
// requests. Unlike http.DefaultTransport, it does not consult
// ProxyFromEnvironment: the proxy is nil unless one is passed in explicitly.
// It also sets a 15 second ResponseHeaderTimeout.
func createBrokerTransport(proxy *url.URL) http.RoundTripper {
	tlsConfig := &tls.Config{
		RootCAs: certs.GetRootCAs(),
	}
	transport := &http.Transport{TLSClientConfig: tlsConfig}
	transport.Proxy = nil
	if proxy != nil {
		transport.Proxy = http.ProxyURL(proxy)
	}
	transport.ResponseHeaderTimeout = 15 * time.Second
	return transport
}

func newBrokerChannelFromConfig(config ClientConfig) (*BrokerChannel, error) {
	log.Println("Rendezvous using Broker at:", config.BrokerURL)

	if len(config.FrontDomains) != 0 {
		log.Printf("Domain fronting using a randomly selected domain from: %v", config.FrontDomains)
	}

	brokerTransport := createBrokerTransport(config.CommunicationProxy)

	if config.UTLSClientID != "" {
		utlsClientHelloID, err := utlsutil.NameToUTLSID(config.UTLSClientID)
		if err != nil {
			return nil, fmt.Errorf("unable to create broker channel: %w", err)
		}
		utlsConfig := &utls.Config{
			RootCAs: certs.GetRootCAs(),
		}
		brokerTransport = utlsutil.NewUTLSHTTPRoundTripperWithProxy(utlsClientHelloID, utlsConfig, brokerTransport,
			config.UTLSRemoveSNI, config.CommunicationProxy)
	}

	var rendezvous RendezvousMethod
	var err error
	if config.SQSQueueURL != "" {
		if config.AmpCacheURL != "" || config.BrokerURL != "" {
			log.Fatalln("Multiple rendezvous methods specified. " + rendezvousErrorMsg)
		}
		if config.SQSCredsStr == "" {
			log.Fatalln("sqscreds must be specified to use SQS rendezvous method.")
		}
		log.Println("Through SQS queue at:", config.SQSQueueURL)
		rendezvous, err = newSQSRendezvous(config.SQSQueueURL, config.SQSCredsStr, brokerTransport)
	} else if config.AmpCacheURL != "" && config.BrokerURL != "" {
		log.Println("Through AMP cache at:", config.AmpCacheURL)
		rendezvous, err = newAMPCacheRendezvous(
			config.BrokerURL, config.AmpCacheURL, config.FrontDomains,
			brokerTransport)
	} else if config.BrokerURL != "" {
		rendezvous, err = newHTTPRendezvous(
			config.BrokerURL, config.FrontDomains, brokerTransport)
	} else {
		log.Fatalln("No rendezvous method was specified. " + rendezvousErrorMsg)
	}
	if err != nil {
		return nil, err
	}

	return &BrokerChannel{
		Rendezvous:         rendezvous,
		keepLocalAddresses: config.KeepLocalAddresses,
		natType:            nat.NATUnknown,
		BridgeFingerprint:  config.BridgeFingerprint,
	}, nil
}
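// For reference, the rendezvous method is selected from ClientConfig roughly
// as follows (all values below are illustrative placeholders):
//
//	// HTTP rendezvous, optionally domain fronted:
//	ClientConfig{BrokerURL: "https://broker.example", FrontDomains: []string{"front.example"}}
//
//	// AMP cache rendezvous:
//	ClientConfig{BrokerURL: "https://broker.example", AmpCacheURL: "https://cdn.ampproject.org/"}
//
//	// SQS rendezvous (SQSCredsStr is a base64 encoded credentials blob):
//	ClientConfig{SQSQueueURL: "https://sqs.us-east-1.amazonaws.com/123456789012/example-queue", SQSCredsStr: "..."}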

// Negotiate uses a RendezvousMethod to send the client's WebRTC SDP offer
// and receive a snowflake proxy WebRTC SDP answer in return.
func (bc *BrokerChannel) Negotiate(
	offer *webrtc.SessionDescription,
	natTypeToSend string,
) (
	*webrtc.SessionDescription, error,
) {
	encReq, err := preparePollRequest(offer, natTypeToSend, bc.BridgeFingerprint)
	if err != nil {
		return nil, err
	}

	// Do the exchange using our RendezvousMethod.
	encResp, err := bc.Rendezvous.Exchange(encReq)
	if err != nil {
		return nil, err
	}
	log.Printf("Received answer: %s", string(encResp))

	// Decode the client poll response.
	resp, err := messages.DecodeClientPollResponse(encResp)
	if err != nil {
		return nil, err
	}
	if resp.Error != "" {
		return nil, errors.New(resp.Error)
	}
	return util.DeserializeSessionDescription(resp.Answer)
}

// preparePollRequest is a pure function: it builds and encodes a client poll
// request from the given offer, NAT type, and bridge fingerprint.
func preparePollRequest(
	offer *webrtc.SessionDescription,
	natType string,
	bridgeFingerprint string,
) (encReq []byte, err error) {
	offerSDP, err := util.SerializeSessionDescription(offer)
	if err != nil {
		return nil, err
	}
	req := &messages.ClientPollRequest{
		Offer:       offerSDP,
		NAT:         natType,
		Fingerprint: bridgeFingerprint,
	}
	encReq, err = req.EncodeClientPollRequest()
	return
}

// SetNATType sets the NAT type of the client so we can send it to the WebRTC broker.
func (bc *BrokerChannel) SetNATType(NATType string) {
	bc.lock.Lock()
	bc.natType = NATType
	bc.lock.Unlock()
	log.Printf("NAT Type: %s", NATType)
}

func (bc *BrokerChannel) GetNATType() string {
	bc.lock.Lock()
	defer bc.lock.Unlock()
	return bc.natType
}

// NATPolicy decides which NAT type the client should report to the broker.
// All of the methods of the struct are thread-safe.
type NATPolicy struct {
	assumedUnrestrictedNATAndFailedToConnect atomic.Bool
}

// When our NAT type is unknown, we want to try to connect to a
// restricted / unknown proxy initially
// to offload the unrestricted ones.
// So, instead of always sending the actual NAT type,
// we should use this function to determine the NAT type to send.
//
// This is useful when our STUN servers are blocked or don't support
// the NAT discovery feature, or if they're just slow.
func (p *NATPolicy) NATTypeToSend(actualNatType string) string {
	if !p.assumedUnrestrictedNATAndFailedToConnect.Load() &&
		actualNatType == nat.NATUnknown {
		// If our NAT type is unknown, and we haven't failed to connect
		// with a spoofed NAT type yet, then spoof a NATUnrestricted
		// type.
		return nat.NATUnrestricted
	} else {
		// In all other cases, do not spoof, and just return our actual
		// NAT type (even if it is NATUnknown).
		return actualNatType
	}
}

// This function must be called whenever a connection with a proxy succeeds,
// because the connection outcome tells us about NAT compatibility
// between the proxy and us.
func (p *NATPolicy) Success(actualNATType, sentNATType string) {
	// Yes, right now this does nothing but log.
	if actualNATType != sentNATType {
		log.Printf(
			"Connected to a proxy by using a spoofed NAT type \"%v\"! "+
				"Our actual NAT type was \"%v\"",
			sentNATType,
			actualNATType,
		)
	}
}

// This function must be called whenever a connection with a proxy fails,
// because the connection outcome tells us about NAT compatibility
// between the proxy and us.
func (p *NATPolicy) Failure(actualNATType, sentNATType string) {
	if actualNATType == nat.NATUnknown && sentNATType == nat.NATUnrestricted {
		log.Printf(
			"Tried to connect to a restricted proxy while our NAT type "+
				"is \"%v\", and failed. Let's not do that again.",
			actualNATType,
		)
		p.assumedUnrestrictedNATAndFailedToConnect.Store(true)
	}
}
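// A sketch of the intended call pattern (the surrounding connection attempt
// is hypothetical; the real caller is the WebRTC peer setup code):
//
//	sentNATType := natPolicy.NATTypeToSend(broker.GetNATType())
//	answer, err := broker.Negotiate(offer, sentNATType)
//	// ... try to establish the WebRTC connection using answer ...
//	if connected {
//		natPolicy.Success(broker.GetNATType(), sentNATType)
//	} else {
//		natPolicy.Failure(broker.GetNATType(), sentNATType)
//	}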

// WebRTCDialer implements the |Tongue| interface to catch snowflakes, using BrokerChannel.
type WebRTCDialer struct {
	*BrokerChannel
	// Can be `nil`, in which case we won't apply special logic,
	// and simply always send the current NAT type instead.
	natPolicy    *NATPolicy
	webrtcConfig *webrtc.Configuration
	max          int

	eventLogger event.SnowflakeEventReceiver
	proxy       *url.URL
}

// Deprecated: Use NewWebRTCDialerWithNatPolicyAndEventsAndProxy instead
func NewWebRTCDialer(broker *BrokerChannel, iceServers []webrtc.ICEServer, max int) *WebRTCDialer {
	return NewWebRTCDialerWithNatPolicyAndEventsAndProxy(
		broker, nil, iceServers, max, nil, nil,
	)
}

// Deprecated: Use NewWebRTCDialerWithNatPolicyAndEventsAndProxy instead
func NewWebRTCDialerWithEvents(broker *BrokerChannel, iceServers []webrtc.ICEServer, max int, eventLogger event.SnowflakeEventReceiver) *WebRTCDialer {
	return NewWebRTCDialerWithNatPolicyAndEventsAndProxy(
		broker, nil, iceServers, max, eventLogger, nil,
	)
}

// Deprecated: Use NewWebRTCDialerWithNatPolicyAndEventsAndProxy instead
func NewWebRTCDialerWithEventsAndProxy(broker *BrokerChannel, iceServers []webrtc.ICEServer, max int,
	eventLogger event.SnowflakeEventReceiver, proxy *url.URL,
) *WebRTCDialer {
	return NewWebRTCDialerWithNatPolicyAndEventsAndProxy(
		broker,
		nil,
		iceServers,
		max,
		eventLogger,
		proxy,
	)
}

// NewWebRTCDialerWithNatPolicyAndEventsAndProxy constructs a new WebRTCDialer.
func NewWebRTCDialerWithNatPolicyAndEventsAndProxy(
	broker *BrokerChannel,
	natPolicy *NATPolicy,
	iceServers []webrtc.ICEServer,
	max int,
	eventLogger event.SnowflakeEventReceiver,
	proxy *url.URL,
) *WebRTCDialer {
	config := webrtc.Configuration{
		ICEServers: iceServers,
	}

	return &WebRTCDialer{
		BrokerChannel: broker,
		natPolicy:     natPolicy,
		webrtcConfig:  &config,
		max:           max,

		eventLogger: eventLogger,
		proxy:       proxy,
	}
}

// Catch initializes a WebRTC Connection by signaling through the BrokerChannel.
func (w WebRTCDialer) Catch() (*WebRTCPeer, error) {
	// TODO: [#25591] Fetch ICE server information from Broker.
	// TODO: [#25596] Consider TURN servers here too.
	return NewWebRTCPeerWithNatPolicyAndEventsAndProxy(
		w.webrtcConfig, w.BrokerChannel, w.natPolicy, w.eventLogger, w.proxy,
	)
}

// GetMax returns the maximum number of snowflakes to collect.
func (w WebRTCDialer) GetMax() int {
	return w.max
}
07070100000021000081A400000000000000000000000167D9BD4E00000F65000000000000000000000000000000000000003300000000snowflake-2.11.0/client/lib/rendezvous_ampcache.gopackage snowflake_client

import (
	"errors"
	"io"
	"log"
	"math/rand"
	"net/http"
	"net/url"
	"time"

	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/amp"
)

// ampCacheRendezvous is a RendezvousMethod that communicates with the
// .../amp/client route of the broker, optionally over an AMP cache proxy, and
// with optional domain fronting.
type ampCacheRendezvous struct {
	brokerURL *url.URL
	cacheURL  *url.URL          // Optional AMP cache URL.
	fronts    []string          // Optional front domains to replace url.Host in requests.
	transport http.RoundTripper // Used to make all requests.
}

// newAMPCacheRendezvous creates a new ampCacheRendezvous that contacts the
// broker at the given URL, optionally proxying through an AMP cache, and with
// an optional front domain. transport is the http.RoundTripper used to make all
// requests.
func newAMPCacheRendezvous(broker, cache string, fronts []string, transport http.RoundTripper) (*ampCacheRendezvous, error) {
	brokerURL, err := url.Parse(broker)
	if err != nil {
		return nil, err
	}
	var cacheURL *url.URL
	if cache != "" {
		var err error
		cacheURL, err = url.Parse(cache)
		if err != nil {
			return nil, err
		}
	}
	return &ampCacheRendezvous{
		brokerURL: brokerURL,
		cacheURL:  cacheURL,
		fronts:    fronts,
		transport: transport,
	}, nil
}

func (r *ampCacheRendezvous) Exchange(encPollReq []byte) ([]byte, error) {
	log.Println("Negotiating via AMP cache rendezvous...")
	log.Println("Broker URL:", r.brokerURL)
	log.Println("AMP cache URL:", r.cacheURL)

	// We cannot POST a body through an AMP cache, so instead we GET and
	// encode the client poll request message into the URL.
	reqURL := r.brokerURL.ResolveReference(&url.URL{
		Path: "amp/client/" + amp.EncodePath(encPollReq),
	})

	if r.cacheURL != nil {
		// Rewrite reqURL to its AMP cache version.
		var err error
		reqURL, err = amp.CacheURL(reqURL, r.cacheURL, "c")
		if err != nil {
			return nil, err
		}
	}

	req, err := http.NewRequest("GET", reqURL.String(), nil)
	if err != nil {
		return nil, err
	}

	if len(r.fronts) != 0 {
		// Do domain fronting. Replace the domain in the URL with a randomly
		// selected front, and store the original domain in the HTTP Host header.
		rand.Seed(time.Now().UnixNano())
		front := r.fronts[rand.Intn(len(r.fronts))]
		log.Println("Front domain:", front)
		req.Host = req.URL.Host
		req.URL.Host = front
	}

	resp, err := r.transport.RoundTrip(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	log.Printf("AMP cache rendezvous response: %s", resp.Status)
	if resp.StatusCode != http.StatusOK {
		// A non-200 status indicates an error:
		// * If the broker returns a page with invalid AMP, then the AMP
		//   cache returns a redirect that would bypass the cache.
		// * If the broker returns a 5xx status, the AMP cache
		//   translates it to a 404.
		// https://amp.dev/documentation/guides-and-tutorials/learn/amp-caches-and-cors/amp-cache-urls/#redirect-%26-error-handling
		return nil, errors.New(brokerErrorUnexpected)
	}
	if _, err := resp.Location(); err == nil {
		// The Google AMP Cache may return a "silent redirect" with
		// status 200, a Location header set, and a JavaScript redirect
		// in the body. The redirect points directly at the origin
		// server for the request (bypassing the AMP cache). We do not
		// follow redirects nor execute JavaScript, but in any case we
		// cannot extract information from this response and can only
		// treat it as an error.
		return nil, errors.New(brokerErrorUnexpected)
	}

	lr := io.LimitReader(resp.Body, readLimit+1)
	dec, err := amp.NewArmorDecoder(lr)
	if err != nil {
		return nil, err
	}
	encPollResp, err := io.ReadAll(dec)
	if err != nil {
		return nil, err
	}
	if lr.(*io.LimitedReader).N == 0 {
		// We hit readLimit while decoding AMP armor, that's an error.
		return nil, io.ErrUnexpectedEOF
	}

	return encPollResp, err
}
07070100000022000081A400000000000000000000000167D9BD4E000008DA000000000000000000000000000000000000002F00000000snowflake-2.11.0/client/lib/rendezvous_http.gopackage snowflake_client

import (
	"bytes"
	"errors"
	"io"
	"log"
	"math/rand"
	"net/http"
	"net/url"
	"time"
)

// httpRendezvous is a RendezvousMethod that communicates with the .../client
// route of the broker over HTTP or HTTPS, with optional domain fronting.
type httpRendezvous struct {
	brokerURL *url.URL
	fronts    []string          // Optional front domains to replace url.Host in requests.
	transport http.RoundTripper // Used to make all requests.
}

// newHTTPRendezvous creates a new httpRendezvous that contacts the broker at
// the given URL, with an optional front domain. transport is the
// http.RoundTripper used to make all requests.
func newHTTPRendezvous(broker string, fronts []string, transport http.RoundTripper) (*httpRendezvous, error) {
	brokerURL, err := url.Parse(broker)
	if err != nil {
		return nil, err
	}
	return &httpRendezvous{
		brokerURL: brokerURL,
		fronts:    fronts,
		transport: transport,
	}, nil
}

func (r *httpRendezvous) Exchange(encPollReq []byte) ([]byte, error) {
	log.Println("Negotiating via HTTP rendezvous...")
	log.Println("Target URL: ", r.brokerURL.Host)

	// Suffix the path with the broker's client registration handler.
	reqURL := r.brokerURL.ResolveReference(&url.URL{Path: "client"})
	req, err := http.NewRequest("POST", reqURL.String(), bytes.NewReader(encPollReq))
	if err != nil {
		return nil, err
	}

	if len(r.fronts) != 0 {
		// Do domain fronting. Replace the domain in the URL with a randomly
		// selected front, and store the original domain in the HTTP Host header.
		rand.Seed(time.Now().UnixNano())
		front := r.fronts[rand.Intn(len(r.fronts))]
		log.Println("Front URL:  ", front)
		req.Host = req.URL.Host
		req.URL.Host = front
	}

	resp, err := r.transport.RoundTrip(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	log.Printf("HTTP rendezvous response: %s", resp.Status)
	if resp.StatusCode != http.StatusOK {
		return nil, errors.New(brokerErrorUnexpected)
	}

	return limitedRead(resp.Body, readLimit)
}

func limitedRead(r io.Reader, limit int64) ([]byte, error) {
	p, err := io.ReadAll(&io.LimitedReader{R: r, N: limit + 1})
	if err != nil {
		return p, err
	} else if int64(len(p)) == limit+1 {
		return p[0:limit], io.ErrUnexpectedEOF
	}
	return p, err
}
07070100000023000081A400000000000000000000000167D9BD4E00000F90000000000000000000000000000000000000002E00000000snowflake-2.11.0/client/lib/rendezvous_sqs.gopackage snowflake_client

import (
	"context"
	"crypto/rand"
	"encoding/hex"
	"log"
	"net/http"
	"net/url"
	"regexp"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/service/sqs"
	"github.com/aws/aws-sdk-go-v2/service/sqs/types"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/sqsclient"
	sqscreds "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/sqscreds/lib"
)

type sqsRendezvous struct {
	transport  http.RoundTripper
	sqsClient  sqsclient.SQSClient
	sqsURL     *url.URL
	timeout    time.Duration
	numRetries int
}

func newSQSRendezvous(sqsQueue string, sqsCredsStr string, transport http.RoundTripper) (*sqsRendezvous, error) {
	sqsURL, err := url.Parse(sqsQueue)
	if err != nil {
		return nil, err
	}

	sqsCreds, err := sqscreds.AwsCredsFromBase64(sqsCredsStr)
	if err != nil {
		return nil, err
	}

	queueURL := sqsURL.String()
	hostName := sqsURL.Hostname()

	regionRegex, _ := regexp.Compile(`^sqs\.([\w-]+)\.amazonaws\.com$`)
	res := regionRegex.FindStringSubmatch(hostName)
	if len(res) < 2 {
		log.Fatal("Could not extract AWS region from SQS URL. Ensure that the SQS Queue URL provided is valid.")
	}
	region := res[1]
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithCredentialsProvider(
			credentials.NewStaticCredentialsProvider(sqsCreds.AwsAccessKeyId, sqsCreds.AwsSecretKey, ""),
		),
		config.WithRegion(region),
	)
	if err != nil {
		log.Fatal(err)
	}
	client := sqs.NewFromConfig(cfg)

	log.Println("Queue URL: ", queueURL)

	return &sqsRendezvous{
		transport:  transport,
		sqsClient:  client,
		sqsURL:     sqsURL,
		timeout:    time.Second,
		numRetries: 5,
	}, nil
}
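// The queue URL is expected to be in the standard AWS endpoint form, from
// which the region is extracted, for example (account and queue name are
// illustrative):
//
//	https://sqs.us-east-1.amazonaws.com/123456789012/snowflake-broker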

func (r *sqsRendezvous) Exchange(encPollReq []byte) ([]byte, error) {
	log.Println("Negotiating via SQS Queue rendezvous...")

	var id [8]byte
	_, err := rand.Read(id[:])
	if err != nil {
		return nil, err
	}
	sqsClientID := hex.EncodeToString(id[:])
	log.Println("SQS Client ID for rendezvous: " + sqsClientID)

	_, err = r.sqsClient.SendMessage(context.TODO(), &sqs.SendMessageInput{
		MessageAttributes: map[string]types.MessageAttributeValue{
			"ClientID": {
				DataType:    aws.String("String"),
				StringValue: aws.String(sqsClientID),
			},
		},
		MessageBody: aws.String(string(encPollReq)),
		QueueUrl:    aws.String(r.sqsURL.String()),
	})
	if err != nil {
		return nil, err
	}

	time.Sleep(r.timeout) // wait for client queue to be created by the broker

	var responseQueueURL *string
	for i := 0; i < r.numRetries; i++ {
		// The SQS queue corresponding to the client where the SDP Answer will be placed
		// may not be created yet. We will retry up to 5 times before we error out.
		var res *sqs.GetQueueUrlOutput
		res, err = r.sqsClient.GetQueueUrl(context.TODO(), &sqs.GetQueueUrlInput{
			QueueName: aws.String("snowflake-client-" + sqsClientID),
		})
		if err != nil {
			log.Println(err)
			log.Printf("Attempt %d of %d to retrieve URL of response SQS queue failed.\n", i+1, r.numRetries)
			time.Sleep(r.timeout)
		} else {
			responseQueueURL = res.QueueUrl
			break
		}
	}
	if err != nil {
		return nil, err
	}

	var answer string
	for i := 0; i < r.numRetries; i++ {
		// Waiting for SDP Answer from proxy to be placed in SQS queue.
		// We will retry up to 5 times before we error out.
		res, err := r.sqsClient.ReceiveMessage(context.TODO(), &sqs.ReceiveMessageInput{
			QueueUrl:            responseQueueURL,
			MaxNumberOfMessages: 1,
			WaitTimeSeconds:     20,
		})
		if err != nil {
			return nil, err
		}
		if len(res.Messages) == 0 {
			log.Printf("Attempt %d of %d to receive message from response SQS queue failed. No message found in queue.\n", i+1, r.numRetries)
			delay := float64(i)/2.0 + 1
			time.Sleep(time.Duration(delay*1000) * (r.timeout / 1000))
		} else {
			answer = *res.Messages[0].Body
			break
		}
	}

	return []byte(answer), nil
}
07070100000024000081A400000000000000000000000167D9BD4E00003F4B000000000000000000000000000000000000002F00000000snowflake-2.11.0/client/lib/rendezvous_test.gopackage snowflake_client

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"net/url"
	"testing"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sqs"
	"github.com/aws/aws-sdk-go-v2/service/sqs/types"
	"github.com/golang/mock/gomock"
	"github.com/pion/webrtc/v4"
	. "github.com/smartystreets/goconvey/convey"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/amp"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/messages"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/nat"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/sqsclient"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/util"
)

// mockTransport's RoundTrip method returns a response with a fake status and
// body.
type mockTransport struct {
	statusCode int
	body       []byte
}

func (t *mockTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	return &http.Response{
		Status:     fmt.Sprintf("%d %s", t.statusCode, http.StatusText(t.statusCode)),
		StatusCode: t.statusCode,
		Body:       io.NopCloser(bytes.NewReader(t.body)),
	}, nil
}

// errorTransport's RoundTrip method returns an error.
type errorTransport struct {
	err error
}

func (t errorTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	return nil, t.err
}

// makeEncPollReq returns an encoded client poll request containing a given
// offer.
func makeEncPollReq(offer string) []byte {
	encPollReq, err := (&messages.ClientPollRequest{
		Offer: offer,
		NAT:   nat.NATUnknown,
	}).EncodeClientPollRequest()
	if err != nil {
		panic(err)
	}
	return encPollReq
}

// makeEncPollResp returns an encoded client poll response with given answer and
// error strings.
func makeEncPollResp(answer, errorStr string) []byte {
	encPollResp, err := (&messages.ClientPollResponse{
		Answer: answer,
		Error:  errorStr,
	}).EncodePollResponse()
	if err != nil {
		panic(err)
	}
	return encPollResp
}

var fakeEncPollReq = makeEncPollReq(`{"type":"offer","sdp":"test"}`)

func TestHTTPRendezvous(t *testing.T) {
	Convey("HTTP rendezvous", t, func() {
		Convey("Construct httpRendezvous with no front domain", func() {
			transport := &mockTransport{http.StatusOK, []byte{}}
			rend, err := newHTTPRendezvous("http://test.broker", []string{}, transport)
			So(err, ShouldBeNil)
			So(rend.brokerURL, ShouldNotBeNil)
			So(rend.brokerURL.Host, ShouldResemble, "test.broker")
			So(rend.fronts, ShouldEqual, []string{})
			So(rend.transport, ShouldEqual, transport)
		})

		Convey("Construct httpRendezvous *with* front domain", func() {
			transport := &mockTransport{http.StatusOK, []byte{}}
			rend, err := newHTTPRendezvous("http://test.broker", []string{"front"}, transport)
			So(err, ShouldBeNil)
			So(rend.brokerURL, ShouldNotBeNil)
			So(rend.brokerURL.Host, ShouldResemble, "test.broker")
			So(rend.fronts, ShouldContain, "front")
			So(rend.transport, ShouldEqual, transport)
		})

		Convey("httpRendezvous.Exchange responds with answer", func() {
			fakeEncPollResp := makeEncPollResp(
				`{"answer": "{\"type\":\"answer\",\"sdp\":\"fake\"}" }`,
				"",
			)
			rend, err := newHTTPRendezvous("http://test.broker", []string{},
				&mockTransport{http.StatusOK, fakeEncPollResp})
			So(err, ShouldBeNil)
			answer, err := rend.Exchange(fakeEncPollReq)
			So(err, ShouldBeNil)
			So(answer, ShouldResemble, fakeEncPollResp)
		})

		Convey("httpRendezvous.Exchange responds with no answer", func() {
			fakeEncPollResp := makeEncPollResp(
				"",
				`{"error": "no snowflake proxies currently available"}`,
			)
			rend, err := newHTTPRendezvous("http://test.broker", []string{},
				&mockTransport{http.StatusOK, fakeEncPollResp})
			So(err, ShouldBeNil)
			answer, err := rend.Exchange(fakeEncPollReq)
			So(err, ShouldBeNil)
			So(answer, ShouldResemble, fakeEncPollResp)
		})

		Convey("httpRendezvous.Exchange fails with unexpected HTTP status code", func() {
			rend, err := newHTTPRendezvous("http://test.broker", []string{},
				&mockTransport{http.StatusInternalServerError, []byte{}})
			So(err, ShouldBeNil)
			answer, err := rend.Exchange(fakeEncPollReq)
			So(err, ShouldNotBeNil)
			So(answer, ShouldBeNil)
			So(err.Error(), ShouldResemble, brokerErrorUnexpected)
		})

		Convey("httpRendezvous.Exchange fails with error", func() {
			transportErr := errors.New("error")
			rend, err := newHTTPRendezvous("http://test.broker", []string{},
				&errorTransport{err: transportErr})
			So(err, ShouldBeNil)
			answer, err := rend.Exchange(fakeEncPollReq)
			So(err, ShouldEqual, transportErr)
			So(answer, ShouldBeNil)
		})

		Convey("httpRendezvous.Exchange fails with large read", func() {
			rend, err := newHTTPRendezvous("http://test.broker", []string{},
				&mockTransport{http.StatusOK, make([]byte, readLimit+1)})
			So(err, ShouldBeNil)
			_, err = rend.Exchange(fakeEncPollReq)
			So(err, ShouldEqual, io.ErrUnexpectedEOF)
		})
	})
}

func ampArmorEncode(p []byte) []byte {
	var buf bytes.Buffer
	enc, err := amp.NewArmorEncoder(&buf)
	if err != nil {
		panic(err)
	}
	_, err = enc.Write(p)
	if err != nil {
		panic(err)
	}
	err = enc.Close()
	if err != nil {
		panic(err)
	}
	return buf.Bytes()
}

func TestAMPCacheRendezvous(t *testing.T) {
	Convey("AMP cache rendezvous", t, func() {
		Convey("Construct ampCacheRendezvous with no cache and no front domain", func() {
			transport := &mockTransport{http.StatusOK, []byte{}}
			rend, err := newAMPCacheRendezvous("http://test.broker", "", []string{}, transport)
			So(err, ShouldBeNil)
			So(rend.brokerURL, ShouldNotBeNil)
			So(rend.brokerURL.String(), ShouldResemble, "http://test.broker")
			So(rend.cacheURL, ShouldBeNil)
			So(rend.fronts, ShouldResemble, []string{})
			So(rend.transport, ShouldEqual, transport)
		})

		Convey("Construct ampCacheRendezvous with cache and no front domain", func() {
			transport := &mockTransport{http.StatusOK, []byte{}}
			rend, err := newAMPCacheRendezvous("http://test.broker", "https://amp.cache/", []string{}, transport)
			So(err, ShouldBeNil)
			So(rend.brokerURL, ShouldNotBeNil)
			So(rend.brokerURL.String(), ShouldResemble, "http://test.broker")
			So(rend.cacheURL, ShouldNotBeNil)
			So(rend.cacheURL.String(), ShouldResemble, "https://amp.cache/")
			So(rend.fronts, ShouldResemble, []string{})
			So(rend.transport, ShouldEqual, transport)
		})

		Convey("Construct ampCacheRendezvous with no cache and front domain", func() {
			transport := &mockTransport{http.StatusOK, []byte{}}
			rend, err := newAMPCacheRendezvous("http://test.broker", "", []string{"front"}, transport)
			So(err, ShouldBeNil)
			So(rend.brokerURL, ShouldNotBeNil)
			So(rend.brokerURL.String(), ShouldResemble, "http://test.broker")
			So(rend.cacheURL, ShouldBeNil)
			So(rend.fronts, ShouldContain, "front")
			So(rend.transport, ShouldEqual, transport)
		})

		Convey("Construct ampCacheRendezvous with cache and front domain", func() {
			transport := &mockTransport{http.StatusOK, []byte{}}
			rend, err := newAMPCacheRendezvous("http://test.broker", "https://amp.cache/", []string{"front"}, transport)
			So(err, ShouldBeNil)
			So(rend.brokerURL, ShouldNotBeNil)
			So(rend.brokerURL.String(), ShouldResemble, "http://test.broker")
			So(rend.cacheURL, ShouldNotBeNil)
			So(rend.cacheURL.String(), ShouldResemble, "https://amp.cache/")
			So(rend.fronts, ShouldContain, "front")
			So(rend.transport, ShouldEqual, transport)
		})

		Convey("ampCacheRendezvous.Exchange responds with answer", func() {
			fakeEncPollResp := makeEncPollResp(
				`{"answer": "{\"type\":\"answer\",\"sdp\":\"fake\"}" }`,
				"",
			)
			rend, err := newAMPCacheRendezvous("http://test.broker", "", []string{},
				&mockTransport{http.StatusOK, ampArmorEncode(fakeEncPollResp)})
			So(err, ShouldBeNil)
			answer, err := rend.Exchange(fakeEncPollReq)
			So(err, ShouldBeNil)
			So(answer, ShouldResemble, fakeEncPollResp)
		})

		Convey("ampCacheRendezvous.Exchange responds with no answer", func() {
			fakeEncPollResp := makeEncPollResp(
				"",
				`{"error": "no snowflake proxies currently available"}`,
			)
			rend, err := newAMPCacheRendezvous("http://test.broker", "", []string{},
				&mockTransport{http.StatusOK, ampArmorEncode(fakeEncPollResp)})
			So(err, ShouldBeNil)
			answer, err := rend.Exchange(fakeEncPollReq)
			So(err, ShouldBeNil)
			So(answer, ShouldResemble, fakeEncPollResp)
		})

		Convey("ampCacheRendezvous.Exchange fails with unexpected HTTP status code", func() {
			rend, err := newAMPCacheRendezvous("http://test.broker", "", []string{},
				&mockTransport{http.StatusInternalServerError, []byte{}})
			So(err, ShouldBeNil)
			answer, err := rend.Exchange(fakeEncPollReq)
			So(err, ShouldNotBeNil)
			So(answer, ShouldBeNil)
			So(err.Error(), ShouldResemble, brokerErrorUnexpected)
		})

		Convey("ampCacheRendezvous.Exchange fails with error", func() {
			transportErr := errors.New("error")
			rend, err := newAMPCacheRendezvous("http://test.broker", "", []string{},
				&errorTransport{err: transportErr})
			So(err, ShouldBeNil)
			answer, err := rend.Exchange(fakeEncPollReq)
			So(err, ShouldEqual, transportErr)
			So(answer, ShouldBeNil)
		})

		Convey("ampCacheRendezvous.Exchange fails with large read", func() {
			// readLimit should apply to the raw HTTP body, not the
			// encoded bytes. Encode readLimit bytes—the encoded
			// size will be larger—and try to read the body. It
			// should fail.
			rend, err := newAMPCacheRendezvous("http://test.broker", "", []string{},
				&mockTransport{http.StatusOK, ampArmorEncode(make([]byte, readLimit))})
			So(err, ShouldBeNil)
			_, err = rend.Exchange(fakeEncPollReq)
			// We may get io.ErrUnexpectedEOF here, or something
			// like "missing </pre> tag".
			So(err, ShouldNotBeNil)
		})
	})
}

func TestSQSRendezvous(t *testing.T) {
	Convey("SQS Rendezvous", t, func() {
		var sendMessageInput *sqs.SendMessageInput
		var getQueueUrlInput *sqs.GetQueueUrlInput

		Convey("Construct SQS queue rendezvous", func() {
			transport := &mockTransport{http.StatusOK, []byte{}}
			rend, err := newSQSRendezvous("https://sqs.us-east-1.amazonaws.com", "eyJhd3MtYWNjZXNzLWtleS1pZCI6InRlc3QtYWNjZXNzLWtleSIsImF3cy1zZWNyZXQta2V5IjoidGVzdC1zZWNyZXQta2V5In0=", transport)

			So(err, ShouldBeNil)
			So(rend.sqsClient, ShouldNotBeNil)
			So(rend.sqsURL, ShouldNotBeNil)
			So(rend.sqsURL.String(), ShouldResemble, "https://sqs.us-east-1.amazonaws.com")
		})

		ctrl := gomock.NewController(t)
		mockSqsClient := sqsclient.NewMockSQSClient(ctrl)
		responseQueueURL := "https://sqs.us-east-1.amazonaws.com/testing"
		sqsUrl, _ := url.Parse("https://sqs.us-east-1.amazonaws.com/broker")
		fakeEncPollResp := makeEncPollResp(
			`{"answer": "{\"type\":\"answer\",\"sdp\":\"fake\"}" }`,
			"",
		)
		sqsRendezvous := sqsRendezvous{
			transport:  &mockTransport{http.StatusOK, []byte{}},
			sqsClient:  mockSqsClient,
			sqsURL:     sqsUrl,
			timeout:    0,
			numRetries: 5,
		}

		Convey("sqsRendezvous.Exchange responds with answer", func() {
			sqsClientId := ""
			mockSqsClient.EXPECT().SendMessage(gomock.Any(), gomock.AssignableToTypeOf(sendMessageInput)).Do(func(ctx interface{}, input *sqs.SendMessageInput, optFns ...interface{}) {
				So(*input.MessageBody, ShouldEqual, string(fakeEncPollResp))
				So(*input.QueueUrl, ShouldEqual, sqsUrl.String())
				sqsClientId = *input.MessageAttributes["ClientID"].StringValue
			})
			mockSqsClient.EXPECT().GetQueueUrl(gomock.Any(), gomock.AssignableToTypeOf(getQueueUrlInput)).DoAndReturn(func(ctx interface{}, input *sqs.GetQueueUrlInput, optFns ...interface{}) (*sqs.GetQueueUrlOutput, error) {
				So(*input.QueueName, ShouldEqual, "snowflake-client-"+sqsClientId)
				return &sqs.GetQueueUrlOutput{
					QueueUrl: aws.String(responseQueueURL),
				}, nil
			})
			mockSqsClient.EXPECT().ReceiveMessage(gomock.Any(), gomock.Eq(&sqs.ReceiveMessageInput{
				QueueUrl:            &responseQueueURL,
				MaxNumberOfMessages: 1,
				WaitTimeSeconds:     20,
			})).Return(&sqs.ReceiveMessageOutput{
				Messages: []types.Message{{Body: aws.String("answer")}},
			}, nil)

			answer, err := sqsRendezvous.Exchange(fakeEncPollResp)

			So(answer, ShouldEqual, []byte("answer"))
			So(err, ShouldBeNil)
		})

		Convey("sqsRendezvous.Exchange cannot get queue url", func() {
			sqsClientId := ""
			mockSqsClient.EXPECT().SendMessage(gomock.Any(), gomock.AssignableToTypeOf(sendMessageInput)).Do(func(ctx interface{}, input *sqs.SendMessageInput, optFns ...interface{}) {
				So(*input.MessageBody, ShouldEqual, string(fakeEncPollResp))
				So(*input.QueueUrl, ShouldEqual, sqsUrl.String())
				sqsClientId = *input.MessageAttributes["ClientID"].StringValue
			})
			for i := 0; i < sqsRendezvous.numRetries; i++ {
				mockSqsClient.EXPECT().GetQueueUrl(gomock.Any(), gomock.AssignableToTypeOf(getQueueUrlInput)).DoAndReturn(func(ctx interface{}, input *sqs.GetQueueUrlInput, optFns ...interface{}) (*sqs.GetQueueUrlOutput, error) {
					So(*input.QueueName, ShouldEqual, "snowflake-client-"+sqsClientId)
					return nil, errors.New("test error")
				})
			}

			answer, err := sqsRendezvous.Exchange(fakeEncPollResp)

			So(answer, ShouldBeNil)
			So(err, ShouldNotBeNil)
			So(err, ShouldEqual, errors.New("test error"))
		})

		Convey("sqsRendezvous.Exchange does not receive answer", func() {
			sqsClientId := ""
			mockSqsClient.EXPECT().SendMessage(gomock.Any(), gomock.AssignableToTypeOf(sendMessageInput)).Do(func(ctx interface{}, input *sqs.SendMessageInput, optFns ...interface{}) {
				So(*input.MessageBody, ShouldEqual, string(fakeEncPollResp))
				So(*input.QueueUrl, ShouldEqual, sqsUrl.String())
				sqsClientId = *input.MessageAttributes["ClientID"].StringValue
			})
			mockSqsClient.EXPECT().GetQueueUrl(gomock.Any(), gomock.AssignableToTypeOf(getQueueUrlInput)).DoAndReturn(func(ctx interface{}, input *sqs.GetQueueUrlInput, optFns ...interface{}) (*sqs.GetQueueUrlOutput, error) {
				So(*input.QueueName, ShouldEqual, "snowflake-client-"+sqsClientId)
				return &sqs.GetQueueUrlOutput{
					QueueUrl: aws.String(responseQueueURL),
				}, nil
			})
			for i := 0; i < sqsRendezvous.numRetries; i++ {
				mockSqsClient.EXPECT().ReceiveMessage(gomock.Any(), gomock.Eq(&sqs.ReceiveMessageInput{
					QueueUrl:            &responseQueueURL,
					MaxNumberOfMessages: 1,
					WaitTimeSeconds:     20,
				})).Return(&sqs.ReceiveMessageOutput{
					Messages: []types.Message{},
				}, nil)
			}

			answer, err := sqsRendezvous.Exchange(fakeEncPollResp)

			So(answer, ShouldEqual, []byte{})
			So(err, ShouldBeNil)
		})
	})
}

func TestBrokerChannel(t *testing.T) {
	Convey("Requests a proxy and handles response", t, func() {
		answerSdp := &webrtc.SessionDescription{
			Type: webrtc.SDPTypeAnswer,
			SDP:  "test",
		}
		answerSdpStr, _ := util.SerializeSessionDescription(answerSdp)
		serverResponse, _ := (&messages.ClientPollResponse{
			Answer: answerSdpStr,
		}).EncodePollResponse()

		offerSdp := &webrtc.SessionDescription{
			Type: webrtc.SDPTypeOffer,
			SDP:  "test",
		}

		requestBodyChan := make(chan []byte)
		mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			body, _ := io.ReadAll(r.Body)
			go func() {
				requestBodyChan <- body
			}()
			w.Write(serverResponse)
		}))
		defer mockServer.Close()

		brokerChannel, err := newBrokerChannelFromConfig(ClientConfig{
			BrokerURL:         mockServer.URL,
			BridgeFingerprint: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
		})
		So(err, ShouldBeNil)
		brokerChannel.SetNATType(nat.NATRestricted)

		answerSdpReturned, err := brokerChannel.Negotiate(
			offerSdp,
			brokerChannel.GetNATType(),
		)
		So(err, ShouldBeNil)
		So(answerSdpReturned, ShouldEqual, answerSdp)

		body := <-requestBodyChan
		pollReq, err := messages.DecodeClientPollRequest(body)
		So(err, ShouldBeNil)
		So(pollReq.Fingerprint, ShouldEqual, "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")
		So(pollReq.NAT, ShouldEqual, nat.NATRestricted)
		requestSdp, err := util.DeserializeSessionDescription(pollReq.Offer)
		So(err, ShouldBeNil)
		So(requestSdp, ShouldEqual, offerSdp)
	})
}
07070100000025000081A400000000000000000000000167D9BD4E000037AC000000000000000000000000000000000000002900000000snowflake-2.11.0/client/lib/snowflake.go/*
Package snowflake_client implements functionality necessary for a client to establish a connection
to a server using Snowflake.

Included in the package is a Transport type that implements the Pluggable Transports v2.1 Go API
specification. To use Snowflake, you must first create a client from a configuration:

	config := snowflake_client.ClientConfig{
		BrokerURL:   "https://snowflake-broker.example.com",
		FrontDomain: "https://friendlyfrontdomain.net",
		// ...
	}
	transport, err := snowflake_client.NewSnowflakeClient(config)
	if err != nil {
		// handle error
	}

The Dial function connects to a Snowflake server:

	conn, err := transport.Dial()
	if err != nil {
		// handle error
	}
	defer conn.Close()
*/
package snowflake_client

import (
	"context"
	"errors"
	"log"
	"math/rand"
	"net"
	"net/url"
	"strings"
	"time"

	"github.com/pion/ice/v4"
	"github.com/pion/webrtc/v4"
	"github.com/xtaci/kcp-go/v5"
	"github.com/xtaci/smux"

	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/event"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/nat"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/turbotunnel"
)

const (
	// ReconnectTimeout is the time a Snowflake client will wait before collecting
	// more snowflakes.
	ReconnectTimeout = 10 * time.Second
	// SnowflakeTimeout is the time a Snowflake client will wait before determining that
	// a remote snowflake has been disconnected. If no new messages are sent or received
	// in this time period, the client will terminate the connection with the remote
	// peer and collect a new snowflake.
	SnowflakeTimeout = 20 * time.Second
	// DataChannelTimeout is how long the client will wait for the OnOpen callback
	// on a newly created DataChannel.
	DataChannelTimeout = 10 * time.Second

	// WindowSize is the number of packets in the send and receive window of a KCP connection.
	WindowSize = 65535
	// StreamSize controls the maximum amount of in flight data between a client and server.
	StreamSize = 1048576 // 1MB
)

type dummyAddr struct{}

func (addr dummyAddr) Network() string { return "dummy" }
func (addr dummyAddr) String() string  { return "dummy" }

// Transport is a structure with methods that conform to the Go PT v2.1 API
// https://github.com/Pluggable-Transports/Pluggable-Transports-spec/blob/master/releases/PTSpecV2.1/Pluggable%20Transport%20Specification%20v2.1%20-%20Go%20Transport%20API.pdf
type Transport struct {
	dialer *WebRTCDialer

	// EventDispatcher is the event bus for snowflake events.
	// When an important event happens, it will be distributed here.
	eventDispatcher event.SnowflakeEventDispatcher
}

// ClientConfig defines how the SnowflakeClient will connect to the broker and Snowflake proxies.
type ClientConfig struct {
	// BrokerURL is the full URL of the Snowflake broker that the client will connect to.
	BrokerURL string
	// AmpCacheURL is the full URL of a valid AMP cache. A nonzero value indicates
	// that AMP cache will be used as the rendezvous method with the broker.
	AmpCacheURL string
	// SQSQueueURL is the full URL of an AWS SQS Queue. A nonzero value indicates
	// that SQS queue will be used as the rendezvous method with the broker.
	SQSQueueURL string
	// SQSCredsStr is a base64 encoded string of the credentials containing the access key ID and secret key used to access the AWS SQS queue
	SQSCredsStr string
	// FrontDomain is the full URL of an optional front domain that can be used with either
	// the AMP cache or HTTP domain fronting rendezvous method.
	FrontDomain string
	// FrontDomains is a slice of optional front domains that can be used with either
	// the AMP cache or HTTP domain fronting rendezvous method.
	FrontDomains []string
	// ICEAddresses are a slice of ICE server URLs that will be used for NAT traversal and
	// the creation of the client's WebRTC SDP offer.
	ICEAddresses []string
	// KeepLocalAddresses is an optional setting that will prevent the removal of local or
	// invalid addresses from the client's SDP offer. This is useful for local deployments
	// and testing.
	KeepLocalAddresses bool
	// Max is the maximum number of snowflake proxy peers that the client should attempt to
	// connect to. Defaults to 1.
	Max int
	// UTLSClientID is the type of user application that snowflake should imitate.
	// If an empty value is provided, it will use Go's default TLS implementation
	UTLSClientID string
	// UTLSRemoveSNI is the flag to control whether SNI should be removed from Client Hello
	// when uTLS is used.
	UTLSRemoveSNI bool
	// BridgeFingerprint is the fingerprint of the bridge that the client will eventually
	// connect to, as specified in the Bridge line of the torrc.
	BridgeFingerprint string
	// CommunicationProxy is the proxy address for network communication
	CommunicationProxy *url.URL
}

// NewSnowflakeClient creates a new Snowflake transport client that can spawn multiple
// Snowflake connections.
//
// The config fields specify the broker URL and optional front domains for domain
// fronting, the STUN/TURN URLs needed for WebRTC negotiation, whether to keep
// local network addresses in the SDP offer (for testing purposes), and the maximum
// number of snowflakes the client should gather for each SOCKS connection.
func NewSnowflakeClient(config ClientConfig) (*Transport, error) {
	log.Println("\n\n\n --- Starting Snowflake Client ---")

	iceServers := parseIceServers(config.ICEAddresses)
	// chooses a random subset of servers from inputs
	rand.Seed(time.Now().UnixNano())
	rand.Shuffle(len(iceServers), func(i, j int) {
		iceServers[i], iceServers[j] = iceServers[j], iceServers[i]
	})
	if len(iceServers) > 2 {
		iceServers = iceServers[:(len(iceServers)+1)/2]
	}
	log.Printf("Using ICE servers:")
	for _, server := range iceServers {
		log.Printf("url: %v", strings.Join(server.URLs, " "))
	}

	// Maintain backwards compatibility with old FrontDomain field of ClientConfig
	if (len(config.FrontDomains) == 0) && (config.FrontDomain != "") {
		config.FrontDomains = []string{config.FrontDomain}
	}

	// Rendezvous with broker using the given parameters.
	broker, err := newBrokerChannelFromConfig(config)
	if err != nil {
		return nil, err
	}
	go updateNATType(iceServers, broker, config.CommunicationProxy)

	natPolicy := &NATPolicy{}

	max := 1
	if config.Max > max {
		max = config.Max
	}
	eventsLogger := event.NewSnowflakeEventDispatcher()
	transport := &Transport{dialer: NewWebRTCDialerWithNatPolicyAndEventsAndProxy(broker, natPolicy, iceServers, max, eventsLogger, config.CommunicationProxy), eventDispatcher: eventsLogger}

	return transport, nil
}

// Dial creates a new Snowflake connection.
// Dial starts the collection of snowflakes and returns a SnowflakeConn that is a
// wrapper around a smux.Stream that will reliably deliver data to a Snowflake
// server through one or more snowflake proxies.
func (t *Transport) Dial() (net.Conn, error) {
	// Cleanup functions to run before returning, in case of an error.
	var cleanup []func()
	defer func() {
		// Run cleanup in reverse order, as defer does.
		for i := len(cleanup) - 1; i >= 0; i-- {
			cleanup[i]()
		}
	}()

	// Prepare to collect remote WebRTC peers.
	snowflakes, err := NewPeers(t.dialer)
	if err != nil {
		return nil, err
	}
	cleanup = append(cleanup, func() { snowflakes.End() })

	// Use a real logger to periodically output how much traffic is happening.
	snowflakes.bytesLogger = newBytesSyncLogger()

	log.Printf("---- SnowflakeConn: begin collecting snowflakes ---")
	go connectLoop(snowflakes)

	// Create a new smux session
	log.Printf("---- SnowflakeConn: starting a new session ---")
	pconn, sess, err := newSession(snowflakes)
	if err != nil {
		return nil, err
	}
	cleanup = append(cleanup, func() {
		pconn.Close()
		sess.Close()
	})

	// On the smux session we overlay a stream.
	stream, err := sess.OpenStream()
	if err != nil {
		return nil, err
	}
	// Begin exchanging data.
	log.Printf("---- SnowflakeConn: begin stream %v ---", stream.ID())
	cleanup = append(cleanup, func() { stream.Close() })

	// All good, clear the cleanup list.
	cleanup = nil
	return &SnowflakeConn{Stream: stream, sess: sess, pconn: pconn, snowflakes: snowflakes}, nil
}

func (t *Transport) AddSnowflakeEventListener(receiver event.SnowflakeEventReceiver) {
	t.eventDispatcher.AddSnowflakeEventListener(receiver)
}

func (t *Transport) RemoveSnowflakeEventListener(receiver event.SnowflakeEventReceiver) {
	t.eventDispatcher.RemoveSnowflakeEventListener(receiver)
}

// SetRendezvousMethod sets the rendezvous method to the Snowflake broker.
func (t *Transport) SetRendezvousMethod(r RendezvousMethod) {
	t.dialer.Rendezvous = r
}

// SnowflakeConn is a reliable connection to a snowflake server that implements net.Conn.
type SnowflakeConn struct {
	*smux.Stream
	sess       *smux.Session
	pconn      net.PacketConn
	snowflakes *Peers
}

// Close closes the connection.
//
// The collection of snowflake proxies for this connection is stopped.
func (conn *SnowflakeConn) Close() error {
	var err error
	log.Printf("---- SnowflakeConn: closed stream %v ---", conn.ID())
	err = conn.Stream.Close()
	log.Printf("---- SnowflakeConn: end collecting snowflakes ---")
	conn.snowflakes.End()
	if inerr := conn.pconn.Close(); err == nil {
		err = inerr
	}
	log.Printf("---- SnowflakeConn: discarding finished session ---")
	if inerr := conn.sess.Close(); err == nil {
		err = inerr
	}
	return err
}

// loop through all provided STUN servers until we exhaust the list or find
// one that is compatible with RFC 5780
func updateNATType(servers []webrtc.ICEServer, broker *BrokerChannel, proxy *url.URL) {
	var restrictedNAT bool
	var err error
	for _, server := range servers {
		addr := strings.TrimPrefix(server.URLs[0], "stun:")
		restrictedNAT, err = nat.CheckIfRestrictedNATWithProxy(addr, proxy)

		if err != nil {
			log.Printf("Warning: NAT checking failed for server at %s: %s", addr, err)
		} else {
			if restrictedNAT {
				broker.SetNATType(nat.NATRestricted)
			} else {
				broker.SetNATType(nat.NATUnrestricted)
			}
			break
		}
	}
	if err != nil {
		broker.SetNATType(nat.NATUnknown)
	}
}

// Returns a slice of webrtc.ICEServer given a slice of addresses
func parseIceServers(addresses []string) []webrtc.ICEServer {
	var servers []webrtc.ICEServer
	if len(addresses) == 0 {
		return nil
	}
	for _, address := range addresses {
		address = strings.TrimSpace(address)

		// ice.ParseURL recognizes many types of ICE servers,
		// but we only support stun over UDP currently
		u, err := url.Parse(address)
		if err != nil {
			log.Printf("Warning: Parsing ICE server %v resulted in error: %v, skipping", address, err)
			continue
		}
		if u.Scheme != "stun" {
			log.Printf("Warning: Only stun: (STUN over UDP) servers are supported currently, skipping %v", address)
			continue
		}

		// add default port, other sanity checks
		parsedURL, err := ice.ParseURL(address)
		if err != nil {
			log.Printf("Warning: Parsing ICE server %v resulted in error: %v, skipping", address, err)
			continue
		}

		servers = append(servers, webrtc.ICEServer{
			URLs: []string{parsedURL.String()},
		})
	}
	return servers
}

// newSession returns a new smux.Session and the net.PacketConn it is running
// over. The net.PacketConn successively connects through Snowflake proxies
// pulled from snowflakes.
func newSession(snowflakes SnowflakeCollector) (net.PacketConn, *smux.Session, error) {
	clientID := turbotunnel.NewClientID()

	// We build a persistent KCP session on a sequence of ephemeral WebRTC
	// connections. This dialContext tells RedialPacketConn how to get a new
	// WebRTC connection when the previous one dies. Inside each WebRTC
	// connection, we use encapsulationPacketConn to encode packets into a
	// stream.
	dialContext := func(ctx context.Context) (net.PacketConn, error) {
		log.Printf("redialing on same connection")
		// Obtain an available WebRTC remote. May block.
		conn := snowflakes.Pop()
		if conn == nil {
			return nil, errors.New("handler: Received invalid Snowflake")
		}
		log.Println("---- Handler: snowflake assigned ----")
		// Send the magic Turbo Tunnel token.
		_, err := conn.Write(turbotunnel.Token[:])
		if err != nil {
			return nil, err
		}
		// Send ClientID prefix.
		_, err = conn.Write(clientID[:])
		if err != nil {
			return nil, err
		}
		return newEncapsulationPacketConn(dummyAddr{}, dummyAddr{}, conn), nil
	}
	pconn := turbotunnel.NewRedialPacketConn(dummyAddr{}, dummyAddr{}, dialContext)

	// conn is built on the underlying RedialPacketConn—when one WebRTC
	// connection dies, another one will be found to take its place. The
	// sequence of packets across multiple WebRTC connections drives the KCP
	// engine.
	conn, err := kcp.NewConn2(dummyAddr{}, nil, 0, 0, pconn)
	if err != nil {
		pconn.Close()
		return nil, nil, err
	}
	// Permit coalescing the payloads of consecutive sends.
	conn.SetStreamMode(true)
	// Set the maximum send and receive window sizes to a high number
	// Removes KCP bottlenecks: https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/issues/40026
	conn.SetWindowSize(WindowSize, WindowSize)
	// Disable the dynamic congestion window (limit only by the
	// maximum of local and remote static windows).
	conn.SetNoDelay(
		0, // default nodelay
		0, // default interval
		0, // default resend
		1, // nc=1 => congestion window off
	)
	// On the KCP connection we overlay an smux session and stream.
	smuxConfig := smux.DefaultConfig()
	smuxConfig.Version = 2
	smuxConfig.KeepAliveTimeout = 10 * time.Minute
	smuxConfig.MaxStreamBuffer = StreamSize

	sess, err := smux.Client(conn, smuxConfig)
	if err != nil {
		conn.Close()
		pconn.Close()
		return nil, nil, err
	}

	return pconn, sess, err
}
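// Roughly, the resulting protocol stack from the application's point of view
// is:
//
//	smux.Stream / smux.Session      reliable, multiplexed streams for the caller
//	kcp.UDPSession                  retransmission and ordering over packets
//	turbotunnel.RedialPacketConn    survives the loss of individual WebRTC connections
//	encapsulationPacketConn         packets encoded into a byte stream
//	WebRTCPeer (DataChannel)        one ephemeral snowflake proxy at a time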

// Maintain |SnowflakeCapacity| number of available WebRTC connections, to
// transfer to the Tor SOCKS handler when needed.
func connectLoop(snowflakes SnowflakeCollector) {
	for {
		timer := time.After(ReconnectTimeout)
		_, err := snowflakes.Collect()
		if err != nil {
			log.Printf("WebRTC: %v  Retrying...", err)
		}
		select {
		case <-timer:
			continue
		case <-snowflakes.Melted():
			log.Println("ConnectLoop: stopped.")
			return
		}
	}
}
07070100000026000081A400000000000000000000000167D9BD4E000007D8000000000000000000000000000000000000002B00000000snowflake-2.11.0/client/lib/turbotunnel.gopackage snowflake_client

import (
	"bufio"
	"errors"
	"io"
	"net"
	"time"

	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/encapsulation"
)

var errNotImplemented = errors.New("not implemented")

// encapsulationPacketConn implements the net.PacketConn interface over an
// io.ReadWriteCloser stream, using the encapsulation package to represent
// packets in a stream.
type encapsulationPacketConn struct {
	io.ReadWriteCloser
	localAddr  net.Addr
	remoteAddr net.Addr
	bw         *bufio.Writer
}

// newEncapsulationPacketConn makes an encapsulationPacketConn out of a given
// io.ReadWriteCloser and provided local and remote addresses.
func newEncapsulationPacketConn(
	localAddr, remoteAddr net.Addr,
	conn io.ReadWriteCloser,
) *encapsulationPacketConn {
	return &encapsulationPacketConn{
		ReadWriteCloser: conn,
		localAddr:       localAddr,
		remoteAddr:      remoteAddr,
		bw:              bufio.NewWriter(conn),
	}
}

// ReadFrom reads an encapsulated packet from the stream.
func (c *encapsulationPacketConn) ReadFrom(p []byte) (int, net.Addr, error) {
	n, err := encapsulation.ReadData(c.ReadWriteCloser, p)
	if err == io.ErrShortBuffer {
		err = nil
	}
	return n, c.remoteAddr, err
}

// WriteTo writes an encapsulated packet to the stream.
func (c *encapsulationPacketConn) WriteTo(p []byte, addr net.Addr) (int, error) {
	// addr is ignored.
	_, err := encapsulation.WriteData(c.bw, p)
	if err == nil {
		err = c.bw.Flush()
	}
	if err != nil {
		return 0, err
	}
	return len(p), nil
}

// LocalAddr returns the localAddr value that was passed to
// NewEncapsulationPacketConn.
func (c *encapsulationPacketConn) LocalAddr() net.Addr {
	return c.localAddr
}

func (c *encapsulationPacketConn) SetDeadline(t time.Time) error      { return errNotImplemented }
func (c *encapsulationPacketConn) SetReadDeadline(t time.Time) error  { return errNotImplemented }
func (c *encapsulationPacketConn) SetWriteDeadline(t time.Time) error { return errNotImplemented }
07070100000027000081A400000000000000000000000167D9BD4E000005E6000000000000000000000000000000000000002400000000snowflake-2.11.0/client/lib/util.gopackage snowflake_client

import (
	"log"
	"time"
)

const (
	LogTimeInterval = 5 * time.Second
)

type bytesLogger interface {
	addOutbound(int64)
	addInbound(int64)
}

// Default bytesLogger does nothing.
type bytesNullLogger struct{}

func (b bytesNullLogger) addOutbound(amount int64) {}
func (b bytesNullLogger) addInbound(amount int64)  {}

// bytesSyncLogger uses channels to safely log from multiple sources with output
// occurring at reasonable intervals.
type bytesSyncLogger struct {
	outboundChan chan int64
	inboundChan  chan int64
}

// newBytesSyncLogger returns a new bytesSyncLogger and starts its logging loop.
func newBytesSyncLogger() *bytesSyncLogger {
	b := &bytesSyncLogger{
		outboundChan: make(chan int64, 5),
		inboundChan:  make(chan int64, 5),
	}
	go b.log()
	return b
}

func (b *bytesSyncLogger) log() {
	var outbound, inbound int64
	var outEvents, inEvents int
	ticker := time.NewTicker(LogTimeInterval)
	for {
		select {
		case <-ticker.C:
			if outEvents > 0 || inEvents > 0 {
				log.Printf("Traffic Bytes (in|out): %d | %d -- (%d OnMessages, %d Sends)",
					inbound, outbound, inEvents, outEvents)
			}
			outbound = 0
			outEvents = 0
			inbound = 0
			inEvents = 0
		case amount := <-b.outboundChan:
			outbound += amount
			outEvents++
		case amount := <-b.inboundChan:
			inbound += amount
			inEvents++
		}
	}
}

func (b *bytesSyncLogger) addOutbound(amount int64) {
	b.outboundChan <- amount
}

func (b *bytesSyncLogger) addInbound(amount int64) {
	b.inboundChan <- amount
}
07070100000028000081A400000000000000000000000167D9BD4E00002AF5000000000000000000000000000000000000002600000000snowflake-2.11.0/client/lib/webrtc.gopackage snowflake_client

import (
	"crypto/rand"
	"encoding/hex"
	"errors"
	"io"
	"log"
	"net"
	"net/url"
	"sync"
	"time"

	"github.com/pion/ice/v4"
	"github.com/pion/transport/v3"
	"github.com/pion/transport/v3/stdnet"
	"github.com/pion/webrtc/v4"

	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/event"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/proxy"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/util"
)

// WebRTCPeer represents a WebRTC connection to a remote snowflake proxy.
//
// Each WebRTCPeer only ever has one DataChannel that is used as the peer's transport.
type WebRTCPeer struct {
	id        string
	pc        *webrtc.PeerConnection
	transport *webrtc.DataChannel

	recvPipe  *io.PipeReader
	writePipe *io.PipeWriter

	mu          sync.Mutex // protects the following:
	lastReceive time.Time

	open   chan struct{} // Channel to notify when datachannel opens
	closed chan struct{}

	once sync.Once // Synchronization for PeerConnection destruction

	bytesLogger  bytesLogger
	eventsLogger event.SnowflakeEventReceiver
	proxy        *url.URL
}

// Deprecated: Use NewWebRTCPeerWithNatPolicyAndEventsAndProxy instead.
func NewWebRTCPeer(
	config *webrtc.Configuration, broker *BrokerChannel,
) (*WebRTCPeer, error) {
	return NewWebRTCPeerWithNatPolicyAndEventsAndProxy(
		config, broker, nil, nil, nil,
	)
}

// Deprecated: Use NewWebRTCPeerWithNatPolicyAndEventsAndProxy instead.
func NewWebRTCPeerWithEvents(
	config *webrtc.Configuration, broker *BrokerChannel,
	eventsLogger event.SnowflakeEventReceiver,
) (*WebRTCPeer, error) {
	return NewWebRTCPeerWithNatPolicyAndEventsAndProxy(
		config, broker, nil, eventsLogger, nil,
	)
}

// Deprecated: Use NewWebRTCPeerWithNatPolicyAndEventsAndProxy instead.
func NewWebRTCPeerWithEventsAndProxy(
	config *webrtc.Configuration, broker *BrokerChannel,
	eventsLogger event.SnowflakeEventReceiver, proxy *url.URL,
) (*WebRTCPeer, error) {
	return NewWebRTCPeerWithNatPolicyAndEventsAndProxy(
		config, broker, nil, eventsLogger, proxy,
	)
}

// NewWebRTCPeerWithNatPolicyAndEventsAndProxy constructs
// a WebRTC PeerConnection to a snowflake proxy.
//
// The creation of the peer handles the signaling to the Snowflake broker, including
// the exchange of SDP information, the creation of a PeerConnection, and the establishment
// of a DataChannel to the Snowflake proxy.
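//
// A minimal usage sketch (illustrative only; config and broker are assumed to
// have been constructed elsewhere in this package):
//
//	peer, err := NewWebRTCPeerWithNatPolicyAndEventsAndProxy(config, broker, nil, nil, nil)
//	if err != nil {
//		// rendezvous or DataChannel setup failed
//	}
//	defer peer.Close()
//	_, _ = peer.Write([]byte("upstream data")) // sent over the DataChannel
//	buf := make([]byte, 1500)
//	_, _ = peer.Read(buf) // data relayed back from the proxy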
func NewWebRTCPeerWithNatPolicyAndEventsAndProxy(
	config *webrtc.Configuration, broker *BrokerChannel, natPolicy *NATPolicy,
	eventsLogger event.SnowflakeEventReceiver, proxy *url.URL,
) (*WebRTCPeer, error) {
	if eventsLogger == nil {
		eventsLogger = event.NewSnowflakeEventDispatcher()
	}

	connection := new(WebRTCPeer)
	{
		var buf [8]byte
		if _, err := rand.Read(buf[:]); err != nil {
			panic(err)
		}
		connection.id = "snowflake-" + hex.EncodeToString(buf[:])
	}
	connection.closed = make(chan struct{})

	// Override this with something other than bytesNullLogger to get real traffic logging.
	connection.bytesLogger = &bytesNullLogger{}

	// Pipes remain the same even when DataChannel gets switched.
	connection.recvPipe, connection.writePipe = io.Pipe()

	connection.eventsLogger = eventsLogger
	connection.proxy = proxy

	err := connection.connect(config, broker, natPolicy)
	if err != nil {
		connection.Close()
		return nil, err
	}
	return connection, nil
}

// Read returns bytes received from the remote WebRTC peer, to be relayed to
// the local SOCKS connection.
// As part of |io.ReadWriter|
func (c *WebRTCPeer) Read(b []byte) (int, error) {
	return c.recvPipe.Read(b)
}

// Writes bytes out to remote WebRTC.
// As part of |io.ReadWriter|
func (c *WebRTCPeer) Write(b []byte) (int, error) {
	err := c.transport.Send(b)
	if err != nil {
		return 0, err
	}
	c.bytesLogger.addOutbound(int64(len(b)))
	return len(b), nil
}

// Closed returns a boolean indicating whether the peer is closed.
func (c *WebRTCPeer) Closed() bool {
	select {
	case <-c.closed:
		return true
	default:
	}
	return false
}

// Close closes the connection to the snowflake proxy.
func (c *WebRTCPeer) Close() error {
	c.once.Do(func() {
		close(c.closed)
		c.cleanup()
		log.Printf("WebRTC: Closing")
	})
	return nil
}

// checkForStaleness closes the peer if no messages have been received within
// the given timeout, preventing long-lived broken remotes.
// It should also update the DataChannel in the underlying WebRTC library to
// make Close more immediate / responsive.
func (c *WebRTCPeer) checkForStaleness(timeout time.Duration) {
	c.mu.Lock()
	c.lastReceive = time.Now()
	c.mu.Unlock()
	for {
		c.mu.Lock()
		lastReceive := c.lastReceive
		c.mu.Unlock()
		if time.Since(lastReceive) > timeout {
			log.Printf("WebRTC: No messages received for %v -- closing stale connection.",
				timeout)
			err := errors.New("no messages received, closing stale connection")
			c.eventsLogger.OnNewSnowflakeEvent(event.EventOnSnowflakeConnectionFailed{Error: err})
			c.Close()
			return
		}
		select {
		case <-c.closed:
			return
		case <-time.After(time.Second):
		}
	}
}

// connect does the bulk of the work: gather ICE candidates, send the SDP offer to broker,
// receive an answer from broker, and wait for data channel to open.
//
// `natPolicy` can be nil, in which case we'll always send our actual
// NAT type to the broker.
func (c *WebRTCPeer) connect(
	config *webrtc.Configuration,
	broker *BrokerChannel,
	natPolicy *NATPolicy,
) error {
	log.Println(c.id, " connecting...")

	err := c.preparePeerConnection(config, broker.keepLocalAddresses)
	localDescription := c.pc.LocalDescription()
	c.eventsLogger.OnNewSnowflakeEvent(event.EventOnOfferCreated{
		WebRTCLocalDescription: localDescription,
		Error:                  err,
	})
	if err != nil {
		return err
	}

	actualNatType := broker.GetNATType()
	var natTypeToSend string
	if natPolicy != nil {
		natTypeToSend = natPolicy.NATTypeToSend(actualNatType)
	} else {
		natTypeToSend = actualNatType
	}
	if natTypeToSend != actualNatType {
		log.Printf(
			"Our NAT type is \"%v\", but let's tell the broker it's \"%v\".",
			actualNatType,
			natTypeToSend,
		)
	} else {
		log.Printf("natTypeToSend: \"%v\" (same as actualNatType)", natTypeToSend)
	}

	answer, err := broker.Negotiate(localDescription, natTypeToSend)
	c.eventsLogger.OnNewSnowflakeEvent(event.EventOnBrokerRendezvous{
		WebRTCRemoteDescription: answer,
		Error:                   err,
	})
	if err != nil {
		return err
	}
	log.Printf("Received Answer.\n")
	err = c.pc.SetRemoteDescription(*answer)
	if nil != err {
		log.Println("WebRTC: Unable to SetRemoteDescription:", err)
		return err
	}

	// Wait for the datachannel to open or time out
	select {
	case <-c.open:
		if natPolicy != nil {
			natPolicy.Success(actualNatType, natTypeToSend)
		}
	case <-time.After(DataChannelTimeout):
		c.transport.Close()
		err := errors.New("timeout waiting for DataChannel.OnOpen")
		if natPolicy != nil {
			natPolicy.Failure(actualNatType, natTypeToSend)
		}
		c.eventsLogger.OnNewSnowflakeEvent(event.EventOnSnowflakeConnectionFailed{Error: err})
		return err
	}

	go c.checkForStaleness(SnowflakeTimeout)
	return nil
}

// preparePeerConnection creates a new WebRTC PeerConnection (stored in c.pc)
// and returns after non-trickle ICE candidate gathering is complete.
func (c *WebRTCPeer) preparePeerConnection(
	config *webrtc.Configuration,
	keepLocalAddresses bool,
) error {
	s := webrtc.SettingEngine{}

	if !keepLocalAddresses {
		s.SetIPFilter(func(ip net.IP) (keep bool) {
			// `IsLoopback()` and `IsUnspecified()` are likely not needed here,
			// but let's keep them just in case.
			// FYI there is similar code in other files in this project.
			keep = !util.IsLocal(ip) && !ip.IsLoopback() && !ip.IsUnspecified()
			return
		})
		s.SetICEMulticastDNSMode(ice.MulticastDNSModeDisabled)
	}
	s.SetIncludeLoopbackCandidate(keepLocalAddresses)

	// Use the SetNet setting https://pkg.go.dev/github.com/pion/webrtc/v3#SettingEngine.SetNet
	// to get snowflake working in shadow (where the AF_NETLINK family is not implemented).
	// These two lines of code functionally revert a change in pion by silently
	// ignoring failures of net.Interfaces(), rather than returning an error.
	var vnet transport.Net
	vnet, _ = stdnet.NewNet()

	if c.proxy != nil {
		if err := proxy.CheckProxyProtocolSupport(c.proxy); err != nil {
			return err
		}
		socksClient := proxy.NewSocks5UDPClient(c.proxy)
		vnet = proxy.NewTransportWrapper(&socksClient, vnet)
	}

	s.SetNet(vnet)
	api := webrtc.NewAPI(webrtc.WithSettingEngine(s))
	var err error
	c.pc, err = api.NewPeerConnection(*config)
	if err != nil {
		log.Printf("NewPeerConnection ERROR: %s", err)
		return err
	}
	ordered := true
	dataChannelOptions := &webrtc.DataChannelInit{
		Ordered: &ordered,
	}
	// We must create the data channel before creating an offer
	// https://github.com/pion/webrtc/wiki/Release-WebRTC@v3.0.0#a-data-channel-is-no-longer-implicitly-created-with-a-peerconnection
	dc, err := c.pc.CreateDataChannel(c.id, dataChannelOptions)
	if err != nil {
		log.Printf("CreateDataChannel ERROR: %s", err)
		return err
	}
	dc.OnOpen(func() {
		c.eventsLogger.OnNewSnowflakeEvent(event.EventOnSnowflakeConnected{})
		log.Println("WebRTC: DataChannel.OnOpen")
		close(c.open)
	})
	dc.OnClose(func() {
		log.Println("WebRTC: DataChannel.OnClose")
		c.Close()
	})
	dc.OnError(func(err error) {
		c.eventsLogger.OnNewSnowflakeEvent(event.EventOnSnowflakeConnectionFailed{Error: err})
	})
	dc.OnMessage(func(msg webrtc.DataChannelMessage) {
		if len(msg.Data) <= 0 {
			log.Println("0 length message---")
		}
		n, err := c.writePipe.Write(msg.Data)
		c.bytesLogger.addInbound(int64(n))
		if err != nil {
			// TODO: Maybe shouldn't actually close.
			log.Println("Error writing to SOCKS pipe")
			if inerr := c.writePipe.CloseWithError(err); inerr != nil {
				log.Printf("c.writePipe.CloseWithError returned error: %v", inerr)
			}
		}
		c.mu.Lock()
		c.lastReceive = time.Now()
		c.mu.Unlock()
	})
	c.transport = dc
	c.open = make(chan struct{})
	log.Println("WebRTC: DataChannel created")

	offer, err := c.pc.CreateOffer(nil)
	// TODO: Potentially timeout and retry if ICE isn't working.
	if err != nil {
		log.Println("Failed to prepare offer", err)
		c.pc.Close()
		return err
	}
	log.Println("WebRTC: Created offer")

	// Allow candidates to accumulate until ICEGatheringStateComplete.
	done := webrtc.GatheringCompletePromise(c.pc)
	// Start gathering candidates
	err = c.pc.SetLocalDescription(offer)
	if err != nil {
		log.Println("Failed to apply offer", err)
		c.pc.Close()
		return err
	}
	log.Println("WebRTC: Set local description")

	<-done // Wait for ICE candidate gathering to complete.

	return nil
}

// cleanup closes all channels and transports
func (c *WebRTCPeer) cleanup() {
	// Close this side of the SOCKS pipe.
	if c.writePipe != nil { // c.writePipe can be nil in tests.
		c.writePipe.Close()
	}
	if nil != c.transport {
		log.Printf("WebRTC: closing DataChannel")
		c.transport.Close()
	}
	if nil != c.pc {
		log.Printf("WebRTC: closing PeerConnection")
		err := c.pc.Close()
		if nil != err {
			log.Printf("Error closing peerconnection...")
		}
	}
}
07070100000029000081A400000000000000000000000167D9BD4E000025B1000000000000000000000000000000000000002500000000snowflake-2.11.0/client/snowflake.go// Client transport plugin for the Snowflake pluggable transport.
package main

import (
	"flag"
	"fmt"
	"io"
	"log"
	"net"
	"os"
	"os/signal"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"syscall"

	pt "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/goptlib"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/ptutil/safelog"

	sf "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/client/lib"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/event"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/proxy"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/version"
)

const (
	DefaultSnowflakeCapacity = 1
)

type ptEventLogger struct {
}

func NewPTEventLogger() event.SnowflakeEventReceiver {
	return &ptEventLogger{}
}

func (p ptEventLogger) OnNewSnowflakeEvent(e event.SnowflakeEvent) {
	pt.Log(pt.LogSeverityNotice, e.String())
}

// Exchanges bytes between two ReadWriters.
// (In this case, between a SOCKS connection and a snowflake transport conn)
func copyLoop(socks, sfconn io.ReadWriter) {
	done := make(chan struct{}, 2)
	go func() {
		if _, err := io.Copy(socks, sfconn); err != nil {
			log.Printf("copying Snowflake to SOCKS resulted in error: %v", err)
		}
		done <- struct{}{}
	}()
	go func() {
		if _, err := io.Copy(sfconn, socks); err != nil {
			log.Printf("copying SOCKS to Snowflake resulted in error: %v", err)
		}
		done <- struct{}{}
	}()
	<-done
	log.Println("copy loop ended")
}

// Accept local SOCKS connections and connect to a Snowflake connection
func socksAcceptLoop(ln *pt.SocksListener, baseConfig sf.ClientConfig,
	shutdown chan struct{}, wg *sync.WaitGroup) {
	defer ln.Close()
	for {
		conn, err := ln.AcceptSocks()
		if err != nil {
			if err, ok := err.(net.Error); ok && err.Temporary() {
				continue
			}
			log.Printf("SOCKS accept error: %s", err)
			break
		}
		log.Printf("SOCKS accepted: %v", conn.Req)
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer conn.Close()

			config := baseConfig
			// Check to see if our command line options are overridden by SOCKS options
			if arg, ok := conn.Req.Args.Get("ampcache"); ok {
				config.AmpCacheURL = arg
			}
			if arg, ok := conn.Req.Args.Get("sqsqueue"); ok {
				config.SQSQueueURL = arg
			}
			if arg, ok := conn.Req.Args.Get("sqscreds"); ok {
				config.SQSCredsStr = arg
			}
			if arg, ok := conn.Req.Args.Get("fronts"); ok {
				if arg != "" {
					config.FrontDomains = strings.Split(strings.TrimSpace(arg), ",")
				}
			} else if arg, ok := conn.Req.Args.Get("front"); ok {
				config.FrontDomains = strings.Split(strings.TrimSpace(arg), ",")
			}
			if arg, ok := conn.Req.Args.Get("ice"); ok {
				config.ICEAddresses = strings.Split(strings.TrimSpace(arg), ",")
			}
			if arg, ok := conn.Req.Args.Get("max"); ok {
				max, err := strconv.Atoi(arg)
				if err != nil {
					conn.Reject()
					log.Println("Invalid SOCKS arg: max=", arg)
					return
				}
				config.Max = max
			}
			if arg, ok := conn.Req.Args.Get("url"); ok {
				config.BrokerURL = arg
			}
			if arg, ok := conn.Req.Args.Get("utls-nosni"); ok {
				switch strings.ToLower(arg) {
				case "true":
					fallthrough
				case "yes":
					config.UTLSRemoveSNI = true
				}
			}
			if arg, ok := conn.Req.Args.Get("utls-imitate"); ok {
				config.UTLSClientID = arg
			}
			if arg, ok := conn.Req.Args.Get("fingerprint"); ok {
				config.BridgeFingerprint = arg
			}
			transport, err := sf.NewSnowflakeClient(config)
			if err != nil {
				conn.Reject()
				log.Println("Failed to start snowflake transport: ", err)
				return
			}
			transport.AddSnowflakeEventListener(NewPTEventLogger())
			err = conn.Grant(&net.TCPAddr{IP: net.IPv4zero, Port: 0})
			if err != nil {
				log.Printf("conn.Grant error: %s", err)
				return
			}

			handler := make(chan struct{})
			go func() {
				defer close(handler)
				sconn, err := transport.Dial()
				if err != nil {
					log.Printf("dial error: %s", err)
					return
				}
				defer sconn.Close()
				// copy between the created Snowflake conn and the SOCKS conn
				copyLoop(conn, sconn)
			}()
			select {
			case <-shutdown:
				log.Println("Received shutdown signal")
			case <-handler:
				log.Println("Handler ended")
			}
			return
		}()
	}
}

func main() {
	iceServersCommas := flag.String("ice", "", "comma-separated list of ICE servers")
	brokerURL := flag.String("url", "", "URL of signaling broker")
	frontDomain := flag.String("front", "", "front domain")
	frontDomainsCommas := flag.String("fronts", "", "comma-separated list of front domains")
	ampCacheURL := flag.String("ampcache", "", "URL of AMP cache to use as a proxy for signaling")
	sqsQueueURL := flag.String("sqsqueue", "", "URL of SQS Queue to use as a proxy for signaling")
	sqsCredsStr := flag.String("sqscreds", "", "credentials to access SQS Queue")
	logFilename := flag.String("log", "", "name of log file")
	logToStateDir := flag.Bool("log-to-state-dir", false, "resolve the log file relative to tor's pt state dir")
	keepLocalAddresses := flag.Bool("keep-local-addresses", false, "keep local LAN address ICE candidates.\nThis is usually pointless because Snowflake proxies don't usually reside on the same local network as the client.")
	unsafeLogging := flag.Bool("unsafe-logging", false, "keep IP addresses and other sensitive info in the logs")
	max := flag.Int("max", DefaultSnowflakeCapacity,
		"capacity for number of multiplexed WebRTC peers")
	versionFlag := flag.Bool("version", false, "display version info to stderr and quit")

	// Deprecated
	oldLogToStateDir := flag.Bool("logToStateDir", false, "use -log-to-state-dir instead")
	oldKeepLocalAddresses := flag.Bool("keepLocalAddresses", false, "use -keep-local-addresses instead")

	flag.Parse()

	if *versionFlag {
		fmt.Fprintf(os.Stderr, "snowflake-client %s", version.ConstructResult())
		os.Exit(0)
	}

	log.SetFlags(log.LstdFlags | log.LUTC)

	// Don't write to stderr; versions of tor earlier than about 0.3.5.6 do
	// not read from the pipe, and eventually we will deadlock because the
	// buffer is full.
	// https://bugs.torproject.org/26360
	// https://bugs.torproject.org/25600#comment:14
	var logOutput = io.Discard
	if *logFilename != "" {
		if *logToStateDir || *oldLogToStateDir {
			stateDir, err := pt.MakeStateDir()
			if err != nil {
				log.Fatal(err)
			}
			*logFilename = filepath.Join(stateDir, *logFilename)
		}
		logFile, err := os.OpenFile(*logFilename,
			os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
		if err != nil {
			log.Fatal(err)
		}
		defer logFile.Close()
		logOutput = logFile
	}
	if *unsafeLogging {
		log.SetOutput(logOutput)
	} else {
		// We want to send the log output through our scrubber first
		log.SetOutput(&safelog.LogScrubber{Output: logOutput})
	}

	log.Printf("snowflake-client %s\n", version.GetVersion())

	iceAddresses := strings.Split(strings.TrimSpace(*iceServersCommas), ",")

	var frontDomains []string
	if *frontDomainsCommas != "" {
		frontDomains = strings.Split(strings.TrimSpace(*frontDomainsCommas), ",")
	}

	// Maintain backwards compatibility with the legacy command line option
	if (len(frontDomains) == 0) && (*frontDomain != "") {
		frontDomains = []string{*frontDomain}
	}

	config := sf.ClientConfig{
		BrokerURL:          *brokerURL,
		AmpCacheURL:        *ampCacheURL,
		SQSQueueURL:        *sqsQueueURL,
		SQSCredsStr:        *sqsCredsStr,
		FrontDomains:       frontDomains,
		ICEAddresses:       iceAddresses,
		KeepLocalAddresses: *keepLocalAddresses || *oldKeepLocalAddresses,
		Max:                *max,
	}

	// Begin goptlib client process.
	ptInfo, err := pt.ClientSetup(nil)
	if err != nil {
		log.Fatal(err)
	}
	if ptInfo.ProxyURL != nil {
		if err := proxy.CheckProxyProtocolSupport(ptInfo.ProxyURL); err != nil {
			pt.ProxyError("proxy is not supported:" + err.Error())
			os.Exit(1)
		} else {
			config.CommunicationProxy = ptInfo.ProxyURL
			client := proxy.NewSocks5UDPClient(config.CommunicationProxy)
			conn, err := client.ListenPacket("udp", nil)
			if err != nil {
				pt.ProxyError("proxy test failure:" + err.Error())
				os.Exit(1)
			}
			conn.Close()
			pt.ProxyDone()
		}
	}
	pt.ReportVersion("snowflake-client", version.GetVersion())
	listeners := make([]net.Listener, 0)
	shutdown := make(chan struct{})
	var wg sync.WaitGroup
	for _, methodName := range ptInfo.MethodNames {
		switch methodName {
		case "snowflake":
			// TODO: Be able to recover when SOCKS dies.
			ln, err := pt.ListenSocks("tcp", "127.0.0.1:0")
			if err != nil {
				pt.CmethodError(methodName, err.Error())
				break
			}
			log.Printf("Started SOCKS listener at %v.", ln.Addr())
			go socksAcceptLoop(ln, config, shutdown, &wg)
			pt.Cmethod(methodName, ln.Version(), ln.Addr())
			listeners = append(listeners, ln)
		default:
			pt.CmethodError(methodName, "no such method")
		}
	}
	pt.CmethodsDone()

	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGTERM)

	if os.Getenv("TOR_PT_EXIT_ON_STDIN_CLOSE") == "1" {
		// This environment variable means we should treat EOF on stdin
		// just like SIGTERM: https://bugs.torproject.org/15435.
		go func() {
			if _, err := io.Copy(io.Discard, os.Stdin); err != nil {
				log.Printf("calling io.Copy(io.Discard, os.Stdin) returned error: %v", err)
			}
			log.Printf("synthesizing SIGTERM because of stdin close")
			sigChan <- syscall.SIGTERM
		}()
	}

	// Wait for a signal.
	<-sigChan
	log.Println("stopping snowflake")

	// Signal received, shut down.
	for _, ln := range listeners {
		ln.Close()
	}
	close(shutdown)
	wg.Wait()
	log.Println("snowflake is done.")
}
0707010000002A000081A400000000000000000000000167D9BD4E00000406000000000000000000000000000000000000001E00000000snowflake-2.11.0/client/torrcUseBridges 1
DataDirectory datadir

ClientTransportPlugin snowflake exec ./client -log snowflake.log

Bridge snowflake 192.0.2.3:80 2B280B23E1107BB62ABFC40DDCC8824814F80A72 fingerprint=2B280B23E1107BB62ABFC40DDCC8824814F80A72 url=https://1098762253.rsc.cdn77.org/ fronts=www.cdn77.com,www.phpmyadmin.net ice=stun:stun.antisip.com:3478,stun:stun.epygi.com:3478,stun:stun.uls.co.za:3478,stun:stun.voipgate.com:3478,stun:stun.mixvoip.com:3478,stun:stun.nextcloud.com:3478,stun:stun.bethesda.net:3478,stun:stun.nextcloud.com:443 utls-imitate=hellorandomizedalpn
Bridge snowflake 192.0.2.4:80 8838024498816A039FCBBAB14E6F40A0843051FA fingerprint=8838024498816A039FCBBAB14E6F40A0843051FA url=https://1098762253.rsc.cdn77.org/ fronts=www.cdn77.com,www.phpmyadmin.net ice=stun:stun.antisip.com:3478,stun:stun.epygi.com:3478,stun:stun.uls.co.za:3478,stun:stun.voipgate.com:3478,stun:stun.mixvoip.com:3478,stun:stun.nextcloud.com:3478,stun:stun.bethesda.net:3478,stun:stun.nextcloud.com:443 utls-imitate=hellorandomizedalpn

SocksPort auto
0707010000002B000081A400000000000000000000000167D9BD4E000000A1000000000000000000000000000000000000002800000000snowflake-2.11.0/client/torrc.localhostUseBridges 1
DataDirectory datadir

ClientTransportPlugin snowflake exec ./client -keep-local-addresses

Bridge snowflake 192.0.2.3:1 url=http://localhost:8080/
0707010000002C000041ED00000000000000000000000267D9BD4E00000000000000000000000000000000000000000000001800000000snowflake-2.11.0/common0707010000002D000041ED00000000000000000000000267D9BD4E00000000000000000000000000000000000000000000001C00000000snowflake-2.11.0/common/amp0707010000002E000081A400000000000000000000000167D9BD4E00000C9C000000000000000000000000000000000000002D00000000snowflake-2.11.0/common/amp/armor_decoder.gopackage amp

import (
	"bufio"
	"bytes"
	"encoding/base64"
	"fmt"
	"io"

	"golang.org/x/net/html"
)

// ErrUnknownVersion is the error returned when the first character inside the
// element encoding (but outside the base64 encoding) is not '0'.
type ErrUnknownVersion byte

func (err ErrUnknownVersion) Error() string {
	return fmt.Sprintf("unknown armor version indicator %+q", byte(err))
}

func isASCIIWhitespace(b byte) bool {
	switch b {
	// https://infra.spec.whatwg.org/#ascii-whitespace
	case '\x09', '\x0a', '\x0c', '\x0d', '\x20':
		return true
	default:
		return false
	}
}

func splitASCIIWhitespace(data []byte, atEOF bool) (advance int, token []byte, err error) {
	var i, j int
	// Skip initial whitespace.
	for i = 0; i < len(data); i++ {
		if !isASCIIWhitespace(data[i]) {
			break
		}
	}
	// Look for next whitespace.
	for j = i; j < len(data); j++ {
		if isASCIIWhitespace(data[j]) {
			return j + 1, data[i:j], nil
		}
	}
	// We reached the end of data without finding more whitespace. Only
	// consider it a token if we are at EOF.
	if atEOF && i < j {
		return j, data[i:j], nil
	}
	// Otherwise, request more data.
	return i, nil, nil
}

func decodeToWriter(w io.Writer, r io.Reader) (int64, error) {
	tokenizer := html.NewTokenizer(r)
	// Set a memory limit on token sizes, otherwise the tokenizer will
	// buffer text indefinitely if it is not broken up by other token types.
	tokenizer.SetMaxBuf(elementSizeLimit)
	active := false
	total := int64(0)
	for {
		tt := tokenizer.Next()
		switch tt {
		case html.ErrorToken:
			err := tokenizer.Err()
			if err == io.EOF {
				err = nil
			}
			if err == nil && active {
				return total, fmt.Errorf("missing </pre> tag")
			}
			return total, err
		case html.TextToken:
			if active {
				// Re-join the separate chunks of text and
				// feed them to the decoder.
				scanner := bufio.NewScanner(bytes.NewReader(tokenizer.Text()))
				scanner.Split(splitASCIIWhitespace)
				for scanner.Scan() {
					n, err := w.Write(scanner.Bytes())
					total += int64(n)
					if err != nil {
						return total, err
					}
				}
				if err := scanner.Err(); err != nil {
					return total, err
				}
			}
		case html.StartTagToken:
			tn, _ := tokenizer.TagName()
			if string(tn) == "pre" {
				if active {
					// nesting not allowed
					return total, fmt.Errorf("unexpected %s", tokenizer.Token())
				}
				active = true
			}
		case html.EndTagToken:
			tn, _ := tokenizer.TagName()
			if string(tn) == "pre" {
				if !active {
					// stray end tag
					return total, fmt.Errorf("unexpected %s", tokenizer.Token())
				}
				active = false
			}
		}
	}
}

// NewArmorDecoder returns a new AMP armor decoder.
func NewArmorDecoder(r io.Reader) (io.Reader, error) {
	pr, pw := io.Pipe()
	go func() {
		_, err := decodeToWriter(pw, r)
		pw.CloseWithError(err)
	}()

	// The first byte inside the element encoding is a server–client
	// protocol version indicator.
	var version [1]byte
	_, err := pr.Read(version[:])
	if err != nil {
		pr.CloseWithError(err)
		return nil, err
	}
	switch version[0] {
	case '0':
		return base64.NewDecoder(base64.StdEncoding, pr), nil
	default:
		err := ErrUnknownVersion(version[0])
		pr.CloseWithError(err)
		return nil, err
	}
}
0707010000002F000081A400000000000000000000000167D9BD4E00001542000000000000000000000000000000000000002D00000000snowflake-2.11.0/common/amp/armor_encoder.gopackage amp

import (
	"encoding/base64"
	"io"
)

// https://amp.dev/boilerplate/
// https://amp.dev/documentation/guides-and-tutorials/learn/spec/amp-boilerplate/?format=websites
// https://amp.dev/documentation/guides-and-tutorials/learn/spec/amphtml/?format=websites#the-amp-html-format
const (
	boilerplateStart = `<!doctype html>
<html amp>
<head>
<meta charset="utf-8">
<script async src="https://cdn.ampproject.org/v0.js"></script>
<link rel="canonical" href="#">
<meta name="viewport" content="width=device-width">
<style amp-boilerplate>body{-webkit-animation:-amp-start 8s steps(1,end) 0s 1 normal both;-moz-animation:-amp-start 8s steps(1,end) 0s 1 normal both;-ms-animation:-amp-start 8s steps(1,end) 0s 1 normal both;animation:-amp-start 8s steps(1,end) 0s 1 normal both}@-webkit-keyframes -amp-start{from{visibility:hidden}to{visibility:visible}}@-moz-keyframes -amp-start{from{visibility:hidden}to{visibility:visible}}@-ms-keyframes -amp-start{from{visibility:hidden}to{visibility:visible}}@-o-keyframes -amp-start{from{visibility:hidden}to{visibility:visible}}@keyframes -amp-start{from{visibility:hidden}to{visibility:visible}}</style><noscript><style amp-boilerplate>body{-webkit-animation:none;-moz-animation:none;-ms-animation:none;animation:none}</style></noscript>
</head>
<body>
`
	boilerplateEnd = `</body>
</html>`
)

const (
	// We restrict the amount of text that may go inside an HTML element, in
	// order to limit the amount a decoder may have to buffer.
	elementSizeLimit = 32 * 1024

	// The payload is conceptually a long base64-encoded string, but we
	// break the string into short chunks separated by whitespace. This is
	// to protect against modification by AMP caches, which reportedly may
	// truncate long words in text:
	// https://bugs.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/25985#note_2592348
	bytesPerChunk = 32

	// We set the number of chunks per element so as to stay under
	// elementSizeLimit. Here, we assume that there is 1 byte of whitespace
	// after each chunk (with an additional whitespace byte at the beginning
	// of the element).
	chunksPerElement = (elementSizeLimit - 1) / (bytesPerChunk + 1)
)

// The AMP armor encoder is a chain of a base64 encoder (base64.NewEncoder) and
// an HTML element encoder (elementEncoder). A top-level encoder (armorEncoder)
// coordinates these two, and handles prepending and appending the AMP
// boilerplate. armorEncoder's Write method writes data into the base64 encoder,
// where it makes its way through the chain.

// NewArmorEncoder returns a new AMP armor encoder. Anything written to the
// returned io.WriteCloser will be encoded and written to w. The caller must
// call Close to flush any partially written data and output the AMP boilerplate
// trailer.
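//
// A minimal round-trip sketch (illustrative only):
//
//	var page strings.Builder
//	enc, _ := NewArmorEncoder(&page)
//	_, _ = enc.Write([]byte("hello"))
//	_ = enc.Close() // flushes base64 padding, closes any open <pre>, writes the trailer
//	dec, _ := NewArmorDecoder(strings.NewReader(page.String()))
//	plain, _ := io.ReadAll(dec) // plain == []byte("hello")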
func NewArmorEncoder(w io.Writer) (io.WriteCloser, error) {
	// Immediately write the AMP boilerplate header.
	_, err := w.Write([]byte(boilerplateStart))
	if err != nil {
		return nil, err
	}

	element := &elementEncoder{w: w}
	// Write a server–client protocol version indicator, outside the base64
	// layer.
	_, err = element.Write([]byte{'0'})
	if err != nil {
		return nil, err
	}

	base64 := base64.NewEncoder(base64.StdEncoding, element)
	return &armorEncoder{
		w:       w,
		element: element,
		base64:  base64,
	}, nil
}

type armorEncoder struct {
	base64  io.WriteCloser
	element *elementEncoder
	w       io.Writer
}

func (enc *armorEncoder) Write(p []byte) (int, error) {
	// Write into the chain base64 | element | w.
	return enc.base64.Write(p)
}

func (enc *armorEncoder) Close() error {
	// Close the base64 encoder first, to flush out any buffered data and
	// the final padding.
	err := enc.base64.Close()
	if err != nil {
		return err
	}

	// Next, close the element encoder, to close any open elements.
	err = enc.element.Close()
	if err != nil {
		return err
	}

	// Finally, output the AMP boilerplate trailer.
	_, err = enc.w.Write([]byte(boilerplateEnd))
	if err != nil {
		return err
	}

	return nil
}

// elementEncoder arranges written data into pre elements, with the text within
// separated into chunks. It does no HTML encoding, so data written must not
// contain any bytes that are meaningful in HTML.
type elementEncoder struct {
	w              io.Writer
	chunkCounter   int
	elementCounter int
}

func (enc *elementEncoder) Write(p []byte) (n int, err error) {
	total := 0
	for len(p) > 0 {
		if enc.elementCounter == 0 && enc.chunkCounter == 0 {
			_, err := enc.w.Write([]byte("<pre>\n"))
			if err != nil {
				return total, err
			}
		}

		n := bytesPerChunk - enc.chunkCounter
		if n > len(p) {
			n = len(p)
		}
		nn, err := enc.w.Write(p[:n])
		if err != nil {
			return total, err
		}
		total += nn
		p = p[n:]

		enc.chunkCounter += n
		if enc.chunkCounter >= bytesPerChunk {
			enc.chunkCounter = 0
			enc.elementCounter += 1
			nn, err := enc.w.Write([]byte("\n"))
			if err != nil {
				return total, err
			}
			total += nn
		}

		if enc.elementCounter >= chunksPerElement {
			enc.elementCounter = 0
			nn, err := enc.w.Write([]byte("</pre>\n"))
			if err != nil {
				return total, err
			}
			total += nn
		}
	}
	return total, nil
}

func (enc *elementEncoder) Close() error {
	var err error
	if !(enc.elementCounter == 0 && enc.chunkCounter == 0) {
		if enc.chunkCounter == 0 {
			_, err = enc.w.Write([]byte("</pre>\n"))
		} else {
			_, err = enc.w.Write([]byte("\n</pre>\n"))
		}
	}
	return err
}
07070100000030000081A400000000000000000000000167D9BD4E00000E61000000000000000000000000000000000000002A00000000snowflake-2.11.0/common/amp/armor_test.gopackage amp

import (
	"io"
	"math/rand"
	"strings"
	"testing"
)

func armorDecodeToString(src string) (string, error) {
	dec, err := NewArmorDecoder(strings.NewReader(src))
	if err != nil {
		return "", err
	}
	p, err := io.ReadAll(dec)
	return string(p), err
}

func TestArmorDecoder(t *testing.T) {
	for _, test := range []struct {
		input          string
		expectedOutput string
		expectedErr    bool
	}{
		{`
<pre>
0
</pre>
`,
			"",
			false,
		},
		{`
<pre>
0aGVsbG8gd29ybGQK
</pre>
`,
			"hello world\n",
			false,
		},
		// bad version indicator
		{`
<pre>
1aGVsbG8gd29ybGQK
</pre>
`,
			"",
			true,
		},
		// text outside <pre> elements
		{`
0aGVsbG8gd29ybGQK
blah blah blah
<pre>
0aGVsbG8gd29ybGQK
</pre>
0aGVsbG8gd29ybGQK
blah blah blah
`,
			"hello world\n",
			false,
		},
		{`
<pre>
0QUJDREV
GR0hJSkt
MTU5PUFF
SU1RVVld
</pre>
junk
<pre>
YWVowMTI
zNDU2Nzg
5Cg
=
</pre>
<pre>
=
</pre>
`,
			"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\n",
			false,
		},
		// no <pre> elements, hence no version indicator
		{`
aGVsbG8gd29ybGQK
blah blah blah
aGVsbG8gd29ybGQK
aGVsbG8gd29ybGQK
blah blah blah
`,
			"",
			true,
		},
		// empty <pre> elements, hence no version indicator
		{`
aGVsbG8gd29ybGQK
blah blah blah
<pre>   </pre>
aGVsbG8gd29ybGQK
aGVsbG8gd29ybGQK<pre></pre>
blah blah blah
`,
			"",
			true,
		},
		// other elements inside <pre>
		{
			"blah <pre>0aGVsb<p>G8gd29</p>ybGQK</pre>",
			"hello world\n",
			false,
		},
		// HTML comment
		{
			"blah <!-- <pre>aGVsbG8gd29ybGQK</pre> -->",
			"",
			true,
		},
		// all kinds of ASCII whitespace
		{
			"blah <pre>\x200\x09aG\x0aV\x0csb\x0dG8\x20gd29ybGQK</pre>",
			"hello world\n",
			false,
		},

		// bad padding
		{`
<pre>
0QUJDREV
GR0hJSkt
MTU5PUFF
SU1RVVld
</pre>
junk
<pre>
YWVowMTI
zNDU2Nzg
5Cg
=
</pre>
`,
			"",
			true,
		},
		/*
			// per-chunk base64
			// test disabled because Go stdlib handles this incorrectly:
			// https://github.com/golang/go/issues/31626
			{
				"<pre>QQ==</pre><pre>Qg==</pre>",
				"",
				true,
			},
		*/
		// missing </pre>
		{
			"blah <pre></pre><pre>0aGVsbG8gd29ybGQK",
			"",
			true,
		},
		// nested <pre>
		{
			"blah <pre>0aGVsb<pre>G8gd29</pre>ybGQK</pre>",
			"",
			true,
		},
	} {
		output, err := armorDecodeToString(test.input)
		if test.expectedErr && err == nil {
			t.Errorf("%+q → (%+q, %v), expected error", test.input, output, err)
			continue
		}
		if !test.expectedErr && err != nil {
			t.Errorf("%+q → (%+q, %v), expected no error", test.input, output, err)
			continue
		}
		if !test.expectedErr && output != test.expectedOutput {
			t.Errorf("%+q → (%+q, %v), expected (%+q, %v)",
				test.input, output, err, test.expectedOutput, nil)
			continue
		}
	}
}

func armorRoundTrip(s string) (string, error) {
	var encoded strings.Builder
	enc, err := NewArmorEncoder(&encoded)
	if err != nil {
		return "", err
	}
	_, err = io.Copy(enc, strings.NewReader(s))
	if err != nil {
		return "", err
	}
	err = enc.Close()
	if err != nil {
		return "", err
	}
	return armorDecodeToString(encoded.String())
}

func TestArmorRoundTrip(t *testing.T) {
	lengths := make([]int, 0)
	// Test short strings and lengths around elementSizeLimit thresholds.
	for i := 0; i < bytesPerChunk*2; i++ {
		lengths = append(lengths, i)
	}
	for i := -10; i < +10; i++ {
		lengths = append(lengths, elementSizeLimit+i)
		lengths = append(lengths, 2*elementSizeLimit+i)
	}
	for _, n := range lengths {
		buf := make([]byte, n)
		rand.Read(buf)
		input := string(buf)
		output, err := armorRoundTrip(input)
		if err != nil {
			t.Errorf("length %d → error %v", n, err)
			continue
		}
		if output != input {
			t.Errorf("length %d → %+q", n, output)
			continue
		}
	}
}
07070100000031000081A400000000000000000000000167D9BD4E00001C93000000000000000000000000000000000000002500000000snowflake-2.11.0/common/amp/cache.gopackage amp

import (
	"crypto/sha256"
	"encoding/base32"
	"fmt"
	"net"
	"net/url"
	"path"
	"strings"

	"golang.org/x/net/idna"
)

// domainPrefixBasic does the basic domain prefix conversion. Does not do any
// IDNA mapping, such as https://www.unicode.org/reports/tr46/.
//
// https://amp.dev/documentation/guides-and-tutorials/learn/amp-caches-and-cors/amp-cache-urls/#basic-algorithm
func domainPrefixBasic(domain string) (string, error) {
	// 1. Punycode Decode the publisher domain.
	prefix, err := idna.ToUnicode(domain)
	if err != nil {
		return "", err
	}

	// 2. Replace any "-" (hyphen) character in the output of step 1 with
	//    "--" (two hyphens).
	prefix = strings.Replace(prefix, "-", "--", -1)

	// 3. Replace any "." (dot) character in the output of step 2 with "-"
	//    (hyphen).
	prefix = strings.Replace(prefix, ".", "-", -1)

	// 4. If the output of step 3 has a "-" (hyphen) at both positions 3 and
	//    4, then to the output of step 3, add a prefix of "0-" and add a
	//    suffix of "-0".
	if len(prefix) >= 4 && prefix[2] == '-' && prefix[3] == '-' {
		prefix = "0-" + prefix + "-0"
	}

	// 5. Punycode Encode the output of step 4.
	return idna.ToASCII(prefix)
}

// Lower-case base32 without padding.
var fallbackBase32Encoding = base32.NewEncoding("abcdefghijklmnopqrstuvwxyz234567").WithPadding(base32.NoPadding)

// domainPrefixFallback does the fallback domain prefix conversion. The returned
// base32 domain uses lower-case letters.
//
// https://amp.dev/documentation/guides-and-tutorials/learn/amp-caches-and-cors/amp-cache-urls/#fallback-algorithm
func domainPrefixFallback(domain string) string {
	// The algorithm specification does not say what, exactly, we are to
	// take the SHA-256 of. domain is notionally an abstract Unicode
	// string, not a byte sequence. While
	// https://github.com/ampproject/amp-toolbox/blob/84cb3057e5f6c54d64369ddd285db1cb36237ee8/packages/cache-url/lib/AmpCurlUrlGenerator.js#L62
	// says "Take the SHA256 of the punycode view of the domain," in reality
	// it hashes the UTF-8 encoding of the domain, without Punycode:
	// https://github.com/ampproject/amp-toolbox/blob/84cb3057e5f6c54d64369ddd285db1cb36237ee8/packages/cache-url/lib/AmpCurlUrlGenerator.js#L141
	// https://github.com/ampproject/amp-toolbox/blob/84cb3057e5f6c54d64369ddd285db1cb36237ee8/packages/cache-url/lib/browser/Sha256.js#L24
	// We do the same here, hashing the raw bytes of domain, presumed to be
	// UTF-8.

	// 1. Hash the publisher's domain using SHA256.
	h := sha256.Sum256([]byte(domain))

	// 2. Base32 Escape the output of step 1.
	// 3. Remove the last 4 characters from the output of step 2, which are
	//    always "=" (equals) characters.
	return fallbackBase32Encoding.EncodeToString(h[:])
}

// domainPrefix computes the domain prefix of an AMP cache URL.
//
// https://amp.dev/documentation/guides-and-tutorials/learn/amp-caches-and-cors/amp-cache-urls/#domain-name-prefix
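//
// For example, "example.com" yields "example-com" via the basic algorithm,
// while an input whose basic prefix would exceed 63 characters falls back to
// the base32-of-SHA-256 form (see cache_test.go for concrete cases).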
func domainPrefix(domain string) string {
	// https://amp.dev/documentation/guides-and-tutorials/learn/amp-caches-and-cors/amp-cache-urls/#combined-algorithm
	// 1. Run the Basic Algorithm. If the output is a valid DNS label,
	//    [append the Cache domain suffix and] return. Otherwise continue to
	//    step 2.
	prefix, err := domainPrefixBasic(domain)
	// "A domain prefix is not a valid DNS label if it is longer than 63
	// characters"
	if err == nil && len(prefix) <= 63 {
		return prefix
	}
	// 2. Run the Fallback Algorithm. [Append the Cache domain suffix and]
	//    return.
	return domainPrefixFallback(domain)
}

// CacheURL computes the AMP cache URL for the publisher URL pubURL, using the
// AMP cache at cacheURL. contentType is a string such as "c" or "i" that
// indicates what type of serving the AMP cache is to perform. The Scheme of
// pubURL must be "http" or "https". The Port of pubURL, if any, must match the
// default for the scheme. cacheURL may not have RawQuery, Fragment, or
// RawFragment set, because the resulting URL's query and fragment are taken
// from the publisher URL.
//
// https://amp.dev/documentation/guides-and-tutorials/learn/amp-caches-and-cors/amp-cache-urls/
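//
// For example (cf. cache_test.go), pubURL "https://example.com/amp_document.html"
// with cacheURL "https://cdn.ampproject.org/" and contentType "c" yields
// "https://example-com.cdn.ampproject.org/c/s/example.com/amp_document.html".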
func CacheURL(pubURL, cacheURL *url.URL, contentType string) (*url.URL, error) {
	// The cache URL subdomain, including the domain prefix corresponding to
	// the publisher URL's domain.
	resultHost := domainPrefix(pubURL.Hostname()) + "." + cacheURL.Hostname()
	if cacheURL.Port() != "" {
		resultHost = net.JoinHostPort(resultHost, cacheURL.Port())
	}

	// https://amp.dev/documentation/guides-and-tutorials/learn/amp-caches-and-cors/amp-cache-urls/#url-path
	// The first part of the path is the cache URL's own path, if any.
	pathComponents := []string{cacheURL.EscapedPath()}
	// The next path component is the content type. We cannot encode an
	// empty content type, because it would result in consecutive path
	// separators, which would semantically combine into a single separator.
	if contentType == "" {
		return nil, fmt.Errorf("invalid content type %+q", contentType)
	}
	pathComponents = append(pathComponents, url.PathEscape(contentType))
	// Then, we add an "s" path component, if the publisher URL scheme is
	// "https".
	switch pubURL.Scheme {
	case "http":
		// Do nothing.
	case "https":
		pathComponents = append(pathComponents, "s")
	default:
		return nil, fmt.Errorf("invalid scheme %+q in publisher URL", pubURL.Scheme)
	}
	// The next path component is the publisher URL's host. The AMP cache
	// URL format specification is not clear about whether other
	// subcomponents of the authority (namely userinfo and port) may appear
	// here. We adopt a policy of forbidding userinfo, and requiring that
	// the port be the default for the scheme (and then we omit the port
	// entirely from the returned URL).
	if pubURL.User != nil {
		return nil, fmt.Errorf("publisher URL may not contain userinfo")
	}
	if port := pubURL.Port(); port != "" {
		if !((pubURL.Scheme == "http" && port == "80") || (pubURL.Scheme == "https" && port == "443")) {
			return nil, fmt.Errorf("publisher URL port %+q is not the default for scheme %+q", port, pubURL.Scheme)
		}
	}
	// As with the content type, we cannot encode an empty host, because
	// that would result in an empty path component.
	if pubURL.Hostname() == "" {
		return nil, fmt.Errorf("invalid host %+q in publisher URL", pubURL.Hostname())
	}
	pathComponents = append(pathComponents, url.PathEscape(pubURL.Hostname()))
	// Finally, we append the remainder of the original escaped path from
	// the publisher URL.
	pathComponents = append(pathComponents, pubURL.EscapedPath())

	resultRawPath := path.Join(pathComponents...)
	resultPath, err := url.PathUnescape(resultRawPath)
	if err != nil {
		return nil, err
	}

	// The query and fragment of the returned URL always come from pubURL.
	// Any query or fragment of cacheURL would be ignored. Return an error
	// if either is set.
	if cacheURL.RawQuery != "" {
		return nil, fmt.Errorf("cache URL may not contain a query")
	}
	if cacheURL.Fragment != "" {
		return nil, fmt.Errorf("cache URL may not contain a fragment")
	}

	return &url.URL{
		Scheme:   cacheURL.Scheme,
		User:     cacheURL.User,
		Host:     resultHost,
		Path:     resultPath,
		RawPath:  resultRawPath,
		RawQuery: pubURL.RawQuery,
		Fragment: pubURL.Fragment,
	}, nil
}
07070100000032000081A400000000000000000000000167D9BD4E000021D7000000000000000000000000000000000000002A00000000snowflake-2.11.0/common/amp/cache_test.gopackage amp

import (
	"bytes"
	"net/url"
	"testing"

	"golang.org/x/net/idna"
)

func TestDomainPrefixBasic(t *testing.T) {
	// Tests expecting no error.
	for _, test := range []struct {
		domain, expected string
	}{
		{"", ""},
		{"xn--", ""},
		{"...", "---"},

		// Should not apply mappings such as case folding and
		// normalization.
		{"b\u00fccher.de", "xn--bcher-de-65a"},
		{"B\u00fccher.de", "xn--Bcher-de-65a"},
		{"bu\u0308cher.de", "xn--bucher-de-hkf"},

		// Check some that differ between IDNA 2003 and IDNA 2008.
		// https://unicode.org/reports/tr46/#Deviations
		// https://util.unicode.org/UnicodeJsps/idna.jsp
		{"faß.de", "xn--fa-de-mqa"},
		{"βόλοσ.com", "xn---com-4ld8c2a6a8e"},

		// Lengths of 63 and 64. 64 is too long for a DNS label, but
		// domainPrefixBasic is not expected to check for that.
		{"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"},
		{"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"},

		// https://amp.dev/documentation/guides-and-tutorials/learn/amp-caches-and-cors/amp-cache-urls/#basic-algorithm
		{"example.com", "example-com"},
		{"foo.example.com", "foo-example-com"},
		{"foo-example.com", "foo--example-com"},
		{"xn--57hw060o.com", "xn---com-p33b41770a"},
		{"\u26a1\U0001f60a.com", "xn---com-p33b41770a"},
		{"en-us.example.com", "0-en--us-example-com-0"},
	} {
		output, err := domainPrefixBasic(test.domain)
		if err != nil || output != test.expected {
			t.Errorf("%+q → (%+q, %v), expected (%+q, %v)",
				test.domain, output, err, test.expected, nil)
		}
	}

	// Tests expecting an error.
	for _, domain := range []string{
		"xn---",
	} {
		output, err := domainPrefixBasic(domain)
		if err == nil || output != "" {
			t.Errorf("%+q → (%+q, %v), expected (%+q, non-nil)",
				domain, output, err, "")
		}
	}
}

func TestDomainPrefixFallback(t *testing.T) {
	for _, test := range []struct {
		domain, expected string
	}{
		{
			"",
			"4oymiquy7qobjgx36tejs35zeqt24qpemsnzgtfeswmrw6csxbkq",
		},
		{
			"example.com",
			"un42n5xov642kxrxrqiyanhcoupgql5lt4wtbkyt2ijflbwodfdq",
		},

		// These checked against the output of
		// https://github.com/ampproject/amp-toolbox/tree/84cb3057e5f6c54d64369ddd285db1cb36237ee8/packages/cache-url,
		// using the widget at
		// https://amp.dev/documentation/guides-and-tutorials/learn/amp-caches-and-cors/amp-cache-urls/#url-format.
		{
			"000000000000000000000000000000000000000000000000000000000000.com",
			"stejanx4hsijaoj4secyecy4nvqodk56kw72whwcmvdbtucibf5a",
		},
		{
			"00000000000000000000000000000000000000000000000000000000000a.com",
			"jdcvbsorpnc3hcjrhst56nfm6ymdpovlawdbm2efyxpvlt4cpbya",
		},
		{
			"00000000000000000000000000000000000000000000000000000000000\u03bb.com",
			"qhzqeumjkfpcpuic3vqruyjswcr7y7gcm3crqyhhywvn3xrhchfa",
		},
	} {
		output := domainPrefixFallback(test.domain)
		if output != test.expected {
			t.Errorf("%+q → %+q, expected %+q",
				test.domain, output, test.expected)
		}
	}
}

// Checks that domainPrefix chooses domainPrefixBasic or domainPrefixFallback as
// appropriate; i.e., always returns string that is a valid DNS label and is
// IDNA-decodable.
func TestDomainPrefix(t *testing.T) {
	// A validating IDNA profile, which checks label length and that the
	// label contains only certain ASCII characters. It does not do the
	// ValidateLabels check, because that depends on the input having
	// certain properties.
	profile := idna.New(
		idna.VerifyDNSLength(true),
		idna.StrictDomainName(true),
	)
	for _, domain := range []string{
		"example.com",
		"\u0314example.com",
		"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",  // 63 bytes
		"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", // 64 bytes
		"xn--57hw060o.com",
		"a b c",
	} {
		output := domainPrefix(domain)
		if bytes.IndexByte([]byte(output), '.') != -1 {
			t.Errorf("%+q → %+q contains a dot", domain, output)
		}
		_, err := profile.ToUnicode(output)
		if err != nil {
			t.Errorf("%+q → error %v", domain, err)
		}
	}
}

func mustParseURL(rawurl string) *url.URL {
	u, err := url.Parse(rawurl)
	if err != nil {
		panic(err)
	}
	return u
}

func TestCacheURL(t *testing.T) {
	// Tests expecting no error.
	for _, test := range []struct {
		pub         string
		cache       string
		contentType string
		expected    string
	}{
		// With or without trailing slash on pubURL.
		{
			"http://example.com/",
			"https://amp.cache/",
			"c",
			"https://example-com.amp.cache/c/example.com",
		},
		{
			"http://example.com",
			"https://amp.cache/",
			"c",
			"https://example-com.amp.cache/c/example.com",
		},
		// https pubURL.
		{
			"https://example.com/",
			"https://amp.cache/",
			"c",
			"https://example-com.amp.cache/c/s/example.com",
		},
		// The content type should be escaped if necessary.
		{
			"http://example.com/",
			"https://amp.cache/",
			"/",
			"https://example-com.amp.cache/%2F/example.com",
		},
		// Retain pubURL path, query, and fragment, including escaping.
		{
			"http://example.com/my%2Fpath/index.html?a=1#fragment",
			"https://amp.cache/",
			"c",
			"https://example-com.amp.cache/c/example.com/my%2Fpath/index.html?a=1#fragment",
		},
		// Retain scheme, userinfo, port, and path of cacheURL, escaping
		// whatever is necessary.
		{
			"http://example.com",
			"http://cache%2Fuser:cache%40pass@amp.cache:123/with/../../path/..%2f../",
			"c",
			"http://cache%2Fuser:cache%40pass@example-com.amp.cache:123/path/..%2f../c/example.com",
		},
		// Port numbers in pubURL are allowed, if they're the default
		// for scheme.
		{
			"http://example.com:80/",
			"https://amp.cache/",
			"c",
			"https://example-com.amp.cache/c/example.com",
		},
		{
			"https://example.com:443/",
			"https://amp.cache/",
			"c",
			"https://example-com.amp.cache/c/s/example.com",
		},
		// "?" at the end of cacheURL is okay, as long as the query is
		// empty.
		{
			"http://example.com/",
			"https://amp.cache/?",
			"c",
			"https://example-com.amp.cache/c/example.com",
		},

		// https://developers.google.com/amp/cache/overview#example-requesting-document-using-tls
		{
			"https://example.com/amp_document.html",
			"https://cdn.ampproject.org/",
			"c",
			"https://example-com.cdn.ampproject.org/c/s/example.com/amp_document.html",
		},
		// https://developers.google.com/amp/cache/overview#example-requesting-image-using-plain-http
		{
			"http://example.com/logo.png",
			"https://cdn.ampproject.org/",
			"i",
			"https://example-com.cdn.ampproject.org/i/example.com/logo.png",
		},
		// https://developers.google.com/amp/cache/overview#query-parameter-example
		{
			"https://example.com/g?value=Hello%20World",
			"https://cdn.ampproject.org/",
			"c",
			"https://example-com.cdn.ampproject.org/c/s/example.com/g?value=Hello%20World",
		},
	} {
		pubURL := mustParseURL(test.pub)
		cacheURL := mustParseURL(test.cache)
		outputURL, err := CacheURL(pubURL, cacheURL, test.contentType)
		if err != nil {
			t.Errorf("%+q %+q %+q → error %v",
				test.pub, test.cache, test.contentType, err)
			continue
		}
		if outputURL.String() != test.expected {
			t.Errorf("%+q %+q %+q → %+q, expected %+q",
				test.pub, test.cache, test.contentType, outputURL, test.expected)
			continue
		}
	}

	// Tests expecting an error.
	for _, test := range []struct {
		pub         string
		cache       string
		contentType string
	}{
		// Empty content type.
		{
			"http://example.com/",
			"https://amp.cache/",
			"",
		},
		// Empty host.
		{
			"http:///index.html",
			"https://amp.cache/",
			"c",
		},
		// Empty scheme.
		{
			"//example.com/",
			"https://amp.cache/",
			"c",
		},
		// Unrecognized scheme.
		{
			"ftp://example.com/",
			"https://amp.cache/",
			"c",
		},
		// Wrong port number for scheme.
		{
			"http://example.com:443/",
			"https://amp.cache/",
			"c",
		},
		// userinfo in pubURL.
		{
			"http://user@example.com/",
			"https://amp.cache/",
			"c",
		},
		{
			"http://user:pass@example.com/",
			"https://amp.cache/",
			"c",
		},
		// cacheURL may not contain a query.
		{
			"http://example.com/",
			"https://amp.cache/?a=1",
			"c",
		},
		// cacheURL may not contain a fragment.
		{
			"http://example.com/",
			"https://amp.cache/#fragment",
			"c",
		},
	} {
		pubURL := mustParseURL(test.pub)
		cacheURL := mustParseURL(test.cache)
		outputURL, err := CacheURL(pubURL, cacheURL, test.contentType)
		if err == nil {
			t.Errorf("%+q %+q %+q → %+q, expected error",
				test.pub, test.cache, test.contentType, outputURL)
			continue
		}
	}
}
07070100000033000081A400000000000000000000000167D9BD4E000011AD000000000000000000000000000000000000002300000000snowflake-2.11.0/common/amp/doc.go/*
Package amp provides functions for working with the AMP (Accelerated Mobile
Pages) subset of HTML, and conveying binary data through an AMP cache.

# AMP cache

The CacheURL function takes a plain URL and converts it to be accessed through a
given AMP cache.

The EncodePath and DecodePath functions provide a way to encode data into the
suffix of a URL path. AMP caches do not support HTTP POST, but encoding data
into a URL path with GET is an alternative means of sending data to the server.
The format of an encoded path is:

	0<0 or more bytes, including slash>/<base64 of data>

That is:
* "0", a format version number, which controls the interpretation of the rest of
the path. Only the first byte matters as a version indicator (not the whole
first path component).
* Any number of slash or non-slash bytes. These may be used as padding or to
prevent cache collisions in the AMP cache.
* A final slash.
* base64 encoding of the data, using the URL-safe alphabet (which does not
include slash).

For example, an encoding of the string "This is path-encoded data." is the
following. The "lgWHcwhXFjUm" following the format version number is random
padding that will be ignored on decoding.

	0lgWHcwhXFjUm/VGhpcyBpcyBwYXRoLWVuY29kZWQgZGF0YS4

It is the caller's responsibility to add or remove any directory path prefix
before calling EncodePath or DecodePath.

# AMP armor

AMP armor is a data encoding scheme that satisfies the requirements of the
AMP (Accelerated Mobile Pages) subset of HTML, and survives modification by an
AMP cache. For the requirements of AMP HTML, see
https://amp.dev/documentation/guides-and-tutorials/learn/spec/amphtml/.
For modifications that may be made by an AMP cache, see
https://github.com/ampproject/amphtml/blob/main/docs/spec/amp-cache-modifications.md.

The encoding is based on ones created by Ivan Markin. See codec/amp/ in
https://github.com/nogoegst/amper and discussion at
https://bugs.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/25985.

The encoding algorithm works as follows. Base64-encode the input. Prepend the
input with the byte '0'; this is a protocol version indicator that the decoder
can use to determine how to interpret the bytes that follow. Split the base64
into fixed-size chunks separated by whitespace. Take up to 1024 chunks at a
time, and wrap them in a pre element. Then, situate the markup so far within the
body of the AMP HTML boilerplate. The decoding algorithm is to scan the HTML for
pre elements, split their text contents on whitespace and concatenate, then
base64 decode. The base64 encoding uses the standard alphabet, with normal "="
padding (https://tools.ietf.org/html/rfc4648#section-4).

The reason for splitting the base64 into chunks is that AMP caches reportedly
truncate long strings that are not broken by whitespace:
https://bugs.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/25985#note_2592348.
The characters that may separate the chunks are the ASCII whitespace characters
(https://infra.spec.whatwg.org/#ascii-whitespace) "\x09", "\x0a", "\x0c",
"\x0d", and "\x20". The reason for separating the chunks into pre elements is to
limit the amount of text a decoder may have to buffer while parsing the HTML.
Each pre element may contain at most 64 KB of text. pre elements may not be
nested.

# Example

The following is the result of encoding the string
"This was encoded with AMP armor.":

	<!doctype html>
	<html amp>
	<head>
	<meta charset="utf-8">
	<script async src="https://cdn.ampproject.org/v0.js"></script>
	<link rel="canonical" href="#">
	<meta name="viewport" content="width=device-width">
	<style amp-boilerplate>body{-webkit-animation:-amp-start 8s steps(1,end) 0s 1 normal both;-moz-animation:-amp-start 8s steps(1,end) 0s 1 normal both;-ms-animation:-amp-start 8s steps(1,end) 0s 1 normal both;animation:-amp-start 8s steps(1,end) 0s 1 normal both}@-webkit-keyframes -amp-start{from{visibility:hidden}to{visibility:visible}}@-moz-keyframes -amp-start{from{visibility:hidden}to{visibility:visible}}@-ms-keyframes -amp-start{from{visibility:hidden}to{visibility:visible}}@-o-keyframes -amp-start{from{visibility:hidden}to{visibility:visible}}@keyframes -amp-start{from{visibility:hidden}to{visibility:visible}}</style><noscript><style amp-boilerplate>body{-webkit-animation:none;-moz-animation:none;-ms-animation:none;animation:none}</style></noscript>
	</head>
	<body>
	<pre>
	0VGhpcyB3YXMgZW5jb2RlZCB3aXRoIEF
	NUCBhcm1vci4=
	</pre>
	</body>
	</html>
*/
package amp
07070100000034000081A400000000000000000000000167D9BD4E000004BC000000000000000000000000000000000000002400000000snowflake-2.11.0/common/amp/path.gopackage amp

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"strings"
)

// EncodePath encodes data in a way that is suitable for the suffix of an AMP
// cache URL.
func EncodePath(data []byte) string {
	var cacheBreaker [9]byte
	_, err := rand.Read(cacheBreaker[:])
	if err != nil {
		panic(err)
	}
	b64 := base64.RawURLEncoding.EncodeToString
	return "0" + b64(cacheBreaker[:]) + "/" + b64(data)
}

// DecodePath decodes data from a path suffix as encoded by EncodePath. The path
// must have already been trimmed of any directory prefix (as might be present
// in, e.g., an HTTP request). That is, the first character of path should be
// the "0" message format indicator.
func DecodePath(path string) ([]byte, error) {
	if len(path) < 1 {
		return nil, fmt.Errorf("missing format indicator")
	}
	version := path[0]
	rest := path[1:]
	switch version {
	case '0':
		// Ignore everything else up to and including the final slash
		// (there must be at least one slash).
		i := strings.LastIndexByte(rest, '/')
		if i == -1 {
			return nil, fmt.Errorf("missing data")
		}
		return base64.RawURLEncoding.DecodeString(rest[i+1:])
	default:
		return nil, fmt.Errorf("unknown format indicator %q", version)
	}
}
07070100000035000081A400000000000000000000000167D9BD4E00000513000000000000000000000000000000000000002900000000snowflake-2.11.0/common/amp/path_test.gopackage amp

import (
	"testing"
)

func TestDecodePath(t *testing.T) {
	for _, test := range []struct {
		path           string
		expectedData   string
		expectedErrStr string
	}{
		{"", "", "missing format indicator"},
		{"0", "", "missing data"},
		{"0foobar", "", "missing data"},
		{"/0/YWJj", "", "unknown format indicator '/'"},

		{"0/", "", ""},
		{"0foobar/", "", ""},
		{"0/YWJj", "abc", ""},
		{"0///YWJj", "abc", ""},
		{"0foobar/YWJj", "abc", ""},
		{"0/foobar/YWJj", "abc", ""},
	} {
		data, err := DecodePath(test.path)
		if test.expectedErrStr != "" {
			if err == nil || err.Error() != test.expectedErrStr {
				t.Errorf("%+q expected error %+q, got %+q",
					test.path, test.expectedErrStr, err)
			}
		} else if err != nil {
			t.Errorf("%+q expected no error, got %+q", test.path, err)
		} else if string(data) != test.expectedData {
			t.Errorf("%+q expected data %+q, got %+q",
				test.path, test.expectedData, data)
		}
	}
}

func TestPathRoundTrip(t *testing.T) {
	for _, data := range []string{
		"",
		"\x00",
		"/",
		"hello world",
	} {
		decoded, err := DecodePath(EncodePath([]byte(data)))
		if err != nil {
			t.Errorf("%+q roundtripped with error %v", data, err)
		} else if string(decoded) != data {
			t.Errorf("%+q roundtripped to %+q", data, decoded)
		}
	}
}
07070100000036000041ED00000000000000000000000267D9BD4E00000000000000000000000000000000000000000000002A00000000snowflake-2.11.0/common/bridgefingerprint07070100000037000081A400000000000000000000000167D9BD4E00000269000000000000000000000000000000000000003900000000snowflake-2.11.0/common/bridgefingerprint/fingerprint.gopackage bridgefingerprint

import (
	"encoding/hex"
	"errors"
)

type Fingerprint string

var ErrBridgeFingerprintInvalid = errors.New("bridge fingerprint invalid")

func FingerprintFromBytes(bytes []byte) (Fingerprint, error) {
	n := len(bytes)
	if n != 20 && n != 32 {
		return Fingerprint(""), ErrBridgeFingerprintInvalid
	}
	return Fingerprint(bytes), nil
}

func FingerprintFromHexString(hexString string) (Fingerprint, error) {
	decoded, err := hex.DecodeString(hexString)
	if err != nil {
		return "", err
	}
	return FingerprintFromBytes(decoded)
}

func (f Fingerprint) ToBytes() []byte {
	return []byte(f)
}
07070100000038000041ED00000000000000000000000267D9BD4E00000000000000000000000000000000000000000000001E00000000snowflake-2.11.0/common/certs07070100000039000081A400000000000000000000000167D9BD4E000009F6000000000000000000000000000000000000002700000000snowflake-2.11.0/common/certs/certs.gopackage certs

import (
	"crypto/x509"
	"log"
)

// https://crt.sh/?id=9314791
const LetsEncryptRootCert = `-----BEGIN CERTIFICATE-----
MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
-----END CERTIFICATE-----`

// GetRootCAs is a workaround for older versions of Android that do not trust
// Let's Encrypt's ISRG Root X1. This manually adds the ISRG root to the device's
// existing cert pool.
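//
// As an illustrative sketch, a caller might plug the returned pool into a
// crypto/tls client configuration:
//
//	tlsConfig := &tls.Config{RootCAs: certs.GetRootCAs()}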
func GetRootCAs() *x509.CertPool {
	rootCerts, err := x509.SystemCertPool()
	if err != nil {
		rootCerts = x509.NewCertPool()
	}
	if ok := rootCerts.AppendCertsFromPEM([]byte(LetsEncryptRootCert)); !ok {
		log.Println("Error appending Let's Encrypt root certificate to cert pool")
		return nil
	}
	return rootCerts
}
0707010000003A000041ED00000000000000000000000267D9BD4E00000000000000000000000000000000000000000000002200000000snowflake-2.11.0/common/constants0707010000003B000081A400000000000000000000000167D9BD4E0000013E000000000000000000000000000000000000002F00000000snowflake-2.11.0/common/constants/constants.gopackage constants

const (
	// If the broker does not receive the proxy answer within this many seconds
	// after the broker received the client offer,
	// the broker will respond with an error to the client.
	//
	// This is calibrated to match the timeout of the CDNs we use for rendezvous.
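	//
	// The constant is an untyped number of seconds; a consumer would
	// typically convert it with time.Duration(BrokerClientTimeout) * time.Second.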
	BrokerClientTimeout = 5
)
0707010000003C000041ED00000000000000000000000267D9BD4E00000000000000000000000000000000000000000000002600000000snowflake-2.11.0/common/encapsulation0707010000003D000081A400000000000000000000000167D9BD4E00001A2C000000000000000000000000000000000000003700000000snowflake-2.11.0/common/encapsulation/encapsulation.go// Package encapsulation implements a way of encoding variable-size chunks of
// data and padding into a byte stream.
//
// Each chunk of data or padding starts with a variable-size length prefix. One
// bit ("d") in the first byte of the prefix indicates whether the chunk
// represents data or padding (1=data, 0=padding). Another bit ("c" for
// "continuation") indicates whether there are more bytes in the length
// prefix. The remaining 6 bits ("x") encode part of the length value.
//
//	dcxxxxxx
//
// If the continuation bit is set, then the next byte is also part of the length
// prefix. It lacks the "d" bit, has its own "c" bit, and 7 value-carrying bits
// ("y").
//
//	cyyyyyyy
//
// The length is decoded by concatenating the value-carrying bits, from left to
// right, of all prefix bytes, up to and including the first byte whose
// "c" bit is 0. Although in principle this encoding would allow for length
// prefixes of any size, length prefixes are arbitrarily limited to 3 bytes and
// any attempt to read or write a longer one is an error. These are therefore
// the only valid formats:
//
//	00xxxxxx			xxxxxx₂ bytes of padding
//	10xxxxxx			xxxxxx₂ bytes of data
//	01xxxxxx 0yyyyyyy		xxxxxxyyyyyyy₂ bytes of padding
//	11xxxxxx 0yyyyyyy		xxxxxxyyyyyyy₂ bytes of data
//	01xxxxxx 1yyyyyyy 0zzzzzzz	xxxxxxyyyyyyyzzzzzzz₂ bytes of padding
//	11xxxxxx 1yyyyyyy 0zzzzzzz	xxxxxxyyyyyyyzzzzzzz₂ bytes of data
//
// The maximum encodable length is 11111111111111111111₂ = 0xfffff = 1048575.
// There is no requirement to use a length prefix of minimum size; i.e. 00000100
// and 01000000 00000100 are both valid encodings of the value 4.
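//
// As a worked example, a chunk of 300 bytes of data gets the 2-byte prefix
// 11000010 00101100: "d" and "c" are set in the first byte, its six "x" bits
// are 000010, the second byte's "c" bit is 0, and its seven "y" bits are
// 0101100; concatenating the value bits gives 0000100101100₂ = 300.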
//
// After the length prefix follow that many bytes of padding or data. There are
// no restrictions on the value of bytes comprising padding.
//
// The idea for this encapsulation is sketched here:
// https://github.com/net4people/bbs/issues/9#issuecomment-524095186
package encapsulation

import (
	"errors"
	"io"
)

// ErrTooLong is the error returned when an encoded length prefix is longer than
// 3 bytes, or when WriteData receives an input whose length is too large to
// encode in a 3-byte length prefix.
var ErrTooLong = errors.New("length prefix is too long")

// ReadData reads the next available data chunk, skipping over any padding chunks that
// may come first, and copies the data into p. If p is shorter than the length
// of the data chunk, only the first len(p) bytes are copied into p, and the
// error return is io.ErrShortBuffer. The returned error value is nil if and
// only if a data chunk was present and was read in its entirety. The returned
// error is io.EOF only if r ended before the first byte of a length prefix. If
// r ended in the middle of a length prefix or data/padding, the returned error
// is io.ErrUnexpectedEOF.
func ReadData(r io.Reader, p []byte) (int, error) {
	for {
		var b [1]byte
		_, err := r.Read(b[:])
		if err != nil {
			// This is the only place we may return a real io.EOF.
			return 0, err
		}
		isData := (b[0] & 0x80) != 0
		moreLength := (b[0] & 0x40) != 0
		n := int(b[0] & 0x3f)
		for i := 0; moreLength; i++ {
			if i >= 2 {
				return 0, ErrTooLong
			}
			_, err := r.Read(b[:])
			if err == io.EOF {
				err = io.ErrUnexpectedEOF
			}
			if err != nil {
				return 0, err
			}
			moreLength = (b[0] & 0x80) != 0
			n = (n << 7) | int(b[0]&0x7f)
		}
		if isData {
			if len(p) > n {
				p = p[:n]
			}
			numData, err := io.ReadFull(r, p)
			if err == nil && numData < n {
				// If the caller's buffer was too short, discard
				// the rest of the data and return
				// io.ErrShortBuffer.
				_, err = io.CopyN(io.Discard, r, int64(n-numData))
				if err == nil {
					err = io.ErrShortBuffer
				}
			}
			if err == io.EOF {
				err = io.ErrUnexpectedEOF
			}
			return numData, err
		} else if n > 0 {
			_, err := io.CopyN(io.Discard, r, int64(n))
			if err == io.EOF {
				err = io.ErrUnexpectedEOF
			}
			if err != nil {
				return 0, err
			}
		}
	}
}

// dataPrefixForLength returns a length prefix for the given length, with the
// "d" bit set to 1.
func dataPrefixForLength(n int) ([]byte, error) {
	switch {
	case (n>>0)&0x3f == (n >> 0):
		return []byte{0x80 | byte((n>>0)&0x3f)}, nil
	case (n>>7)&0x3f == (n >> 7):
		return []byte{0xc0 | byte((n>>7)&0x3f), byte((n >> 0) & 0x7f)}, nil
	case (n>>14)&0x3f == (n >> 14):
		return []byte{0xc0 | byte((n>>14)&0x3f), 0x80 | byte((n>>7)&0x7f), byte((n >> 0) & 0x7f)}, nil
	default:
		return nil, ErrTooLong
	}
}

// WriteData encodes a data chunk into w. It returns the total number of bytes
// written; i.e., including the length prefix. The error is ErrTooLong if the
// length of data cannot fit into a length prefix.
func WriteData(w io.Writer, data []byte) (int, error) {
	prefix, err := dataPrefixForLength(len(data))
	if err != nil {
		return 0, err
	}
	total := 0
	n, err := w.Write(prefix)
	total += n
	if err != nil {
		return total, err
	}
	n, err = w.Write(data)
	total += n
	return total, err
}

var paddingBuffer [1024]byte

// WritePadding encodes padding chunks, whose total size (including their own
// length prefixes) is n. Returns the total number of bytes written to w, which
// will be exactly n unless there was an error. The error cannot be ErrTooLong
// because this function will write multiple padding chunks if necessary to
// reach the requested size. Panics if n is negative.
func WritePadding(w io.Writer, n int) (int, error) {
	if n < 0 {
		panic("negative length")
	}
	total := 0
	for n > 0 {
		p := len(paddingBuffer)
		if p > n {
			p = n
		}
		n -= p
		var prefix []byte
		switch {
		case ((p-1)>>0)&0x3f == ((p - 1) >> 0):
			p = p - 1
			prefix = []byte{byte((p >> 0) & 0x3f)}
		case ((p-2)>>7)&0x3f == ((p - 2) >> 7):
			p = p - 2
			prefix = []byte{0x40 | byte((p>>7)&0x3f), byte((p >> 0) & 0x7f)}
		case ((p-3)>>14)&0x3f == ((p - 3) >> 14):
			p = p - 3
			prefix = []byte{0x40 | byte((p>>14)&0x3f), 0x80 | byte((p>>7)&0x7f), byte((p >> 0) & 0x7f)}
		}
		nn, err := w.Write(prefix)
		total += nn
		if err != nil {
			return total, err
		}
		nn, err = w.Write(paddingBuffer[:p])
		total += nn
		if err != nil {
			return total, err
		}
	}
	return total, nil
}

// MaxDataForSize returns the length of the longest slice that can be passed to
// WriteData, whose total encoded size (including length prefix) is no larger
// than n. Call this to find out if a chunk of data will fit into a length
// budget. Panics if n == 0.
func MaxDataForSize(n int) int {
	if n == 0 {
		panic("zero length")
	}
	prefix, err := dataPrefixForLength(n)
	if err == ErrTooLong {
		return (1 << (6 + 7 + 7)) - 1 - 3
	} else if err != nil {
		panic(err)
	}
	return n - len(prefix)
}
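
// Usage sketch (illustrative only): write some padding and a data chunk into
// a buffer, then read the data back; ReadData skips over the padding.
//
//	var buf bytes.Buffer
//	WritePadding(&buf, 32)           // 32 bytes of padding, prefixes included
//	WriteData(&buf, []byte("hello")) // one 5-byte data chunk
//	p := make([]byte, 16)
//	n, _ := ReadData(&buf, p)        // p[:n] == []byte("hello")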
0707010000003E000081A400000000000000000000000167D9BD4E00003019000000000000000000000000000000000000003C00000000snowflake-2.11.0/common/encapsulation/encapsulation_test.gopackage encapsulation

import (
	"bytes"
	"io"
	"math/rand"
	"testing"
)

// Return a byte slice with non-trivial contents.
func pseudorandomBuffer(n int) []byte {
	source := rand.NewSource(0)
	p := make([]byte, n)
	for i := 0; i < len(p); i++ {
		p[i] = byte(source.Int63() & 0xff)
	}
	return p
}

func mustWriteData(w io.Writer, p []byte) int {
	n, err := WriteData(w, p)
	if err != nil {
		panic(err)
	}
	return n
}

func mustWritePadding(w io.Writer, n int) int {
	n, err := WritePadding(w, n)
	if err != nil {
		panic(err)
	}
	return n
}

// Test that ReadData(WriteData()) recovers the original data.
func TestRoundtrip(t *testing.T) {
	// Test above and below interesting thresholds.
	for _, i := range []int{
		0x00, 0x01,
		0x3e, 0x3f, 0x40, 0x41,
		0xfe, 0xff, 0x100, 0x101,
		0x1ffe, 0x1fff, 0x2000, 0x2001,
		0xfffe, 0xffff, 0x10000, 0x10001,
		0xffffe, 0xfffff,
	} {
		original := pseudorandomBuffer(i)
		var enc bytes.Buffer
		n, err := WriteData(&enc, original)
		if err != nil {
			t.Fatalf("size %d, WriteData returned error %v", i, err)
		}
		if enc.Len() != n {
			t.Fatalf("size %d, returned length was %d, written length was %d",
				i, n, enc.Len())
		}
		inverse := make([]byte, i)
		n, err = ReadData(&enc, inverse)
		if err != nil {
			t.Fatalf("size %d, ReadData returned error %v", i, err)
		}
		if !bytes.Equal(inverse[:n], original) {
			t.Fatalf("size %d, got <%x>, expected <%x>", i, inverse[:n], original)
		}
	}
}

// Test that WritePadding writes exactly as much as requested.
func TestPaddingLength(t *testing.T) {
	// Test above and below interesting thresholds. WritePadding also gets
	// values above 0xfffff, the maximum value of a single length prefix.
	for _, i := range []int{
		0x00, 0x01,
		0x3f, 0x40, 0x41, 0x42,
		0xff, 0x100, 0x101, 0x102,
		0x2000, 0x2001, 0x2002, 0x2003,
		0x10000, 0x10001, 0x10002, 0x10003,
		0x100001, 0x100002, 0x100003, 0x100004,
	} {
		var enc bytes.Buffer
		n, err := WritePadding(&enc, i)
		if err != nil {
			t.Fatalf("size %d, WritePadding returned error %v", i, err)
		}
		if n != i {
			t.Fatalf("requested %d bytes, returned %d", i, n)
		}
		if enc.Len() != n {
			t.Fatalf("requested %d bytes, wrote %d bytes", i, enc.Len())
		}
	}
}

// Test that ReadData skips over padding.
func TestSkipPadding(t *testing.T) {
	var data = [][]byte{{}, {}, []byte("hello"), {}, []byte("world")}
	var enc bytes.Buffer
	mustWritePadding(&enc, 10)
	mustWritePadding(&enc, 100)
	mustWriteData(&enc, data[0])
	mustWriteData(&enc, data[1])
	mustWritePadding(&enc, 10)
	mustWriteData(&enc, data[2])
	mustWriteData(&enc, data[3])
	mustWritePadding(&enc, 10)
	mustWriteData(&enc, data[4])
	mustWritePadding(&enc, 10)
	mustWritePadding(&enc, 10)
	for i, expected := range data {
		var actual [10]byte
		n, err := ReadData(&enc, actual[:])
		if err != nil {
			t.Fatalf("slice %d, got error %v, expected %v", i, err, nil)
		}
		if !bytes.Equal(actual[:n], expected) {
			t.Fatalf("slice %d, got <%x>, expected <%x>", i, actual[:n], expected)
		}
	}
	n, err := ReadData(&enc, nil)
	if n != 0 || err != io.EOF {
		t.Fatalf("got (%v, %v), expected (%v, %v)", n, err, 0, io.EOF)
	}
}

// Test that EOF before a length prefix returns io.EOF.
func TestEOF(t *testing.T) {
	n, err := ReadData(bytes.NewReader(nil), nil)
	if n != 0 || err != io.EOF {
		t.Fatalf("got (%v, %v), expected (%v, %v)", n, err, 0, io.EOF)
	}
}

// Test that an EOF while reading a length prefix, or while reading the
// subsequent data/padding, returns io.ErrUnexpectedEOF.
func TestUnexpectedEOF(t *testing.T) {
	for _, test := range [][]byte{
		{0x40},                  // expecting a second length byte
		{0xc0},                  // expecting a second length byte
		{0x41, 0x80},            // expecting a third length byte
		{0xc1, 0x80},            // expecting a third length byte
		{0x02},                  // expecting 2 bytes of padding
		{0x82},                  // expecting 2 bytes of data
		{0x02, 'X'},             // expecting 1 byte of padding
		{0x82, 'X'},             // expecting 1 byte of data
		{0x41, 0x00},            // expecting 128 bytes of padding
		{0xc1, 0x00},            // expecting 128 bytes of data
		{0x41, 0x00, 'X'},       // expecting 127 bytes of padding
		{0xc1, 0x00, 'X'},       // expecting 127 bytes of data
		{0x41, 0x80, 0x00},      // expecting 32768 bytes of padding
		{0xc1, 0x80, 0x00},      // expecting 32768 bytes of data
		{0x41, 0x80, 0x00, 'X'}, // expecting 32767 bytes of padding
		{0xc1, 0x80, 0x00, 'X'}, // expecting 32767 bytes of data
	} {
		n, err := ReadData(bytes.NewReader(test), nil)
		if n != 0 || err != io.ErrUnexpectedEOF {
			t.Fatalf("<%x> got (%v, %v), expected (%v, %v)", test, n, err, 0, io.ErrUnexpectedEOF)
		}
	}
}

// Test that length encodings that are longer than necessary are still
// interpreted.
func TestNonMinimalLengthEncoding(t *testing.T) {
	for _, test := range []struct {
		enc      []byte
		expected []byte
	}{
		{[]byte{0x81, 'X'}, []byte("X")},
		{[]byte{0xc0, 0x01, 'X'}, []byte("X")},
		{[]byte{0xc0, 0x80, 0x01, 'X'}, []byte("X")},
	} {
		var p [10]byte
		n, err := ReadData(bytes.NewReader(test.enc), p[:])
		if err != nil {
			t.Fatalf("<%x> got error %v, expected %v", test.enc, err, nil)
		}
		if !bytes.Equal(p[:n], test.expected) {
			t.Fatalf("<%x> got <%x>, expected <%x>", test.enc, p[:n], test.expected)
		}
	}
}

// Test that ReadData only reads up to 3 bytes of length prefix.
func TestReadLimits(t *testing.T) {
	// Test the maximum length that's possible with 3 bytes of length
	// prefix.
	maxLength := (0x3f << 14) | (0x7f << 7) | 0x7f
	data := bytes.Repeat([]byte{'X'}, maxLength)
	prefix := []byte{0xff, 0xff, 0x7f} // encodes 0xfffff
	var p [0xfffff]byte
	n, err := ReadData(bytes.NewReader(append(prefix, data...)), p[:])
	if err != nil {
		t.Fatalf("got error %v, expected %v", err, nil)
	}
	if !bytes.Equal(p[:n], data) {
		t.Fatalf("got %d bytes unequal to %d bytes", len(p), len(data))
	}
	// Test a 4-byte prefix.
	prefix = []byte{0xc0, 0xc0, 0x80, 0x80} // encodes 0x100000
	data = bytes.Repeat([]byte{'X'}, maxLength+1)
	n, err = ReadData(bytes.NewReader(append(prefix, data...)), nil)
	if n != 0 || err != ErrTooLong {
		t.Fatalf("got (%v, %v), expected (%v, %v)", n, err, 0, ErrTooLong)
	}
	// Test that 4 bytes don't work, even when they encode an integer that
	// would fit in 3 bytes.
	prefix = []byte{0xc0, 0x80, 0x80, 0x80} // encodes 0x0
	data = []byte{}
	n, err = ReadData(bytes.NewReader(append(prefix, data...)), nil)
	if n != 0 || err != ErrTooLong {
		t.Fatalf("got (%v, %v), expected (%v, %v)", n, err, 0, ErrTooLong)
	}

	// Do the same tests with padding lengths.
	data = []byte("hello")
	prefix = []byte{0x7f, 0xff, 0x7f} // encodes 0xfffff
	padding := bytes.Repeat([]byte{'X'}, maxLength)
	enc := bytes.NewBuffer(append(prefix, padding...))
	mustWriteData(enc, data)
	n, err = ReadData(enc, p[:])
	if err != nil {
		t.Fatalf("got error %v, expected %v", err, nil)
	}
	if !bytes.Equal(p[:n], data) {
		t.Fatalf("got <%x>, expected <%x>", p[:n], data)
	}
	prefix = []byte{0x40, 0xc0, 0x80, 0x80} // encodes 0x100000
	padding = bytes.Repeat([]byte{'X'}, maxLength+1)
	enc = bytes.NewBuffer(append(prefix, padding...))
	mustWriteData(enc, data)
	n, err = ReadData(enc, nil)
	if n != 0 || err != ErrTooLong {
		t.Fatalf("got (%v, %v), expected (%v, %v)", n, err, 0, ErrTooLong)
	}
	prefix = []byte{0x40, 0x80, 0x80, 0x80} // encodes 0x0
	padding = []byte{}
	enc = bytes.NewBuffer(append(prefix, padding...))
	mustWriteData(enc, data)
	n, err = ReadData(enc, nil)
	if n != 0 || err != ErrTooLong {
		t.Fatalf("got (%v, %v), expected (%v, %v)", n, err, 0, ErrTooLong)
	}
}

// Test that WriteData and WritePadding only accept lengths that can be encoded
// in up to 3 bytes of length prefix.
func TestWriteLimits(t *testing.T) {
	maxLength := (0x3f << 14) | (0x7f << 7) | 0x7f
	var enc bytes.Buffer
	n, err := WriteData(&enc, bytes.Repeat([]byte{'X'}, maxLength))
	if n != maxLength+3 || err != nil {
		t.Fatalf("got (%d, %v), expected (%d, %v)", n, err, maxLength, nil)
	}
	enc.Reset()
	n, err = WriteData(&enc, bytes.Repeat([]byte{'X'}, maxLength+1))
	if n != 0 || err != ErrTooLong {
		t.Fatalf("got (%d, %v), expected (%d, %v)", n, err, 0, ErrTooLong)
	}

	// Padding gets an extra 3 bytes because the prefix is counted as part
	// of the length.
	enc.Reset()
	n, err = WritePadding(&enc, maxLength+3)
	if n != maxLength+3 || err != nil {
		t.Fatalf("got (%d, %v), expected (%d, %v)", n, err, maxLength+3, nil)
	}
	// Writing a too-long padding is okay because WritePadding will break it
	// into smaller chunks.
	enc.Reset()
	n, err = WritePadding(&enc, maxLength+4)
	if n != maxLength+4 || err != nil {
		t.Fatalf("got (%d, %v), expected (%d, %v)", n, err, maxLength+4, nil)
	}
}

// Test that WritePadding panics when given a negative length.
func TestNegativeLength(t *testing.T) {
	for _, n := range []int{-1, ^0} {
		var enc bytes.Buffer
		panicked, nn, err := testNegativeLengthSub(t, &enc, n)
		if !panicked {
			t.Fatalf("WritePadding(%d) returned (%d, %v) instead of panicking", n, nn, err)
		}
	}
}

// Calls WritePadding(w, n) and augments the return value with a flag indicating
// whether the call panicked.
func testNegativeLengthSub(t *testing.T, w io.Writer, n int) (panicked bool, nn int, err error) {
	defer func() {
		if r := recover(); r != nil {
			panicked = true
		}
	}()
	t.Helper()
	nn, err = WritePadding(w, n)
	return false, nn, err
}

// Test that MaxDataForSize panics when given a 0 length.
func TestMaxDataForSizeZero(t *testing.T) {
	defer func() {
		if r := recover(); r == nil {
			t.Fatal("didn't panic")
		}
	}()
	MaxDataForSize(0)
}

// Test thresholds of available sizes for MaxDataForSize.
func TestMaxDataForSize(t *testing.T) {
	for _, test := range []struct {
		size     int
		expected int
	}{
		{0x01, 0x00},
		{0x02, 0x01},
		{0x3f, 0x3e},
		{0x40, 0x3e},
		{0x41, 0x3f},
		{0x1fff, 0x1ffd},
		{0x2000, 0x1ffd},
		{0x2001, 0x1ffe},
		{0xfffff, 0xffffc},
		{0x100000, 0xffffc},
		{0x100001, 0xffffc},
		{0x7fffffff, 0xffffc},
	} {
		max := MaxDataForSize(test.size)
		if max != test.expected {
			t.Fatalf("size %d, got %d, expected %d", test.size, max, test.expected)
		}
	}
}

// Test that ReadData truncates the data when the destination slice is too
// short.
func TestReadDataTruncate(t *testing.T) {
	var enc bytes.Buffer
	mustWriteData(&enc, []byte("12345678"))
	mustWriteData(&enc, []byte("abcdefgh"))
	var p [4]byte
	// First ReadData should return truncated "1234".
	n, err := ReadData(&enc, p[:])
	if err != io.ErrShortBuffer {
		t.Fatalf("got error %v, expected %v", err, io.ErrShortBuffer)
	}
	if !bytes.Equal(p[:n], []byte("1234")) {
		t.Fatalf("got <%x>, expected <%x>", p[:n], []byte("1234"))
	}
	// Second ReadData should return truncated "abcd", not the rest of
	// "12345678".
	n, err = ReadData(&enc, p[:])
	if err != io.ErrShortBuffer {
		t.Fatalf("got error %v, expected %v", err, io.ErrShortBuffer)
	}
	if !bytes.Equal(p[:n], []byte("abcd")) {
		t.Fatalf("got <%x>, expected <%x>", p[:n], []byte("abcd"))
	}
	// Last ReadData should give io.EOF.
	n, err = ReadData(&enc, p[:])
	if err != io.EOF {
		t.Fatalf("got error %v, expected %v", err, io.EOF)
	}
}

// Test that even when the result is truncated, ReadData fills the provided
// buffer as much as possible (and does not stop at the boundary of an internal Read,
// say).
func TestReadDataTruncateFull(t *testing.T) {
	pr, pw := io.Pipe()
	go func() {
		// Send one data chunk that will be delivered across two Read
		// calls.
		pw.Write([]byte{0x8a, 'h', 'e', 'l', 'l', 'o'})
		pw.Write([]byte{'w', 'o', 'r', 'l', 'd'})
	}()
	var p [8]byte
	n, err := ReadData(pr, p[:])
	if err != io.ErrShortBuffer {
		t.Fatalf("got error %v, expected %v", err, io.ErrShortBuffer)
	}
	// Should not stop after "hello".
	if !bytes.Equal(p[:n], []byte("hellowor")) {
		t.Fatalf("got <%x>, expected <%x>", p[:n], []byte("hellowor"))
	}
}

// Benchmark the ReadData function when reading from a stream of data packets of
// different sizes.
func BenchmarkReadData(b *testing.B) {
	pr, pw := io.Pipe()
	go func() {
		for {
			for length := 0; length < 128; length++ {
				WriteData(pw, paddingBuffer[:length])
			}
		}
	}()

	var p [128]byte
	for i := 0; i < b.N; i++ {
		_, err := ReadData(pr, p[:])
		if err != nil {
			b.Fatal(err)
		}
	}
}
0707010000003F000041ED00000000000000000000000267D9BD4E00000000000000000000000000000000000000000000001E00000000snowflake-2.11.0/common/event07070100000040000081A400000000000000000000000167D9BD4E0000035D000000000000000000000000000000000000002500000000snowflake-2.11.0/common/event/bus.gopackage event

import "sync"

func NewSnowflakeEventDispatcher() SnowflakeEventDispatcher {
	return &eventBus{lock: &sync.Mutex{}}
}

type eventBus struct {
	lock      *sync.Mutex
	listeners []SnowflakeEventReceiver
}

func (e *eventBus) OnNewSnowflakeEvent(event SnowflakeEvent) {
	e.lock.Lock()
	defer e.lock.Unlock()
	for _, v := range e.listeners {
		v.OnNewSnowflakeEvent(event)
	}
}

func (e *eventBus) AddSnowflakeEventListener(receiver SnowflakeEventReceiver) {
	e.lock.Lock()
	defer e.lock.Unlock()
	e.listeners = append(e.listeners, receiver)
}

func (e *eventBus) RemoveSnowflakeEventListener(receiver SnowflakeEventReceiver) {
	e.lock.Lock()
	defer e.lock.Unlock()
	var newListeners []SnowflakeEventReceiver
	for _, v := range e.listeners {
		if v != receiver {
			newListeners = append(newListeners, v)
		}
	}
	e.listeners = newListeners
}
07070100000041000081A400000000000000000000000167D9BD4E00000375000000000000000000000000000000000000002A00000000snowflake-2.11.0/common/event/bus_test.gopackage event

import (
	"github.com/stretchr/testify/assert"
	"testing"
)

type stubReceiver struct {
	counter int
}

func (s *stubReceiver) OnNewSnowflakeEvent(event SnowflakeEvent) {
	s.counter++
}

func TestBusDispatch(t *testing.T) {
	EventBus := NewSnowflakeEventDispatcher()
	StubReceiverA := &stubReceiver{}
	StubReceiverB := &stubReceiver{}
	EventBus.AddSnowflakeEventListener(StubReceiverA)
	EventBus.AddSnowflakeEventListener(StubReceiverB)
	assert.Equal(t, 0, StubReceiverA.counter)
	assert.Equal(t, 0, StubReceiverB.counter)
	EventBus.OnNewSnowflakeEvent(EventOnSnowflakeConnected{})
	assert.Equal(t, 1, StubReceiverA.counter)
	assert.Equal(t, 1, StubReceiverB.counter)
	EventBus.RemoveSnowflakeEventListener(StubReceiverB)
	EventBus.OnNewSnowflakeEvent(EventOnSnowflakeConnected{})
	assert.Equal(t, 2, StubReceiverA.counter)
	assert.Equal(t, 1, StubReceiverB.counter)

}
07070100000042000081A400000000000000000000000167D9BD4E00000EB4000000000000000000000000000000000000002B00000000snowflake-2.11.0/common/event/interface.gopackage event

import (
	"fmt"
	"time"

	"github.com/pion/webrtc/v4"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/ptutil/safelog"
)

type SnowflakeEvent interface {
	IsSnowflakeEvent()
	String() string
}

type EventOnOfferCreated struct {
	SnowflakeEvent
	WebRTCLocalDescription *webrtc.SessionDescription
	Error                  error
}

func (e EventOnOfferCreated) String() string {
	if e.Error != nil {
		scrubbed := safelog.Scrub([]byte(e.Error.Error()))
		return fmt.Sprintf("offer creation failure %s", scrubbed)
	}
	return "offer created"
}

type EventOnBrokerRendezvous struct {
	SnowflakeEvent
	WebRTCRemoteDescription *webrtc.SessionDescription
	Error                   error
}

func (e EventOnBrokerRendezvous) String() string {
	if e.Error != nil {
		scrubbed := safelog.Scrub([]byte(e.Error.Error()))
		return fmt.Sprintf("broker failure %s", scrubbed)
	}
	return "broker rendezvous peer received"
}

type EventOnSnowflakeConnected struct {
	SnowflakeEvent
}

func (e EventOnSnowflakeConnected) String() string {
	return "connected"
}

type EventOnSnowflakeConnectionFailed struct {
	SnowflakeEvent
	Error error
}

func (e EventOnSnowflakeConnectionFailed) String() string {
	scrubbed := safelog.Scrub([]byte(e.Error.Error()))
	return fmt.Sprintf("trying a new proxy: %s", scrubbed)
}

type EventOnProxyStarting struct {
	SnowflakeEvent
}

func (e EventOnProxyStarting) String() string {
	return "Proxy starting"
}

type EventOnProxyClientConnected struct {
	SnowflakeEvent
}

func (e EventOnProxyClientConnected) String() string {
	return "client connected"
}

// The connection with the client has now been closed,
// after getting successfully established.
type EventOnProxyConnectionOver struct {
	SnowflakeEvent
	Country string
}

func (e EventOnProxyConnectionOver) String() string {
	return "Proxy connection closed"
}

// Rendezvous with a client succeeded,
// but a data channel has not been created.
type EventOnProxyConnectionFailed struct {
	SnowflakeEvent
}

func (e EventOnProxyConnectionFailed) String() string {
	return "Failed to connect to the client"
}

type EventOnProxyStats struct {
	SnowflakeEvent
	// Completed successful connections.
	ConnectionCount int
	// Connections that failed to establish.
	FailedConnectionCount       uint
	InboundBytes, OutboundBytes int64
	InboundUnit, OutboundUnit   string
	SummaryInterval             time.Duration
}

func (e EventOnProxyStats) String() string {
	statString := fmt.Sprintf("In the last %v, there were %v completed successful connections. Traffic Relayed ↓ %v %v (%.2f %v%s), ↑ %v %v (%.2f %v%s).",
		e.SummaryInterval.String(), e.ConnectionCount,
		e.InboundBytes, e.InboundUnit, float64(e.InboundBytes)/e.SummaryInterval.Seconds(), e.InboundUnit, "/s",
		e.OutboundBytes, e.OutboundUnit, float64(e.OutboundBytes)/e.SummaryInterval.Seconds(), e.OutboundUnit, "/s")
	return statString
}

type EventOnCurrentNATTypeDetermined struct {
	SnowflakeEvent
	CurNATType string
}

func (e EventOnCurrentNATTypeDetermined) String() string {
	return fmt.Sprintf("NAT type: %v", e.CurNATType)
}

type SnowflakeEventReceiver interface {
	// OnNewSnowflakeEvent notifies the receiver about a new event.
	// This method MUST NOT block.
	OnNewSnowflakeEvent(event SnowflakeEvent)
}

type SnowflakeEventDispatcher interface {
	SnowflakeEventReceiver
	// AddSnowflakeEventListener allows receivers to receive event notifications
	// when OnNewSnowflakeEvent is called on the dispatcher.
	// Every event listener added will be called when an event is received by the dispatcher.
	// The order in which listeners are called is undefined.
	AddSnowflakeEventListener(receiver SnowflakeEventReceiver)
	RemoveSnowflakeEventListener(receiver SnowflakeEventReceiver)
}
07070100000043000041ED00000000000000000000000267D9BD4E00000000000000000000000000000000000000000000002100000000snowflake-2.11.0/common/messages07070100000044000081A400000000000000000000000167D9BD4E00001063000000000000000000000000000000000000002B00000000snowflake-2.11.0/common/messages/client.go//Package for communication with the snowflake broker

// import "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/messages"
package messages

import (
	"bytes"
	"encoding/json"
	"fmt"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/bridgefingerprint"

	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/nat"
)

const ClientVersion = "1.0"

/* Client--Broker protocol v1.x specification:

All messages contain the version number
followed by a new line and then the message body
<message> := <version>\n<body>
<version> := <digit>.<digit>
<body> := <poll request>|<poll response>

There are two different types of body messages,
each encoded in JSON format

== ClientPollRequest ==
<poll request> :=
{
  offer: <sdp offer>
  [nat: (unknown|restricted|unrestricted)]
  [fingerprint: <fingerprint string>]
}

The NAT field is optional, and if it is missing a
value of "unknown" will be assumed.  The fingerprint
is also optional and, if absent, will be assigned the
fingerprint of the default bridge.

== ClientPollResponse ==
<poll response> :=
{
  [answer: <sdp answer>]
  [error: <error string>]
}

If the broker succeeded in matching the client with a proxy,
the answer field MUST contain a valid SDP answer, and the
error field MUST be empty. If the answer field is empty, the
error field MUST contain a string explaining the reason
for the error.
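
== Example ==

An illustrative version 1.0 poll request from a client behind a restricted
NAT, using the default bridge fingerprint (the SDP offer is abbreviated):

1.0
{"offer":"v=0\r\no=- ...","nat":"restricted","fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80A72"}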

*/

// The bridge fingerprint to assume, for client poll requests that do not
// specify a fingerprint.  Before #28651, there was only one bridge with one
// fingerprint, which all clients expected to be connected to implicitly.
// If a client is old enough that it does not specify a fingerprint, this is
// the fingerprint it expects.  Clients that do set a fingerprint in the
// SOCKS params will also be assumed to want to connect to the default bridge.
const defaultBridgeFingerprint = "2B280B23E1107BB62ABFC40DDCC8824814F80A72"

type ClientPollRequest struct {
	Offer       string `json:"offer"`
	NAT         string `json:"nat"`
	Fingerprint string `json:"fingerprint"`
}

// Encodes a poll message from a snowflake client
func (req *ClientPollRequest) EncodeClientPollRequest() ([]byte, error) {
	if req.Fingerprint == "" {
		req.Fingerprint = defaultBridgeFingerprint
	}
	body, err := json.Marshal(req)
	if err != nil {
		return nil, err
	}
	return append([]byte(ClientVersion+"\n"), body...), nil
}

// Decodes a poll message from a snowflake client
func DecodeClientPollRequest(data []byte) (*ClientPollRequest, error) {
	parts := bytes.SplitN(data, []byte("\n"), 2)

	if len(parts) < 2 {
		// no version number found
		return nil, fmt.Errorf("unsupported message version")
	}

	var message ClientPollRequest

	if string(parts[0]) != ClientVersion {
		return nil, fmt.Errorf("unsupported message version")
	}

	err := json.Unmarshal(parts[1], &message)
	if err != nil {
		return nil, err
	}

	if message.Offer == "" {
		return nil, fmt.Errorf("no supplied offer")
	}

	if message.Fingerprint == "" {
		message.Fingerprint = defaultBridgeFingerprint
	}

	if _, err := bridgefingerprint.FingerprintFromHexString(message.Fingerprint); err != nil {
		return nil, fmt.Errorf("cannot decode fingerprint")
	}

	switch message.NAT {
	case "":
		message.NAT = nat.NATUnknown
	case nat.NATUnknown:
	case nat.NATRestricted:
	case nat.NATUnrestricted:
	default:
		return nil, fmt.Errorf("invalid NAT type")
	}

	return &message, nil
}

type ClientPollResponse struct {
	Answer string `json:"answer,omitempty"`
	Error  string `json:"error,omitempty"`
}

// Encodes a poll response for a snowflake client
func (resp *ClientPollResponse) EncodePollResponse() ([]byte, error) {
	return json.Marshal(resp)
}

// Decodes a poll response for a snowflake client
// If the Error field is empty, the Answer should be non-empty
func DecodeClientPollResponse(data []byte) (*ClientPollResponse, error) {
	var message ClientPollResponse

	err := json.Unmarshal(data, &message)
	if err != nil {
		return nil, err
	}
	if message.Error == "" && message.Answer == "" {
		return nil, fmt.Errorf("received empty broker response")
	}

	return &message, nil
}
07070100000045000081A400000000000000000000000167D9BD4E0000024B000000000000000000000000000000000000002800000000snowflake-2.11.0/common/messages/ipc.gopackage messages

import (
	"errors"
)

type RendezvousMethod string

const (
	RendezvousHttp     RendezvousMethod = "http"
	RendezvousAmpCache RendezvousMethod = "ampcache"
	RendezvousSqs      RendezvousMethod = "sqs"
)

type Arg struct {
	Body             []byte
	RemoteAddr       string
	RendezvousMethod RendezvousMethod
}

var (
	ErrBadRequest = errors.New("bad request")
	ErrInternal   = errors.New("internal error")
	ErrExtraInfo  = errors.New("client sent extra info")

	StrTimedOut  = "timed out waiting for answer!"
	StrNoProxies = "no snowflake proxies currently available"
)
07070100000046000081A400000000000000000000000167D9BD4E00002DD3000000000000000000000000000000000000003200000000snowflake-2.11.0/common/messages/messages_test.gopackage messages

import (
	"encoding/json"
	"fmt"
	"testing"

	. "github.com/smartystreets/goconvey/convey"
)

func TestDecodeProxyPollRequest(t *testing.T) {
	Convey("Context", t, func() {
		for _, test := range []struct {
			sid       string
			proxyType string
			natType   string
			clients   int
			data      string
			err       error

			acceptedRelayPattern string
		}{
			{
				//Version 1.0 proxy message
				sid:       "ymbcCMto7KHNGYlp",
				proxyType: "unknown",
				natType:   "unknown",
				clients:   0,
				data:      `{"Sid":"ymbcCMto7KHNGYlp","Version":"1.0"}`,
				err:       nil,
			},
			{
				//Version 1.1 proxy message
				sid:       "ymbcCMto7KHNGYlp",
				proxyType: "standalone",
				natType:   "unknown",
				clients:   0,
				data:      `{"Sid":"ymbcCMto7KHNGYlp","Version":"1.1","Type":"standalone"}`,
				err:       nil,
			},
			{
				//Version 1.2 proxy message
				sid:       "ymbcCMto7KHNGYlp",
				proxyType: "standalone",
				natType:   "restricted",
				clients:   0,
				data:      `{"Sid":"ymbcCMto7KHNGYlp","Version":"1.2","Type":"standalone", "NAT":"restricted"}`,
				err:       nil,
			},
			{
				//Version 1.2 proxy message with clients
				sid:       "ymbcCMto7KHNGYlp",
				proxyType: "standalone",
				natType:   "restricted",
				clients:   24,
				data:      `{"Sid":"ymbcCMto7KHNGYlp","Version":"1.2","Type":"standalone", "NAT":"restricted","Clients":24}`,
				err:       nil,
			},
			{
				//Version 1.3 proxy message with clients and proxyURL
				sid:                  "ymbcCMto7KHNGYlp",
				proxyType:            "standalone",
				natType:              "restricted",
				clients:              24,
				acceptedRelayPattern: "snowflake.torproject.org",
				data:                 `{"Sid":"ymbcCMto7KHNGYlp","Version":"1.2","Type":"standalone", "NAT":"restricted","Clients":24, "AcceptedRelayPattern":"snowflake.torproject.org"}`,
				err:                  nil,
			},
			{
				//Version 0.X proxy message:
				sid:       "",
				proxyType: "",
				natType:   "",
				clients:   0,
				data:      "",
				err:       &json.SyntaxError{},
			},
			{
				sid:       "",
				proxyType: "",
				natType:   "",
				clients:   0,
				data:      `{"Sid":"ymbcCMto7KHNGYlp"}`,
				err:       fmt.Errorf(""),
			},
			{
				sid:       "",
				proxyType: "",
				natType:   "",
				clients:   0,
				data:      "{}",
				err:       fmt.Errorf(""),
			},
			{
				sid:       "",
				proxyType: "",
				natType:   "",
				clients:   0,
				data:      `{"Version":"1.0"}`,
				err:       fmt.Errorf(""),
			},
			{
				sid:       "",
				proxyType: "",
				natType:   "",
				clients:   0,
				data:      `{"Version":"2.0"}`,
				err:       fmt.Errorf(""),
			},
		} {
			sid, proxyType, natType, clients, relayPattern, _, err := DecodeProxyPollRequestWithRelayPrefix([]byte(test.data))
			So(sid, ShouldResemble, test.sid)
			So(proxyType, ShouldResemble, test.proxyType)
			So(natType, ShouldResemble, test.natType)
			So(clients, ShouldEqual, test.clients)
			So(relayPattern, ShouldResemble, test.acceptedRelayPattern)
			So(err, ShouldHaveSameTypeAs, test.err)
		}

	})
}

func TestEncodeProxyPollRequests(t *testing.T) {
	Convey("Context", t, func() {
		b, err := EncodeProxyPollRequest("ymbcCMto7KHNGYlp", "standalone", "unknown", 16)
		So(err, ShouldBeNil)
		sid, proxyType, natType, clients, err := DecodeProxyPollRequest(b)
		So(sid, ShouldEqual, "ymbcCMto7KHNGYlp")
		So(proxyType, ShouldEqual, "standalone")
		So(natType, ShouldEqual, "unknown")
		So(clients, ShouldEqual, 16)
		So(err, ShouldBeNil)
	})
}

func TestDecodeProxyPollResponse(t *testing.T) {
	Convey("Context", t, func() {
		for _, test := range []struct {
			offer    string
			data     string
			relayURL string
			err      error
		}{
			{
				offer: "fake offer",
				data:  `{"Status":"client match","Offer":"fake offer","NAT":"unknown"}`,
				err:   nil,
			},
			{
				offer:    "fake offer",
				data:     `{"Status":"client match","Offer":"fake offer","NAT":"unknown", "RelayURL":"wss://snowflake.torproject.org/proxy"}`,
				relayURL: "wss://snowflake.torproject.org/proxy",
				err:      nil,
			},
			{
				offer: "",
				data:  `{"Status":"no match"}`,
				err:   nil,
			},
			{
				offer: "",
				data:  `{"Status":"client match"}`,
				err:   fmt.Errorf("no supplied offer"),
			},
			{
				offer: "",
				data:  `{"Test":"test"}`,
				err:   fmt.Errorf(""),
			},
		} {
			offer, _, relayURL, err := DecodePollResponseWithRelayURL([]byte(test.data))
			So(err, ShouldHaveSameTypeAs, test.err)
			So(offer, ShouldResemble, test.offer)
			So(relayURL, ShouldResemble, test.relayURL)
		}

	})
}

func TestEncodeProxyPollResponse(t *testing.T) {
	Convey("Context", t, func() {
		b, err := EncodePollResponse("fake offer", true, "restricted")
		So(err, ShouldBeNil)
		offer, natType, err := DecodePollResponse(b)
		So(offer, ShouldEqual, "fake offer")
		So(natType, ShouldEqual, "restricted")
		So(err, ShouldBeNil)

		b, err = EncodePollResponse("", false, "unknown")
		So(err, ShouldBeNil)
		offer, natType, err = DecodePollResponse(b)
		So(offer, ShouldEqual, "")
		So(natType, ShouldEqual, "unknown")
		So(err, ShouldBeNil)
	})
}

func TestEncodeProxyPollResponseWithProxyURL(t *testing.T) {
	Convey("Context", t, func() {
		b, err := EncodePollResponseWithRelayURL("fake offer", true, "restricted", "wss://test/", "")
		So(err, ShouldBeNil)
		offer, natType, err := DecodePollResponse(b)
		So(err, ShouldNotBeNil)

		offer, natType, relay, err := DecodePollResponseWithRelayURL(b)
		So(offer, ShouldEqual, "fake offer")
		So(natType, ShouldEqual, "restricted")
		So(relay, ShouldEqual, "wss://test/")
		So(err, ShouldBeNil)

		b, err = EncodePollResponse("", false, "unknown")
		So(err, ShouldBeNil)
		offer, natType, relay, err = DecodePollResponseWithRelayURL(b)
		So(offer, ShouldEqual, "")
		So(natType, ShouldEqual, "unknown")
		So(err, ShouldBeNil)

		b, err = EncodePollResponseWithRelayURL("fake offer", false, "restricted", "wss://test/", "test error reason")
		So(err, ShouldBeNil)
		offer, natType, relay, err = DecodePollResponseWithRelayURL(b)
		So(err, ShouldNotBeNil)
		So(err.Error(), ShouldContainSubstring, "test error reason")
	})
}
func TestDecodeProxyAnswerRequest(t *testing.T) {
	Convey("Context", t, func() {
		for _, test := range []struct {
			answer string
			sid    string
			data   string
			err    error
		}{
			{
				"test",
				"test",
				`{"Version":"1.0","Sid":"test","Answer":"test"}`,
				nil,
			},
			{
				"",
				"",
				`{"type":"offer","sdp":"v=0\r\no=- 4358805017720277108 2 IN IP4 [scrubbed]\r\ns=-\r\nt=0 0\r\na=group:BUNDLE data\r\na=msid-semantic: WMS\r\nm=application 56688 DTLS/SCTP 5000\r\nc=IN IP4 [scrubbed]\r\na=candidate:3769337065 1 udp 2122260223 [scrubbed] 56688 typ host generation 0 network-id 1 network-cost 50\r\na=candidate:2921887769 1 tcp 1518280447 [scrubbed] 35441 typ host tcptype passive generation 0 network-id 1 network-cost 50\r\na=ice-ufrag:aMAZ\r\na=ice-pwd:jcHb08Jjgrazp2dzjdrvPPvV\r\na=ice-options:trickle\r\na=fingerprint:sha-256 C8:88:EE:B9:E7:02:2E:21:37:ED:7A:D1:EB:2B:A3:15:A2:3B:5B:1C:3D:D4:D5:1F:06:CF:52:40:03:F8:DD:66\r\na=setup:actpass\r\na=mid:data\r\na=sctpmap:5000 webrtc-datachannel 1024\r\n"}`,
				fmt.Errorf(""),
			},
			{
				"",
				"",
				`{"Version":"1.0","Answer":"test"}`,
				fmt.Errorf(""),
			},
			{
				"",
				"",
				`{"Version":"1.0","Sid":"test"}`,
				fmt.Errorf(""),
			},
		} {
			answer, sid, err := DecodeAnswerRequest([]byte(test.data))
			So(answer, ShouldResemble, test.answer)
			So(sid, ShouldResemble, test.sid)
			So(err, ShouldHaveSameTypeAs, test.err)
		}

	})
}

func TestEncodeProxyAnswerRequest(t *testing.T) {
	Convey("Context", t, func() {
		b, err := EncodeAnswerRequest("test answer", "test sid")
		So(err, ShouldBeNil)
		answer, sid, err := DecodeAnswerRequest(b)
		So(answer, ShouldEqual, "test answer")
		So(sid, ShouldEqual, "test sid")
		So(err, ShouldBeNil)
	})
}

func TestDecodeProxyAnswerResponse(t *testing.T) {
	Convey("Context", t, func() {
		for _, test := range []struct {
			success bool
			data    string
			err     error
		}{
			{
				true,
				`{"Status":"success"}`,
				nil,
			},
			{
				false,
				`{"Status":"client gone"}`,
				nil,
			},
			{
				false,
				`{"Test":"test"}`,
				fmt.Errorf(""),
			},
		} {
			success, err := DecodeAnswerResponse([]byte(test.data))
			So(success, ShouldResemble, test.success)
			So(err, ShouldHaveSameTypeAs, test.err)
		}

	})
}

func TestEncodeProxyAnswerResponse(t *testing.T) {
	Convey("Context", t, func() {
		b, err := EncodeAnswerResponse(true)
		So(err, ShouldBeNil)
		success, err := DecodeAnswerResponse(b)
		So(success, ShouldEqual, true)
		So(err, ShouldBeNil)

		b, err = EncodeAnswerResponse(false)
		So(err, ShouldBeNil)
		success, err = DecodeAnswerResponse(b)
		So(success, ShouldEqual, false)
		So(err, ShouldBeNil)
	})
}

func TestDecodeClientPollRequest(t *testing.T) {
	Convey("Context", t, func() {
		for _, test := range []struct {
			natType string
			offer   string
			data    string
			err     error
		}{
			{
				//version 1.0 client message
				"unknown",
				"fake",
				`1.0
{"nat":"unknown","offer":"fake"}`,
				nil,
			},
			{
				//version 1.0 client message
				"unknown",
				"fake",
				`1.0
{"offer":"fake"}`,
				nil,
			},
			{
				//unknown version
				"",
				"",
				`{"version":"2.0"}`,
				fmt.Errorf(""),
			},
			{
				//no offer
				"",
				"",
				`1.0
{"nat":"unknown"}`,
				fmt.Errorf(""),
			},
		} {
			req, err := DecodeClientPollRequest([]byte(test.data))
			So(err, ShouldHaveSameTypeAs, test.err)
			if test.err == nil {
				So(req.NAT, ShouldResemble, test.natType)
				So(req.Offer, ShouldResemble, test.offer)
			}
		}

	})
}

func TestEncodeClientPollRequests(t *testing.T) {
	Convey("Context", t, func() {
		for i, test := range []struct {
			natType     string
			offer       string
			fingerprint string
			err         error
		}{
			{
				"unknown",
				"fake",
				"",
				nil,
			},
			{
				"unknown",
				"fake",
				defaultBridgeFingerprint,
				nil,
			},
			{
				"unknown",
				"fake",
				"123123",
				fmt.Errorf(""),
			},
		} {
			req1 := &ClientPollRequest{
				NAT:         test.natType,
				Offer:       test.offer,
				Fingerprint: test.fingerprint,
			}
			b, err := req1.EncodeClientPollRequest()
			So(err, ShouldBeNil)
			req2, err := DecodeClientPollRequest(b)
			So(err, ShouldHaveSameTypeAs, test.err)
			if test.err == nil {
				So(req2.Offer, ShouldEqual, req1.Offer)
				So(req2.NAT, ShouldEqual, req1.NAT)
				fingerprint := test.fingerprint
				if i == 0 {
					fingerprint = defaultBridgeFingerprint
				}
				So(req2.Fingerprint, ShouldEqual, fingerprint)
			}
		}
	})
}

func TestDecodeClientPollResponse(t *testing.T) {
	Convey("Context", t, func() {
		for _, test := range []struct {
			answer string
			msg    string
			data   string
		}{
			{
				"fake answer",
				"",
				`{"answer":"fake answer"}`,
			},
			{
				"",
				"no snowflakes",
				`{"error":"no snowflakes"}`,
			},
		} {
			resp, err := DecodeClientPollResponse([]byte(test.data))
			So(err, ShouldBeNil)
			So(resp.Answer, ShouldResemble, test.answer)
			So(resp.Error, ShouldResemble, test.msg)
		}

	})
}

func TestEncodeClientPollResponse(t *testing.T) {
	Convey("Context", t, func() {
		resp1 := &ClientPollResponse{
			Answer: "fake answer",
		}
		b, err := resp1.EncodePollResponse()
		So(err, ShouldBeNil)
		resp2, err := DecodeClientPollResponse(b)
		So(err, ShouldBeNil)
		So(resp1, ShouldResemble, resp2)

		resp1 = &ClientPollResponse{
			Error: "failed",
		}
		b, err = resp1.EncodePollResponse()
		So(err, ShouldBeNil)
		resp2, err = DecodeClientPollResponse(b)
		So(err, ShouldBeNil)
		So(resp1, ShouldResemble, resp2)
	})
}
07070100000047000081A400000000000000000000000167D9BD4E00001BDC000000000000000000000000000000000000002A00000000snowflake-2.11.0/common/messages/proxy.go//Package for communication with the snowflake broker

// import "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/messages"
package messages

import (
	"encoding/json"
	"errors"
	"fmt"
	"strings"

	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/nat"
)

const (
	version      = "1.3"
	ProxyUnknown = "unknown"
)

var KnownProxyTypes = map[string]bool{
	"standalone": true,
	"webext":     true,
	"badge":      true,
	"iptproxy":   true,
}

/* Version 1.3 specification:

== ProxyPollRequest ==
{
  Sid: [generated session id of proxy],
  Version: 1.3,
  Type: ["badge"|"webext"|"standalone"],
  NAT: ["unknown"|"restricted"|"unrestricted"],
  Clients: [number of current clients, rounded down to multiples of 8],
  AcceptedRelayPattern: [a pattern representing accepted set of relay domains]
}

== ProxyPollResponse ==
1) If a client is matched:
HTTP 200 OK
{
  Status: "client match",
  {
    type: offer,
    sdp: [WebRTC SDP]
  },
  NAT: ["unknown"|"restricted"|"unrestricted"],
  RelayURL: [the WebSocket URL proxy should connect to relay Snowflake traffic]
}

2) If a client is not matched:
HTTP 200 OK

{
    Status: "no match"
}

3) If the request is malformed:
HTTP 400 BadRequest

== ProxyAnswerRequest ==
{
  Sid: [generated session id of proxy],
  Version: 1.3,
  Answer:
  {
    type: answer,
    sdp: [WebRTC SDP]
  }
}

== ProxyAnswerResponse ==
1) If the client retrieved the answer:
HTTP 200 OK

{
  Status: "success"
}

2) If the client left:
HTTP 200 OK

{
  Status: "client gone"
}

3) If the request is malformed:
HTTP 400 BadRequest

*/

type ProxyPollRequest struct {
	Sid     string
	Version string
	Type    string
	NAT     string
	Clients int

	AcceptedRelayPattern *string
}

func EncodeProxyPollRequest(sid string, proxyType string, natType string, clients int) ([]byte, error) {
	return EncodeProxyPollRequestWithRelayPrefix(sid, proxyType, natType, clients, "")
}

func EncodeProxyPollRequestWithRelayPrefix(sid string, proxyType string, natType string, clients int, relayPattern string) ([]byte, error) {
	return json.Marshal(ProxyPollRequest{
		Sid:                  sid,
		Version:              version,
		Type:                 proxyType,
		NAT:                  natType,
		Clients:              clients,
		AcceptedRelayPattern: &relayPattern,
	})
}

func DecodeProxyPollRequest(data []byte) (sid string, proxyType string, natType string, clients int, err error) {
	var relayPrefix string
	sid, proxyType, natType, clients, relayPrefix, _, err = DecodeProxyPollRequestWithRelayPrefix(data)
	if relayPrefix != "" {
		return "", "", "", 0, ErrExtraInfo
	}
	return
}

// Decodes a poll message from a snowflake proxy and returns the
// sid, proxy type, nat type and clients of the proxy on success
// and an error if it failed
func DecodeProxyPollRequestWithRelayPrefix(data []byte) (
	sid string, proxyType string, natType string, clients int, relayPrefix string, relayPrefixAware bool, err error) {
	var message ProxyPollRequest

	err = json.Unmarshal(data, &message)
	if err != nil {
		return
	}

	majorVersion := strings.Split(message.Version, ".")[0]
	if majorVersion != "1" {
		err = fmt.Errorf("using unknown version")
		return
	}

	// Version 1.x requires an Sid
	if message.Sid == "" {
		err = fmt.Errorf("no supplied session id")
		return
	}

	switch message.NAT {
	case "":
		message.NAT = nat.NATUnknown
	case nat.NATUnknown:
	case nat.NATRestricted:
	case nat.NATUnrestricted:
	default:
		err = fmt.Errorf("invalid NAT type")
		return
	}

	// we don't reject polls with an unknown proxy type because we encourage
	// projects that embed proxy code to include their own type
	if !KnownProxyTypes[message.Type] {
		message.Type = ProxyUnknown
	}
	var acceptedRelayPattern = ""
	if message.AcceptedRelayPattern != nil {
		acceptedRelayPattern = *message.AcceptedRelayPattern
	}
	return message.Sid, message.Type, message.NAT, message.Clients,
		acceptedRelayPattern, message.AcceptedRelayPattern != nil, nil
}

type ProxyPollResponse struct {
	Status string
	Offer  string
	NAT    string

	RelayURL string
}

func EncodePollResponse(offer string, success bool, natType string) ([]byte, error) {
	return EncodePollResponseWithRelayURL(offer, success, natType, "", "no match")
}

func EncodePollResponseWithRelayURL(offer string, success bool, natType, relayURL, failReason string) ([]byte, error) {
	if success {
		return json.Marshal(ProxyPollResponse{
			Status:   "client match",
			Offer:    offer,
			NAT:      natType,
			RelayURL: relayURL,
		})

	}
	return json.Marshal(ProxyPollResponse{
		Status: failReason,
	})
}
func DecodePollResponse(data []byte) (offer string, natType string, err error) {
	offer, natType, relayURL, err := DecodePollResponseWithRelayURL(data)
	if relayURL != "" {
		return "", "", ErrExtraInfo
	}
	return offer, natType, err
}

// Decodes a poll response from the broker and returns an offer and the client's NAT type
// If there is a client match, the returned offer string will be non-empty
func DecodePollResponseWithRelayURL(data []byte) (
	offer string,
	natType string,
	relayURL string,
	err_ error,
) {
	var message ProxyPollResponse

	err := json.Unmarshal(data, &message)
	if err != nil {
		return "", "", "", err
	}
	if message.Status == "" {
		return "", "", "", fmt.Errorf("received invalid data")
	}

	err = nil
	if message.Status == "client match" {
		if message.Offer == "" {
			return "", "", "", fmt.Errorf("no supplied offer")
		}
	} else {
		message.Offer = ""
		if message.Status != "no match" {
			err = errors.New(message.Status)
		}
	}

	natType = message.NAT
	if natType == "" {
		natType = "unknown"
	}

	return message.Offer, natType, message.RelayURL, err
}

type ProxyAnswerRequest struct {
	Version string
	Sid     string
	Answer  string
}

func EncodeAnswerRequest(answer string, sid string) ([]byte, error) {
	return json.Marshal(ProxyAnswerRequest{
		Version: version,
		Sid:     sid,
		Answer:  answer,
	})
}

// Returns the sdp answer and proxy sid
func DecodeAnswerRequest(data []byte) (answer string, sid string, err error) {
	var message ProxyAnswerRequest

	err = json.Unmarshal(data, &message)
	if err != nil {
		return "", "", err
	}

	majorVersion := strings.Split(message.Version, ".")[0]
	if majorVersion != "1" {
		return "", "", fmt.Errorf("using unknown version")
	}

	if message.Sid == "" || message.Answer == "" {
		return "", "", fmt.Errorf("no supplied sid or answer")
	}

	return message.Answer, message.Sid, nil
}

type ProxyAnswerResponse struct {
	Status string
}

func EncodeAnswerResponse(success bool) ([]byte, error) {
	if success {
		return json.Marshal(ProxyAnswerResponse{
			Status: "success",
		})

	}
	return json.Marshal(ProxyAnswerResponse{
		Status: "client gone",
	})
}

func DecodeAnswerResponse(data []byte) (bool, error) {
	var message ProxyAnswerResponse
	var success bool

	err := json.Unmarshal(data, &message)
	if err != nil {
		return success, err
	}
	if message.Status == "" {
		return success, fmt.Errorf("received invalid data")
	}

	if message.Status == "success" {
		success = true
	}

	return success, nil
}
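
// Hypothetical sketch of the answer exchange helpers (answerSDP and sid are
// placeholder values):
//
//	req, _ := EncodeAnswerRequest(answerSDP, sid)
//	answer, gotSid, err := DecodeAnswerRequest(req) // answer == answerSDP, gotSid == sid
//	resp, _ := EncodeAnswerResponse(true)
//	ok, _ := DecodeAnswerResponse(resp)             // ok == true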
07070100000048000041ED00000000000000000000000267D9BD4E00000000000000000000000000000000000000000000002400000000snowflake-2.11.0/common/namematcher07070100000049000081A400000000000000000000000167D9BD4E000002A2000000000000000000000000000000000000002F00000000snowflake-2.11.0/common/namematcher/matcher.gopackage namematcher

import "strings"

func NewNameMatcher(rule string) NameMatcher {
	rule = strings.TrimSuffix(rule, "$")
	return NameMatcher{suffix: strings.TrimPrefix(rule, "^"), exact: strings.HasPrefix(rule, "^")}
}

func IsValidRule(rule string) bool {
	return strings.HasSuffix(rule, "$")
}

type NameMatcher struct {
	exact  bool
	suffix string
}

func (m *NameMatcher) IsSupersetOf(matcher NameMatcher) bool {
	if m.exact {
		return matcher.exact && m.suffix == matcher.suffix
	}
	return strings.HasSuffix(matcher.suffix, m.suffix)
}

func (m *NameMatcher) IsMember(s string) bool {
	if m.exact {
		return s == m.suffix
	}
	return strings.HasSuffix(s, m.suffix)
}
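
// Hypothetical usage sketch: a rule ending in "$" matches by suffix, a rule
// wrapped in "^...$" matches exactly, and the empty rule matches everything.
//
//	m := NewNameMatcher("snowflake.torproject.net$")
//	m.IsMember("snowflake.torproject.net")    // true
//	m.IsMember("01-snowflake.torproject.net") // true
//	m.IsMember("snowflake.example.com")       // false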
0707010000004A000081A400000000000000000000000167D9BD4E00000889000000000000000000000000000000000000003400000000snowflake-2.11.0/common/namematcher/matcher_test.gopackage namematcher

import "testing"

import . "github.com/smartystreets/goconvey/convey"

func TestMatchMember(t *testing.T) {
	testingVector := []struct {
		matcher string
		target  string
		expects bool
	}{
		{matcher: "", target: "", expects: true},
		{matcher: "^snowflake.torproject.net$", target: "snowflake.torproject.net", expects: true},
		{matcher: "^snowflake.torproject.net$", target: "faketorproject.net", expects: false},
		{matcher: "snowflake.torproject.net$", target: "faketorproject.net", expects: false},
		{matcher: "snowflake.torproject.net$", target: "snowflake.torproject.net", expects: true},
		{matcher: "snowflake.torproject.net$", target: "imaginary-01-snowflake.torproject.net", expects: true},
		{matcher: "snowflake.torproject.net$", target: "imaginary-aaa-snowflake.torproject.net", expects: true},
		{matcher: "snowflake.torproject.net$", target: "imaginary-aaa-snowflake.faketorproject.net", expects: false},
	}
	for _, v := range testingVector {
		t.Run(v.matcher+"<>"+v.target, func(t *testing.T) {
			Convey("test", t, func() {
				matcher := NewNameMatcher(v.matcher)
				So(matcher.IsMember(v.target), ShouldEqual, v.expects)
			})
		})
	}
}

func TestMatchSubset(t *testing.T) {
	testingVector := []struct {
		matcher string
		target  string
		expects bool
	}{
		{matcher: "", target: "", expects: true},
		{matcher: "^snowflake.torproject.net$", target: "^snowflake.torproject.net$", expects: true},
		{matcher: "snowflake.torproject.net$", target: "^snowflake.torproject.net$", expects: true},
		{matcher: "snowflake.torproject.net$", target: "snowflake.torproject.net$", expects: true},
		{matcher: "snowflake.torproject.net$", target: "testing-snowflake.torproject.net$", expects: true},
		{matcher: "snowflake.torproject.net$", target: "^testing-snowflake.torproject.net$", expects: true},
		{matcher: "snowflake.torproject.net$", target: "", expects: false},
	}
	for _, v := range testingVector {
		t.Run(v.matcher+"<>"+v.target, func(t *testing.T) {
			Convey("test", t, func() {
				matcher := NewNameMatcher(v.matcher)
				target := NewNameMatcher(v.target)
				So(matcher.IsSupersetOf(target), ShouldEqual, v.expects)
			})
		})
	}
}
0707010000004B000041ED00000000000000000000000267D9BD4E00000000000000000000000000000000000000000000001C00000000snowflake-2.11.0/common/nat0707010000004C000081A400000000000000000000000167D9BD4E00001E3C000000000000000000000000000000000000002300000000snowflake-2.11.0/common/nat/nat.go/*
The majority of this code is taken from a utility I wrote for pion/stun
https://github.com/pion/stun/blob/master/cmd/stun-nat-behaviour/main.go

Copyright 2018 Pion LLC

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

package nat

import (
	"errors"
	"fmt"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/proxy"
	"log"
	"net"
	"net/url"
	"time"

	"github.com/pion/stun/v3"
)

var ErrTimedOut = errors.New("timed out waiting for response")

const (
	NATUnknown      = "unknown"
	NATRestricted   = "restricted"
	NATUnrestricted = "unrestricted"
)

// Deprecated: Use CheckIfRestrictedNATWithProxy instead.
func CheckIfRestrictedNAT(server string) (bool, error) {
	return CheckIfRestrictedNATWithProxy(server, nil)
}

// CheckIfRestrictedNATWithProxy checks the NAT mapping and filtering
// behaviour and returns true if the NAT is restrictive
// (address-dependent mapping and/or port-dependent filtering)
// and false if the NAT is unrestrictive (meaning it
// will work with most other NATs).
func CheckIfRestrictedNATWithProxy(server string, proxy *url.URL) (bool, error) {
	return isRestrictedMapping(server, proxy)
}
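
// A minimal caller sketch, assuming a placeholder STUN server address:
//
//	natType := NATUnknown
//	if restricted, err := CheckIfRestrictedNATWithProxy("stun.example.net:3478", nil); err == nil {
//		if restricted {
//			natType = NATRestricted
//		} else {
//			natType = NATUnrestricted
//		}
//	}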

// Performs two tests from RFC 5780 to determine whether the mapping type
// of the client's NAT is address-independent or address-dependent.
// Returns true if the mapping is address-dependent and false otherwise.
func isRestrictedMapping(addrStr string, proxy *url.URL) (bool, error) {
	var xorAddr1 stun.XORMappedAddress
	var xorAddr2 stun.XORMappedAddress

	mapTestConn, err := connect(addrStr, proxy)
	if err != nil {
		return false, fmt.Errorf("Error creating STUN connection: %w", err)
	}

	defer mapTestConn.Close()

	// Test I: Regular binding request
	message := stun.MustBuild(stun.TransactionID, stun.BindingRequest)

	resp, err := mapTestConn.RoundTrip(message, mapTestConn.PrimaryAddr)
	if err != nil {
		return false, fmt.Errorf("Error completing roundtrip map test: %w", err)
	}

	// Decoding XOR-MAPPED-ADDRESS attribute from message.
	if err = xorAddr1.GetFrom(resp); err != nil {
		return false, fmt.Errorf("Error retrieving XOR-MAPPED-ADDRESS resonse: %w", err)
	}

	// Decoding OTHER-ADDRESS attribute from message.
	var otherAddr stun.OtherAddress
	if err = otherAddr.GetFrom(resp); err != nil {
		return false, fmt.Errorf("NAT discovery feature not supported: %w", err)
	}

	if err = mapTestConn.AddOtherAddr(otherAddr.String()); err != nil {
		return false, fmt.Errorf("Error resolving address %s: %w", otherAddr.String(), err)
	}

	// Test II: Send binding request to other address
	resp, err = mapTestConn.RoundTrip(message, mapTestConn.OtherAddr)
	if err != nil {
		return false, fmt.Errorf("Error retrieveing server response: %w", err)
	}

	// Decoding XOR-MAPPED-ADDRESS attribute from message.
	if err = xorAddr2.GetFrom(resp); err != nil {
		return false, fmt.Errorf("Error retrieving XOR-MAPPED-ADDRESS resonse: %w", err)
	}

	return xorAddr1.String() != xorAddr2.String(), nil

}

// Performs two tests from RFC 5780 to determine whether the filtering type
// of the client's NAT is port-dependent.
// Returns true if the filtering is port-dependent and false otherwise.
// Note: This function is no longer used because a client's NAT type is
// determined only by their mapping type, but the functionality might
// be useful in the future and remains here.
func isRestrictedFiltering(addrStr string, proxy *url.URL) (bool, error) {
	var xorAddr stun.XORMappedAddress

	mapTestConn, err := connect(addrStr, proxy)
	if err != nil {
		log.Printf("Error creating STUN connection: %s", err.Error())
		return false, err
	}

	defer mapTestConn.Close()

	// Test I: Regular binding request
	message := stun.MustBuild(stun.TransactionID, stun.BindingRequest)

	resp, err := mapTestConn.RoundTrip(message, mapTestConn.PrimaryAddr)
	if err == ErrTimedOut {
		log.Printf("Error: no response from server")
		return false, err
	}
	if err != nil {
		log.Printf("Error: %s", err.Error())
		return false, err
	}

	// Decoding XOR-MAPPED-ADDRESS attribute from message.
	if err = xorAddr.GetFrom(resp); err != nil {
		log.Printf("Error retrieving XOR-MAPPED-ADDRESS from resonse: %s", err.Error())
		return false, err
	}

	// Test III: Request port change
	message.Add(stun.AttrChangeRequest, []byte{0x00, 0x00, 0x00, 0x02})

	_, err = mapTestConn.RoundTrip(message, mapTestConn.PrimaryAddr)
	if err != ErrTimedOut && err != nil {
		// something else went wrong
		log.Printf("Error reading response from server: %s", err.Error())
		return false, err
	}

	return err == ErrTimedOut, nil
}

// Given an address string, returns a StunServerConn
func connect(addrStr string, proxyAddr *url.URL) (*StunServerConn, error) {
	// Creating a "connection" to STUN server.
	var conn net.PacketConn

	ResolveUDPAddr := net.ResolveUDPAddr
	if proxyAddr != nil {
		socksClient := proxy.NewSocks5UDPClient(proxyAddr)
		ResolveUDPAddr = socksClient.ResolveUDPAddr
	}

	addr, err := ResolveUDPAddr("udp4", addrStr)
	if err != nil {
		log.Printf("Error resolving address: %s\n", err.Error())
		return nil, err
	}

	if proxyAddr == nil {
		c, err := net.ListenUDP("udp4", nil)
		if err != nil {
			return nil, err
		}
		conn = c
	} else {
		socksClient := proxy.NewSocks5UDPClient(proxyAddr)
		c, err := socksClient.ListenPacket("udp", nil)
		if err != nil {
			return nil, err
		}
		conn = c
	}

	mChan := listen(conn)

	return &StunServerConn{
		conn:        conn,
		PrimaryAddr: addr,
		messageChan: mChan,
	}, nil
}

type StunServerConn struct {
	conn        net.PacketConn
	PrimaryAddr *net.UDPAddr
	OtherAddr   *net.UDPAddr
	messageChan chan *stun.Message
}

func (c *StunServerConn) Close() {
	c.conn.Close()
}

func (c *StunServerConn) RoundTrip(msg *stun.Message, addr net.Addr) (*stun.Message, error) {
	_, err := c.conn.WriteTo(msg.Raw, addr)
	if err != nil {
		return nil, err
	}

	// Wait for response or timeout
	select {
	case m, ok := <-c.messageChan:
		if !ok {
			return nil, fmt.Errorf("error reading from messageChan")
		}
		return m, nil
	case <-time.After(10 * time.Second):
		return nil, ErrTimedOut
	}
}

func (c *StunServerConn) AddOtherAddr(addrStr string) error {
	addr2, err := net.ResolveUDPAddr("udp4", addrStr)
	if err != nil {
		return err
	}
	c.OtherAddr = addr2
	return nil
}

// taken from https://github.com/pion/stun/blob/master/cmd/stun-traversal/main.go
func listen(conn net.PacketConn) chan *stun.Message {
	messages := make(chan *stun.Message)
	go func() {
		for {
			buf := make([]byte, 1024)

			n, _, err := conn.ReadFrom(buf)
			if err != nil {
				close(messages)
				return
			}
			buf = buf[:n]

			m := new(stun.Message)
			m.Raw = buf
			err = m.Decode()
			if err != nil {
				close(messages)
				return
			}

			messages <- m
		}
	}()
	return messages
}
0707010000004D000041ED00000000000000000000000267D9BD4E00000000000000000000000000000000000000000000001E00000000snowflake-2.11.0/common/proxy0707010000004E000081A400000000000000000000000167D9BD4E0000012B000000000000000000000000000000000000002700000000snowflake-2.11.0/common/proxy/check.gopackage proxy

import (
	"errors"
	"net/url"
	"strings"
)

var errUnsupportedProxyType = errors.New("unsupported proxy type")

func CheckProxyProtocolSupport(proxy *url.URL) error {
	switch strings.ToLower(proxy.Scheme) {
	case "socks5":
		return nil
	default:
		return errUnsupportedProxyType
	}
}
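
// Hypothetical usage sketch: only socks5:// proxy URLs pass the check.
//
//	proxyURL, _ := url.Parse("socks5://127.0.0.1:1080")
//	if err := CheckProxyProtocolSupport(proxyURL); err != nil {
//		// errUnsupportedProxyType: reject the proxy setting
//	}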
0707010000004F000081A400000000000000000000000167D9BD4E000018A0000000000000000000000000000000000000002800000000snowflake-2.11.0/common/proxy/client.gopackage proxy

import (
	"context"
	"errors"
	"log"
	"net"
	"net/url"
	"strconv"
	"time"

	"github.com/miekg/dns"
	"github.com/pion/transport/v3"
	"github.com/txthinking/socks5"
)

func NewSocks5UDPClient(addr *url.URL) SocksClient {
	return SocksClient{addr: addr}
}

type SocksClient struct {
	addr *url.URL
}

type SocksConn struct {
	net.Conn
	socks5Client *socks5.Client
}

func (s SocksConn) SetReadBuffer(bytes int) error {
	return nil
}

func (s SocksConn) SetWriteBuffer(bytes int) error {
	return nil
}

func (s SocksConn) ReadFromUDP(b []byte) (n int, addr *net.UDPAddr, err error) {
	var buf [2000]byte
	n, err = s.Conn.Read(buf[:])
	if err != nil {
		return 0, nil, err
	}
	Datagram, err := socks5.NewDatagramFromBytes(buf[:n])
	if err != nil {
		return 0, nil, err
	}
	addr, err = net.ResolveUDPAddr("udp", Datagram.Address())
	if err != nil {
		return 0, nil, err
	}
	n = copy(b, Datagram.Data)
	if n < len(Datagram.Data) {
		return 0, nil, errors.New("short buffer")
	}
	return len(Datagram.Data), addr, nil
}

func (s SocksConn) ReadMsgUDP(b, oob []byte) (n, oobn, flags int, addr *net.UDPAddr, err error) {
	panic("unimplemented")
}

func (s SocksConn) WriteToUDP(b []byte, addr *net.UDPAddr) (int, error) {

	a, addrb, portb, err := socks5.ParseAddress(addr.String())
	if err != nil {
		return 0, err
	}
	packet := socks5.NewDatagram(a, addrb, portb, b)
	_, err = s.Conn.Write(packet.Bytes())
	if err != nil {
		return 0, err
	}
	return len(b), nil
}

func (s SocksConn) WriteMsgUDP(b, oob []byte, addr *net.UDPAddr) (n, oobn int, err error) {
	panic("unimplemented")
}

func (sc *SocksClient) ListenPacket(network string, locAddr *net.UDPAddr) (transport.UDPConn, error) {
	conn, err := sc.listenPacket()
	if err != nil {
		log.Println("[SOCKS5 Client Error] cannot listen packet", err)
	}
	return conn, err
}

func (sc *SocksClient) listenPacket() (transport.UDPConn, error) {
	var username, password string
	if sc.addr.User != nil {
		username = sc.addr.User.Username()
		password, _ = sc.addr.User.Password()
	}
	client, err := socks5.NewClient(
		sc.addr.Host,
		username, password, 300, 300)
	if err != nil {
		return nil, err
	}

	err = client.Negotiate(nil)
	if err != nil {
		return nil, err
	}

	udpRequest := socks5.NewRequest(socks5.CmdUDP, socks5.ATYPIPv4, []byte{0x00, 0x00, 0x00, 0x00}, []byte{0x00, 0x00})

	reply, err := client.Request(udpRequest)
	if err != nil {
		return nil, err
	}

	udpServerAddr := socks5.ToAddress(reply.Atyp, reply.BndAddr, reply.BndPort)

	conn, err := net.Dial("udp", udpServerAddr)
	if err != nil {
		return nil, err
	}

	return &SocksConn{conn, client}, nil
}

func (s SocksConn) WriteTo(p []byte, addr net.Addr) (n int, err error) {
	return s.WriteToUDP(p, addr.(*net.UDPAddr))
}

func (s SocksConn) ReadFrom(p []byte) (n int, addr net.Addr, err error) {
	return s.ReadFromUDP(p)
}

func (s SocksConn) Read(b []byte) (int, error) {
	panic("implement me")
}

func (s SocksConn) RemoteAddr() net.Addr {
	panic("implement me")
}

func (s SocksConn) Write(b []byte) (int, error) {
	panic("implement me")
}

func (sc *SocksClient) ResolveUDPAddr(network string, address string) (*net.UDPAddr, error) {
	dnsServer, err := net.ResolveUDPAddr("udp", "1.1.1.1:53")
	if err != nil {
		return nil, err
	}
	proxiedResolver := newDnsResolver(sc, dnsServer)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	host, port, err := net.SplitHostPort(address)
	if err != nil {
		return nil, err
	}
	ip, err := proxiedResolver.lookupIPAddr(ctx, host, network == "udp6")
	if err != nil {
		return nil, err
	}
	if len(ip) <= 0 {
		return nil, errors.New("cannot resolve hostname: NXDOMAIN")
	}
	switch network {
	case "udp4":
		var v4IPAddr []net.IPAddr
		for _, v := range ip {
			if v.IP.To4() != nil {
				v4IPAddr = append(v4IPAddr, v)
			}
		}
		ip = v4IPAddr
	case "udp6":
		var v6IPAddr []net.IPAddr
		for _, v := range ip {
			if v.IP.To4() == nil {
				v6IPAddr = append(v6IPAddr, v)
			}
		}
		ip = v6IPAddr
	case "udp":
	default:
		return nil, errors.New("unknown network")
	}

	if len(ip) <= 0 {
		return nil, errors.New("cannot resolve hostname: so suitable address")
	}

	portInInt, err := strconv.ParseInt(port, 10, 32)
	if err != nil {
		return nil, err
	}
	return &net.UDPAddr{
		IP:   ip[0].IP,
		Port: int(portInInt),
		Zone: "",
	}, nil
}

func newDnsResolver(sc *SocksClient,
	serverAddress net.Addr) *dnsResolver {
	return &dnsResolver{sc: sc, serverAddress: serverAddress}
}

type dnsResolver struct {
	sc            *SocksClient
	serverAddress net.Addr
}

func (r *dnsResolver) lookupIPAddr(ctx context.Context, host string, ipv6 bool) ([]net.IPAddr, error) {
	packetConn, err := r.sc.listenPacket()
	if err != nil {
		return nil, err
	}
	msg := new(dns.Msg)
	if !ipv6 {
		msg.SetQuestion(dns.Fqdn(host), dns.TypeA)
	} else {
		msg.SetQuestion(dns.Fqdn(host), dns.TypeAAAA)
	}
	encodedMsg, err := msg.Pack()
	if err != nil {
		packetConn.Close()
		return nil, err
	}
	for i := 2; i >= 0; i-- {
		_, err := packetConn.WriteTo(encodedMsg, r.serverAddress)
		if err != nil {
			log.Println(err.Error())
		}
	}
	ctx, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()
	go func() {
		<-ctx.Done()
		packetConn.Close()
	}()
	var dataBuf [1600]byte
	n, _, err := packetConn.ReadFrom(dataBuf[:])
	if err != nil {
		return nil, err
	}
	err = msg.Unpack(dataBuf[:n])
	if err != nil {
		return nil, err
	}
	var returnedIPs []net.IPAddr
	for _, resp := range msg.Answer {
		switch respTyped := resp.(type) {
		case *dns.A:
			returnedIPs = append(returnedIPs, net.IPAddr{IP: respTyped.A})
		case *dns.AAAA:
			returnedIPs = append(returnedIPs, net.IPAddr{IP: respTyped.AAAA})
		}
	}
	return returnedIPs, nil
}

func NewTransportWrapper(sc *SocksClient, innerNet transport.Net) transport.Net {
	return &transportWrapper{sc: sc, Net: innerNet}
}

type transportWrapper struct {
	transport.Net
	sc *SocksClient
}

func (t *transportWrapper) ListenUDP(network string, locAddr *net.UDPAddr) (transport.UDPConn, error) {
	return t.sc.ListenPacket(network, nil)
}

func (t *transportWrapper) ListenPacket(network string, address string) (net.PacketConn, error) {
	return t.sc.ListenPacket(network, nil)
}

func (t *transportWrapper) ResolveUDPAddr(network string, address string) (*net.UDPAddr, error) {
	return t.sc.ResolveUDPAddr(network, address)
}
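
// Hypothetical wiring sketch (the URL and server address are placeholders):
// resolve names and open UDP associations through the SOCKS5 proxy instead
// of the local network stack.
//
//	proxyURL, _ := url.Parse("socks5://127.0.0.1:1080")
//	sc := NewSocks5UDPClient(proxyURL)
//	addr, _ := sc.ResolveUDPAddr("udp4", "stun.example.net:3478")
//	conn, _ := sc.ListenPacket("udp", nil)
//	_, _ = conn.WriteTo([]byte("ping"), addr)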
07070100000050000041ED00000000000000000000000267D9BD4E00000000000000000000000000000000000000000000002200000000snowflake-2.11.0/common/sqsclient07070100000051000081A400000000000000000000000167D9BD4E0000047D000000000000000000000000000000000000002F00000000snowflake-2.11.0/common/sqsclient/sqsclient.gopackage sqsclient

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/service/sqs"
)

type SQSClient interface {
	ReceiveMessage(ctx context.Context, input *sqs.ReceiveMessageInput, optFns ...func(*sqs.Options)) (*sqs.ReceiveMessageOutput, error)
	ListQueues(ctx context.Context, input *sqs.ListQueuesInput, optFns ...func(*sqs.Options)) (*sqs.ListQueuesOutput, error)
	GetQueueAttributes(ctx context.Context, input *sqs.GetQueueAttributesInput, optFns ...func(*sqs.Options)) (*sqs.GetQueueAttributesOutput, error)
	DeleteQueue(ctx context.Context, input *sqs.DeleteQueueInput, optFns ...func(*sqs.Options)) (*sqs.DeleteQueueOutput, error)
	CreateQueue(ctx context.Context, input *sqs.CreateQueueInput, optFns ...func(*sqs.Options)) (*sqs.CreateQueueOutput, error)
	SendMessage(ctx context.Context, input *sqs.SendMessageInput, optFns ...func(*sqs.Options)) (*sqs.SendMessageOutput, error)
	DeleteMessage(ctx context.Context, input *sqs.DeleteMessageInput, optFns ...func(*sqs.Options)) (*sqs.DeleteMessageOutput, error)
	GetQueueUrl(ctx context.Context, input *sqs.GetQueueUrlInput, optFns ...func(*sqs.Options)) (*sqs.GetQueueUrlOutput, error)
}
07070100000052000081A400000000000000000000000167D9BD4E00001E3D000000000000000000000000000000000000003400000000snowflake-2.11.0/common/sqsclient/sqsclient_mock.go// Code generated by MockGen. DO NOT EDIT.
// Source: common/sqsclient/sqsclient.go

// Package mock_sqsclient is a generated GoMock package.
package sqsclient

import (
	context "context"
	reflect "reflect"

	sqs "github.com/aws/aws-sdk-go-v2/service/sqs"
	gomock "github.com/golang/mock/gomock"
)

// MockSQSClient is a mock of SQSClient interface.
type MockSQSClient struct {
	ctrl     *gomock.Controller
	recorder *MockSQSClientMockRecorder
}

// MockSQSClientMockRecorder is the mock recorder for MockSQSClient.
type MockSQSClientMockRecorder struct {
	mock *MockSQSClient
}

// NewMockSQSClient creates a new mock instance.
func NewMockSQSClient(ctrl *gomock.Controller) *MockSQSClient {
	mock := &MockSQSClient{ctrl: ctrl}
	mock.recorder = &MockSQSClientMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockSQSClient) EXPECT() *MockSQSClientMockRecorder {
	return m.recorder
}

// CreateQueue mocks base method.
func (m *MockSQSClient) CreateQueue(ctx context.Context, input *sqs.CreateQueueInput, optFns ...func(*sqs.Options)) (*sqs.CreateQueueOutput, error) {
	m.ctrl.T.Helper()
	varargs := []interface{}{ctx, input}
	for _, a := range optFns {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "CreateQueue", varargs...)
	ret0, _ := ret[0].(*sqs.CreateQueueOutput)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// CreateQueue indicates an expected call of CreateQueue.
func (mr *MockSQSClientMockRecorder) CreateQueue(ctx, input interface{}, optFns ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	varargs := append([]interface{}{ctx, input}, optFns...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateQueue", reflect.TypeOf((*MockSQSClient)(nil).CreateQueue), varargs...)
}

// DeleteMessage mocks base method.
func (m *MockSQSClient) DeleteMessage(ctx context.Context, input *sqs.DeleteMessageInput, optFns ...func(*sqs.Options)) (*sqs.DeleteMessageOutput, error) {
	m.ctrl.T.Helper()
	varargs := []interface{}{ctx, input}
	for _, a := range optFns {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "DeleteMessage", varargs...)
	ret0, _ := ret[0].(*sqs.DeleteMessageOutput)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// DeleteMessage indicates an expected call of DeleteMessage.
func (mr *MockSQSClientMockRecorder) DeleteMessage(ctx, input interface{}, optFns ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	varargs := append([]interface{}{ctx, input}, optFns...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteMessage", reflect.TypeOf((*MockSQSClient)(nil).DeleteMessage), varargs...)
}

// DeleteQueue mocks base method.
func (m *MockSQSClient) DeleteQueue(ctx context.Context, input *sqs.DeleteQueueInput, optFns ...func(*sqs.Options)) (*sqs.DeleteQueueOutput, error) {
	m.ctrl.T.Helper()
	varargs := []interface{}{ctx, input}
	for _, a := range optFns {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "DeleteQueue", varargs...)
	ret0, _ := ret[0].(*sqs.DeleteQueueOutput)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// DeleteQueue indicates an expected call of DeleteQueue.
func (mr *MockSQSClientMockRecorder) DeleteQueue(ctx, input interface{}, optFns ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	varargs := append([]interface{}{ctx, input}, optFns...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteQueue", reflect.TypeOf((*MockSQSClient)(nil).DeleteQueue), varargs...)
}

// GetQueueAttributes mocks base method.
func (m *MockSQSClient) GetQueueAttributes(ctx context.Context, input *sqs.GetQueueAttributesInput, optFns ...func(*sqs.Options)) (*sqs.GetQueueAttributesOutput, error) {
	m.ctrl.T.Helper()
	varargs := []interface{}{ctx, input}
	for _, a := range optFns {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "GetQueueAttributes", varargs...)
	ret0, _ := ret[0].(*sqs.GetQueueAttributesOutput)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetQueueAttributes indicates an expected call of GetQueueAttributes.
func (mr *MockSQSClientMockRecorder) GetQueueAttributes(ctx, input interface{}, optFns ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	varargs := append([]interface{}{ctx, input}, optFns...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetQueueAttributes", reflect.TypeOf((*MockSQSClient)(nil).GetQueueAttributes), varargs...)
}

// GetQueueUrl mocks base method.
func (m *MockSQSClient) GetQueueUrl(ctx context.Context, input *sqs.GetQueueUrlInput, optFns ...func(*sqs.Options)) (*sqs.GetQueueUrlOutput, error) {
	m.ctrl.T.Helper()
	varargs := []interface{}{ctx, input}
	for _, a := range optFns {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "GetQueueUrl", varargs...)
	ret0, _ := ret[0].(*sqs.GetQueueUrlOutput)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetQueueUrl indicates an expected call of GetQueueUrl.
func (mr *MockSQSClientMockRecorder) GetQueueUrl(ctx, input interface{}, optFns ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	varargs := append([]interface{}{ctx, input}, optFns...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetQueueUrl", reflect.TypeOf((*MockSQSClient)(nil).GetQueueUrl), varargs...)
}

// ListQueues mocks base method.
func (m *MockSQSClient) ListQueues(ctx context.Context, input *sqs.ListQueuesInput, optFns ...func(*sqs.Options)) (*sqs.ListQueuesOutput, error) {
	m.ctrl.T.Helper()
	varargs := []interface{}{ctx, input}
	for _, a := range optFns {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "ListQueues", varargs...)
	ret0, _ := ret[0].(*sqs.ListQueuesOutput)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ListQueues indicates an expected call of ListQueues.
func (mr *MockSQSClientMockRecorder) ListQueues(ctx, input interface{}, optFns ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	varargs := append([]interface{}{ctx, input}, optFns...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListQueues", reflect.TypeOf((*MockSQSClient)(nil).ListQueues), varargs...)
}

// ReceiveMessage mocks base method.
func (m *MockSQSClient) ReceiveMessage(ctx context.Context, input *sqs.ReceiveMessageInput, optFns ...func(*sqs.Options)) (*sqs.ReceiveMessageOutput, error) {
	m.ctrl.T.Helper()
	varargs := []interface{}{ctx, input}
	for _, a := range optFns {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "ReceiveMessage", varargs...)
	ret0, _ := ret[0].(*sqs.ReceiveMessageOutput)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ReceiveMessage indicates an expected call of ReceiveMessage.
func (mr *MockSQSClientMockRecorder) ReceiveMessage(ctx, input interface{}, optFns ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	varargs := append([]interface{}{ctx, input}, optFns...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReceiveMessage", reflect.TypeOf((*MockSQSClient)(nil).ReceiveMessage), varargs...)
}

// SendMessage mocks base method.
func (m *MockSQSClient) SendMessage(ctx context.Context, input *sqs.SendMessageInput, optFns ...func(*sqs.Options)) (*sqs.SendMessageOutput, error) {
	m.ctrl.T.Helper()
	varargs := []interface{}{ctx, input}
	for _, a := range optFns {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "SendMessage", varargs...)
	ret0, _ := ret[0].(*sqs.SendMessageOutput)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// SendMessage indicates an expected call of SendMessage.
func (mr *MockSQSClientMockRecorder) SendMessage(ctx, input interface{}, optFns ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	varargs := append([]interface{}{ctx, input}, optFns...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMessage", reflect.TypeOf((*MockSQSClient)(nil).SendMessage), varargs...)
}
07070100000053000041ED00000000000000000000000267D9BD4E00000000000000000000000000000000000000000000002100000000snowflake-2.11.0/common/sqscreds07070100000054000081A400000000000000000000000167D9BD4E00000353000000000000000000000000000000000000003300000000snowflake-2.11.0/common/sqscreds/generate_creds.gopackage main

import (
	"fmt"

	sqscreds "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/sqscreds/lib"
)

// This script can be run to generate the encoded SQS credentials to pass as a CLI param or SOCKS option to the client
func main() {
	var accessKey, secretKey string

	fmt.Print("Enter Access Key: ")
	_, err := fmt.Scanln(&accessKey)
	if err != nil {
		fmt.Println("Error reading access key:", err)
		return
	}

	fmt.Print("Enter Secret Key: ")
	_, err = fmt.Scanln(&secretKey)
	if err != nil {
		fmt.Println("Error reading access key:", err)
		return
	}

	awsCreds := sqscreds.AwsCreds{AwsAccessKeyId: accessKey, AwsSecretKey: secretKey}
	println()
	println("Encoded Credentials:")
	res, err := awsCreds.Base64()
	if err != nil {
		fmt.Println("Error encoding credentials:", err)
		return
	}
	println(res)
}
07070100000055000041ED00000000000000000000000267D9BD4E00000000000000000000000000000000000000000000002500000000snowflake-2.11.0/common/sqscreds/lib07070100000056000081A400000000000000000000000167D9BD4E000002A9000000000000000000000000000000000000003200000000snowflake-2.11.0/common/sqscreds/lib/sqs_creds.gopackage sqscreds

import (
	"encoding/base64"
	"encoding/json"
)

type AwsCreds struct {
	AwsAccessKeyId string `json:"aws-access-key-id"`
	AwsSecretKey   string `json:"aws-secret-key"`
}

func (awsCreds AwsCreds) Base64() (string, error) {
	jsonData, err := json.Marshal(awsCreds)
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(jsonData), nil
}

func AwsCredsFromBase64(base64Str string) (AwsCreds, error) {
	var awsCreds AwsCreds

	jsonData, err := base64.StdEncoding.DecodeString(base64Str)
	if err != nil {
		return awsCreds, err
	}

	err = json.Unmarshal(jsonData, &awsCreds)
	if err != nil {
		return awsCreds, err
	}

	return awsCreds, nil
}
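
// Hypothetical round-trip sketch (the key values are placeholders):
//
//	creds := AwsCreds{AwsAccessKeyId: "AKIA...", AwsSecretKey: "secret"}
//	encoded, _ := creds.Base64()
//	decoded, err := AwsCredsFromBase64(encoded) // decoded == creds when err == nil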
07070100000057000041ED00000000000000000000000267D9BD4E00000000000000000000000000000000000000000000001D00000000snowflake-2.11.0/common/task07070100000058000081A400000000000000000000000167D9BD4E00000AD5000000000000000000000000000000000000002900000000snowflake-2.11.0/common/task/periodic.go// Package task
// Reused from https://github.com/v2fly/v2ray-core/blob/784775f68922f07d40c9eead63015b2026af2ade/common/task/periodic.go
/*
The MIT License (MIT)

Copyright (c) 2015-2021 V2Ray & V2Fly Community

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package task

import (
	"sync"
	"time"
)

// Periodic is a task that runs periodically.
type Periodic struct {
	// Interval of the task being run
	Interval time.Duration
	// Execute is the task function
	Execute func() error
	// OnError handles the error of the task
	OnError func(error)

	access  sync.Mutex
	timer   *time.Timer
	running bool
}

func (t *Periodic) hasClosed() bool {
	t.access.Lock()
	defer t.access.Unlock()

	return !t.running
}

func (t *Periodic) checkedExecute() error {
	if t.hasClosed() {
		return nil
	}

	if err := t.Execute(); err != nil {
		if t.OnError != nil {
			t.OnError(err)
		} else {
			// default error handling is to shut down the task
			t.access.Lock()
			t.running = false
			t.access.Unlock()
			return err
		}
	}

	t.access.Lock()
	defer t.access.Unlock()

	if !t.running {
		return nil
	}

	t.timer = time.AfterFunc(t.Interval, func() {
		t.checkedExecute()
	})

	return nil
}

// Start implements common.Runnable.
func (t *Periodic) Start() error {
	t.access.Lock()
	if t.running {
		t.access.Unlock()
		return nil
	}
	t.running = true
	t.access.Unlock()

	if err := t.checkedExecute(); err != nil {
		t.access.Lock()
		t.running = false
		t.access.Unlock()
		return err
	}

	return nil
}

func (t *Periodic) WaitThenStart() {
	time.AfterFunc(t.Interval, func() {
		t.Start()
	})
}

// Close implements common.Closable.
func (t *Periodic) Close() error {
	t.access.Lock()
	defer t.access.Unlock()

	t.running = false
	if t.timer != nil {
		t.timer.Stop()
		t.timer = nil
	}

	return nil
}
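
// Hypothetical usage sketch: run a task every minute until Close is called.
// The interval and the task body (doWork) are placeholders.
//
//	t := &Periodic{
//		Interval: time.Minute,
//		Execute:  func() error { return doWork() },
//		OnError:  func(err error) { log.Println(err) },
//	}
//	t.Start()
//	// ...
//	t.Close()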
07070100000059000041ED00000000000000000000000267D9BD4E00000000000000000000000000000000000000000000002400000000snowflake-2.11.0/common/turbotunnel0707010000005A000081A400000000000000000000000167D9BD4E000003A5000000000000000000000000000000000000003000000000snowflake-2.11.0/common/turbotunnel/clientid.gopackage turbotunnel

import (
	"crypto/rand"
	"encoding/hex"
)

// ClientID is an abstract identifier that binds together all the communications
// belonging to a single client session, even though those communications may
// arrive from multiple IP addresses or over multiple lower-level connections.
// It plays the same role that an (IP address, port number) tuple plays in a
// net.UDPConn: it's the return address pertaining to a long-lived abstract
// client session. The client attaches its ClientID to each of its
// communications, enabling the server to disambiguate requests among its many
// clients. ClientID implements the net.Addr interface.
type ClientID [8]byte

func NewClientID() ClientID {
	var id ClientID
	_, err := rand.Read(id[:])
	if err != nil {
		panic(err)
	}
	return id
}

func (id ClientID) Network() string { return "clientid" }
func (id ClientID) String() string  { return hex.EncodeToString(id[:]) }
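
// Illustrative sketch: a ClientID can stand in for a net.Addr, for example as
// the per-client key of a QueuePacketConn.
//
//	id := NewClientID()
//	_ = id.Network() // "clientid"
//	_ = id.String()  // 16 hex characters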
0707010000005B000081A400000000000000000000000167D9BD4E000010C4000000000000000000000000000000000000003100000000snowflake-2.11.0/common/turbotunnel/clientmap.gopackage turbotunnel

import (
	"container/heap"
	"net"
	"sync"
	"time"
)

// clientRecord is a record of a recently seen client, with the time it was last
// seen and a send queue.
type clientRecord struct {
	Addr      net.Addr
	LastSeen  time.Time
	SendQueue chan []byte
}

// ClientMap manages a mapping of live clients (keyed by address, which will be
// a ClientID) to their respective send queues. ClientMap's functions are safe
// to call from multiple goroutines.
type ClientMap struct {
	// We use an inner structure to avoid exposing public heap.Interface
	// functions to users of clientMap.
	inner clientMapInner
	// Synchronizes access to inner.
	lock sync.Mutex
}

// NewClientMap creates a ClientMap that expires clients after a timeout.
//
// The timeout does not have to be kept in sync with smux's internal idle
// timeout. If a client is removed from the client map while the smux session is
// still live, the worst that can happen is a loss of whatever packets were in
// the send queue at the time. If smux later decides to send more packets to the
// same client, we'll instantiate a new send queue, and if the client ever
// connects again with the proper client ID, we'll deliver them.
func NewClientMap(timeout time.Duration) *ClientMap {
	m := &ClientMap{
		inner: clientMapInner{
			byAge:  make([]*clientRecord, 0),
			byAddr: make(map[net.Addr]int),
		},
	}
	go func() {
		for {
			time.Sleep(timeout / 2)
			now := time.Now()
			m.lock.Lock()
			m.inner.removeExpired(now, timeout)
			m.lock.Unlock()
		}
	}()
	return m
}

// SendQueue returns the send queue corresponding to addr, creating it if
// necessary.
func (m *ClientMap) SendQueue(addr net.Addr) chan []byte {
	m.lock.Lock()
	queue := m.inner.SendQueue(addr, time.Now())
	m.lock.Unlock()
	return queue
}

// clientMapInner is the inner type of ClientMap, implementing heap.Interface.
// byAge is the backing store, a heap ordered by LastSeen time, to facilitate
// expiring old client records. byAddr is a map from addresses (i.e., ClientIDs)
// to heap indices, to allow looking up by address. Unlike ClientMap,
// clientMapInner requires external synchronization.
type clientMapInner struct {
	byAge  []*clientRecord
	byAddr map[net.Addr]int
}

// removeExpired removes all client records whose LastSeen timestamp is more
// than timeout in the past.
func (inner *clientMapInner) removeExpired(now time.Time, timeout time.Duration) {
	for len(inner.byAge) > 0 && now.Sub(inner.byAge[0].LastSeen) >= timeout {
		heap.Pop(inner)
	}
}

// SendQueue finds the existing client record corresponding to addr, or creates
// a new one if none exists yet. It updates the client record's LastSeen time
// and returns its SendQueue.
func (inner *clientMapInner) SendQueue(addr net.Addr, now time.Time) chan []byte {
	var record *clientRecord
	i, ok := inner.byAddr[addr]
	if ok {
		// Found one, update its LastSeen.
		record = inner.byAge[i]
		record.LastSeen = now
		heap.Fix(inner, i)
	} else {
		// Not found, create a new one.
		record = &clientRecord{
			Addr:      addr,
			LastSeen:  now,
			SendQueue: make(chan []byte, queueSize),
		}
		heap.Push(inner, record)
	}
	return record.SendQueue
}

// heap.Interface for clientMapInner.

func (inner *clientMapInner) Len() int {
	if len(inner.byAge) != len(inner.byAddr) {
		panic("inconsistent clientMap")
	}
	return len(inner.byAge)
}

func (inner *clientMapInner) Less(i, j int) bool {
	return inner.byAge[i].LastSeen.Before(inner.byAge[j].LastSeen)
}

func (inner *clientMapInner) Swap(i, j int) {
	inner.byAge[i], inner.byAge[j] = inner.byAge[j], inner.byAge[i]
	inner.byAddr[inner.byAge[i].Addr] = i
	inner.byAddr[inner.byAge[j].Addr] = j
}

func (inner *clientMapInner) Push(x interface{}) {
	record := x.(*clientRecord)
	if _, ok := inner.byAddr[record.Addr]; ok {
		panic("duplicate address in clientMap")
	}
	// Insert into byAddr map.
	inner.byAddr[record.Addr] = len(inner.byAge)
	// Insert into byAge slice.
	inner.byAge = append(inner.byAge, record)
}

func (inner *clientMapInner) Pop() interface{} {
	n := len(inner.byAddr)
	// Remove from byAge slice.
	record := inner.byAge[n-1]
	inner.byAge[n-1] = nil
	inner.byAge = inner.byAge[:n-1]
	// Remove from byAddr map.
	delete(inner.byAddr, record.Addr)
	close(record.SendQueue)
	return record
}
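
// Hypothetical sketch: queue an outgoing packet for a client, creating the
// send queue on first use (clientAddr and packet are placeholders).
//
//	m := NewClientMap(1 * time.Minute)
//	m.SendQueue(clientAddr) <- packet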
0707010000005C000081A400000000000000000000000167D9BD4E0000019D000000000000000000000000000000000000003600000000snowflake-2.11.0/common/turbotunnel/clientmap_test.gopackage turbotunnel

import (
	"testing"
	"time"
)

// Benchmark the ClientMap.SendQueue function. This is mainly measuring the cost
// of the mutex operations around the call to clientMapInner.SendQueue.
func BenchmarkSendQueue(b *testing.B) {
	m := NewClientMap(1 * time.Hour)
	id := NewClientID()
	m.SendQueue(id) // populate the entry for id
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.SendQueue(id)
	}
}
0707010000005D000081A400000000000000000000000167D9BD4E0000023C000000000000000000000000000000000000002E00000000snowflake-2.11.0/common/turbotunnel/consts.go// Package turbotunnel provides support for overlaying a virtual net.PacketConn
// on some other network carrier.
//
// https://github.com/net4people/bbs/issues/9
package turbotunnel

import "errors"

// This magic prefix is how a client opts into turbo tunnel mode. It is just a
// randomly generated byte string.
var Token = [8]byte{0x12, 0x93, 0x60, 0x5d, 0x27, 0x81, 0x75, 0xf5}

// The size of receive and send queues.
const queueSize = 512

var errClosedPacketConn = errors.New("operation on closed connection")
var errNotImplemented = errors.New("not implemented")
0707010000005E000081A400000000000000000000000167D9BD4E00001580000000000000000000000000000000000000003700000000snowflake-2.11.0/common/turbotunnel/queuepacketconn.gopackage turbotunnel

import (
	"net"
	"sync"
	"sync/atomic"
	"time"
)

// taggedPacket is a combination of a []byte and a net.Addr, encapsulating the
// return type of PacketConn.ReadFrom.
type taggedPacket struct {
	P    []byte
	Addr net.Addr
}

// QueuePacketConn implements net.PacketConn by storing queues of packets. There
// is one incoming queue (where packets are additionally tagged by the source
// address of the client that sent them). There are many outgoing queues, one
// for each client address that has been recently seen. The QueueIncoming method
// inserts a packet into the incoming queue, to eventually be returned by
// ReadFrom. WriteTo inserts a packet into an address-specific outgoing queue,
// which can later be accessed through the OutgoingQueue method.
type QueuePacketConn struct {
	clients   *ClientMap
	localAddr net.Addr
	recvQueue chan taggedPacket
	closeOnce sync.Once
	closed    chan struct{}
	mtu       int
	// Pool of reusable mtu-sized buffers.
	bufPool sync.Pool
	// What error to return when the QueuePacketConn is closed.
	err atomic.Value
}

// NewQueuePacketConn makes a new QueuePacketConn, set to track recent clients
// for at least a duration of timeout. The maximum packet size is mtu.
func NewQueuePacketConn(localAddr net.Addr, timeout time.Duration, mtu int) *QueuePacketConn {
	return &QueuePacketConn{
		clients:   NewClientMap(timeout),
		localAddr: localAddr,
		recvQueue: make(chan taggedPacket, queueSize),
		closed:    make(chan struct{}),
		mtu:       mtu,
		bufPool:   sync.Pool{New: func() interface{} { return make([]byte, mtu) }},
	}
}

// QueueIncoming queues an incoming packet and its source address, to be
// returned in a future call to ReadFrom. If p is longer than the MTU, only its
// first MTU bytes will be used.
func (c *QueuePacketConn) QueueIncoming(p []byte, addr net.Addr) {
	select {
	case <-c.closed:
		// If we're closed, silently drop it.
		return
	default:
	}
	// Copy the slice so that the caller may reuse it.
	buf := c.bufPool.Get().([]byte)
	if len(p) < cap(buf) {
		buf = buf[:len(p)]
	} else {
		buf = buf[:cap(buf)]
	}
	copy(buf, p)
	select {
	case c.recvQueue <- taggedPacket{buf, addr}:
	default:
		// Drop the incoming packet if the receive queue is full.
		c.Restore(buf)
	}
}

// OutgoingQueue returns the queue of outgoing packets corresponding to addr,
// creating it if necessary. The contents of the queue will be packets that are
// written to the address in question using WriteTo.
func (c *QueuePacketConn) OutgoingQueue(addr net.Addr) <-chan []byte {
	return c.clients.SendQueue(addr)
}

// Restore adds a slice to the internal pool of packet buffers. Typically you
// will call this with a slice from the OutgoingQueue channel once you are done
// using it. (It is not an error to fail to do so; it will just result in more
// allocations.)
func (c *QueuePacketConn) Restore(p []byte) {
	if cap(p) >= c.mtu {
		c.bufPool.Put(p)
	}
}

// ReadFrom returns a packet and address previously stored by QueueIncoming.
func (c *QueuePacketConn) ReadFrom(p []byte) (int, net.Addr, error) {
	select {
	case <-c.closed:
		return 0, nil, &net.OpError{Op: "read", Net: c.LocalAddr().Network(), Addr: c.LocalAddr(), Err: c.err.Load().(error)}
	default:
	}
	select {
	case <-c.closed:
		return 0, nil, &net.OpError{Op: "read", Net: c.LocalAddr().Network(), Addr: c.LocalAddr(), Err: c.err.Load().(error)}
	case packet := <-c.recvQueue:
		n := copy(p, packet.P)
		c.Restore(packet.P)
		return n, packet.Addr, nil
	}
}

// WriteTo queues an outgoing packet for the given address. The queue can later
// be retrieved using the OutgoingQueue method. If p is longer than the MTU,
// only its first MTU bytes will be used.
func (c *QueuePacketConn) WriteTo(p []byte, addr net.Addr) (int, error) {
	select {
	case <-c.closed:
		return 0, &net.OpError{Op: "write", Net: c.LocalAddr().Network(), Addr: c.LocalAddr(), Err: c.err.Load().(error)}
	default:
	}
	// Copy the slice so that the caller may reuse it.
	buf := c.bufPool.Get().([]byte)
	if len(p) < cap(buf) {
		buf = buf[:len(p)]
	} else {
		buf = buf[:cap(buf)]
	}
	copy(buf, p)
	select {
	case c.clients.SendQueue(addr) <- buf:
		return len(buf), nil
	default:
		// Drop the outgoing packet if the send queue is full.
		c.Restore(buf)
		return len(p), nil
	}
}

// closeWithError unblocks pending operations and makes future operations fail
// with the given error. If err is nil, it becomes errClosedPacketConn.
func (c *QueuePacketConn) closeWithError(err error) error {
	var newlyClosed bool
	c.closeOnce.Do(func() {
		newlyClosed = true
		// Store the error to be returned by future PacketConn
		// operations.
		if err == nil {
			err = errClosedPacketConn
		}
		c.err.Store(err)
		close(c.closed)
	})
	if !newlyClosed {
		return &net.OpError{Op: "close", Net: c.LocalAddr().Network(), Addr: c.LocalAddr(), Err: c.err.Load().(error)}
	}
	return nil
}

// Close unblocks pending operations and makes future operations fail with a
// "closed connection" error.
func (c *QueuePacketConn) Close() error {
	return c.closeWithError(nil)
}

// LocalAddr returns the localAddr value that was passed to NewQueuePacketConn.
func (c *QueuePacketConn) LocalAddr() net.Addr { return c.localAddr }

func (c *QueuePacketConn) SetDeadline(t time.Time) error      { return errNotImplemented }
func (c *QueuePacketConn) SetReadDeadline(t time.Time) error  { return errNotImplemented }
func (c *QueuePacketConn) SetWriteDeadline(t time.Time) error { return errNotImplemented }
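
// Hypothetical flow sketch (localAddr, clientID, packet, and reply are
// placeholders): the carrier feeds received packets in with QueueIncoming and
// drains per-client output from OutgoingQueue.
//
//	pconn := NewQueuePacketConn(localAddr, 1*time.Minute, 1500)
//	pconn.QueueIncoming(packet, clientID) // later returned by ReadFrom
//	pconn.WriteTo(reply, clientID)        // later appears on OutgoingQueue(clientID)
//	for p := range pconn.OutgoingQueue(clientID) {
//		// send p over the carrier, then pconn.Restore(p)
//	}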
0707010000005F000081A400000000000000000000000167D9BD4E00001ABA000000000000000000000000000000000000003C00000000snowflake-2.11.0/common/turbotunnel/queuepacketconn_test.gopackage turbotunnel

import (
	"bytes"
	"fmt"
	"net"
	"sync"
	"testing"
	"time"

	"github.com/xtaci/kcp-go/v5"
)

type emptyAddr struct{}

func (_ emptyAddr) Network() string { return "empty" }
func (_ emptyAddr) String() string  { return "empty" }

type intAddr int

func (i intAddr) Network() string { return "int" }
func (i intAddr) String() string  { return fmt.Sprintf("%d", i) }

// Run with -benchmem to see memory allocations.
func BenchmarkQueueIncoming(b *testing.B) {
	conn := NewQueuePacketConn(emptyAddr{}, 1*time.Hour, 500)
	defer conn.Close()

	b.ResetTimer()
	var p [500]byte
	for i := 0; i < b.N; i++ {
		conn.QueueIncoming(p[:], emptyAddr{})
	}
	b.StopTimer()
}

// BenchmarkWriteTo benchmarks the QueuePacketConn.WriteTo function.
func BenchmarkWriteTo(b *testing.B) {
	conn := NewQueuePacketConn(emptyAddr{}, 1*time.Hour, 500)
	defer conn.Close()

	b.ResetTimer()
	var p [500]byte
	for i := 0; i < b.N; i++ {
		conn.WriteTo(p[:], emptyAddr{})
	}
	b.StopTimer()
}

// TestQueueIncomingOversize tests that QueueIncoming truncates packets that are
// larger than the MTU.
func TestQueueIncomingOversize(t *testing.T) {
	const payload = "abcdefghijklmnopqrstuvwxyz"
	conn := NewQueuePacketConn(emptyAddr{}, 1*time.Hour, len(payload)-1)
	defer conn.Close()
	conn.QueueIncoming([]byte(payload), emptyAddr{})
	var p [500]byte
	n, _, err := conn.ReadFrom(p[:])
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(p[:n], []byte(payload[:len(payload)-1])) {
		t.Fatalf("payload was %+q, expected %+q", p[:n], payload[:len(payload)-1])
	}
}

// TestWriteToOversize tests that WriteTo truncates packets that are larger than
// the MTU.
func TestWriteToOversize(t *testing.T) {
	const payload = "abcdefghijklmnopqrstuvwxyz"
	conn := NewQueuePacketConn(emptyAddr{}, 1*time.Hour, len(payload)-1)
	defer conn.Close()
	conn.WriteTo([]byte(payload), emptyAddr{})
	p := <-conn.OutgoingQueue(emptyAddr{})
	if !bytes.Equal(p, []byte(payload[:len(payload)-1])) {
		t.Fatalf("payload was %+q, expected %+q", p, payload[:len(payload)-1])
	}
}

// TestRestoreMTU tests that Restore ignores any inputs that are not at least
// MTU-sized.
func TestRestoreMTU(t *testing.T) {
	const mtu = 500
	const payload = "hello"
	conn := NewQueuePacketConn(emptyAddr{}, 1*time.Hour, mtu)
	defer conn.Close()
	conn.Restore(make([]byte, mtu-1))
	// This WriteTo may use the short slice we just gave to Restore.
	conn.WriteTo([]byte(payload), emptyAddr{})
	// Read the queued slice and ensure its capacity is at least the MTU.
	p := <-conn.OutgoingQueue(emptyAddr{})
	if cap(p) != mtu {
		t.Fatalf("cap was %v, expected %v", cap(p), mtu)
	}
	// Check the payload while we're at it.
	if !bytes.Equal(p, []byte(payload)) {
		t.Fatalf("payload was %+q, expected %+q", p, payload)
	}
}

// TestRestoreCap tests that Restore can use slices whose cap is at least the
// MTU, even if the len is shorter.
func TestRestoreCap(t *testing.T) {
	const mtu = 500
	const payload = "hello"
	conn := NewQueuePacketConn(emptyAddr{}, 1*time.Hour, mtu)
	defer conn.Close()
	conn.Restore(make([]byte, 0, mtu))
	conn.WriteTo([]byte(payload), emptyAddr{})
	p := <-conn.OutgoingQueue(emptyAddr{})
	if !bytes.Equal(p, []byte(payload)) {
		t.Fatalf("payload was %+q, expected %+q", p, payload)
	}
}

// DiscardPacketConn is a net.PacketConn whose ReadFrom method blocks forever and
// whose WriteTo method discards whatever it is called with.
type DiscardPacketConn struct{}

func (_ DiscardPacketConn) ReadFrom(_ []byte) (int, net.Addr, error)  { select {} } // block forever
func (_ DiscardPacketConn) WriteTo(p []byte, _ net.Addr) (int, error) { return len(p), nil }
func (_ DiscardPacketConn) Close() error                              { return nil }
func (_ DiscardPacketConn) LocalAddr() net.Addr                       { return emptyAddr{} }
func (_ DiscardPacketConn) SetDeadline(t time.Time) error             { return nil }
func (_ DiscardPacketConn) SetReadDeadline(t time.Time) error         { return nil }
func (_ DiscardPacketConn) SetWriteDeadline(t time.Time) error        { return nil }

// TranscriptPacketConn keeps a log of the []byte argument to every call to
// WriteTo.
type TranscriptPacketConn struct {
	Transcript [][]byte
	lock       sync.Mutex
	net.PacketConn
}

func NewTranscriptPacketConn(inner net.PacketConn) *TranscriptPacketConn {
	return &TranscriptPacketConn{
		PacketConn: inner,
	}
}

func (c *TranscriptPacketConn) WriteTo(p []byte, addr net.Addr) (int, error) {
	c.lock.Lock()
	defer c.lock.Unlock()

	p2 := make([]byte, len(p))
	copy(p2, p)
	c.Transcript = append(c.Transcript, p2)

	return c.PacketConn.WriteTo(p, addr)
}

// Tests that QueuePacketConn.WriteTo is compatible with the way kcp-go uses
// PacketConn, allocating source buffers in a sync.Pool.
//
// https://bugs.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/40260
func TestQueuePacketConnWriteToKCP(t *testing.T) {
	// Start a goroutine to constantly exercise kcp UDPSession.tx, writing
	// packets with payload "XXXX".
	done := make(chan struct{}, 0)
	defer close(done)
	ready := make(chan struct{}, 0)
	go func() {
		var readyClose sync.Once
		defer readyClose.Do(func() { close(ready) })
		pconn := DiscardPacketConn{}
		defer pconn.Close()
	loop:
		for {
			select {
			case <-done:
				break loop
			default:
			}
			// Create a new UDPSession, send once, then discard the
			// UDPSession.
			conn, err := kcp.NewConn2(intAddr(2), nil, 0, 0, pconn)
			if err != nil {
				panic(err)
			}
			_, err = conn.Write([]byte("XXXX"))
			if err != nil {
				panic(err)
			}
			conn.Close()
			// Signal the main test to start once we have done one
			// iteration of this noisy loop.
			readyClose.Do(func() { close(ready) })
		}
	}()

	pconn := NewQueuePacketConn(emptyAddr{}, 1*time.Hour, 500)
	defer pconn.Close()
	addr1 := intAddr(1)
	outgoing := pconn.OutgoingQueue(addr1)

	// Once the "XXXX" goroutine is started, repeatedly send a packet, wait,
	// then retrieve it and check whether it has changed since being sent.
	<-ready
	for i := 0; i < 10; i++ {
		transcript := NewTranscriptPacketConn(pconn)
		conn, err := kcp.NewConn2(addr1, nil, 0, 0, transcript)
		if err != nil {
			panic(err)
		}
		_, err = conn.Write([]byte("hello world"))
		if err != nil {
			panic(err)
		}

		err = conn.Close()
		if err != nil {
			panic(err)
		}

		// A sleep after the Write makes buffer reuse more likely.
		time.Sleep(100 * time.Millisecond)

		if len(transcript.Transcript) == 0 {
			panic("empty transcript")
		}

		for j, tr := range transcript.Transcript {
			p := <-outgoing
			// This test is meant to detect unsynchronized memory
			// changes, so freeze the slice we just read.
			p2 := make([]byte, len(p))
			copy(p2, p)
			if !bytes.Equal(p2, tr) {
				t.Fatalf("%d %d packet changed between send and recv\nsend: %+q\nrecv: %+q", i, j, tr, p2)
			}
		}
	}
}
07070100000060000081A400000000000000000000000167D9BD4E000016B7000000000000000000000000000000000000003800000000snowflake-2.11.0/common/turbotunnel/redialpacketconn.gopackage turbotunnel

import (
	"context"
	"errors"
	"net"
	"sync"
	"sync/atomic"
	"time"
)

// RedialPacketConn implements a long-lived net.PacketConn atop a sequence of
// other, transient net.PacketConns. RedialPacketConn creates a new
// net.PacketConn by calling a provided dialContext function. Whenever the
// net.PacketConn experiences a ReadFrom or WriteTo error, RedialPacketConn
// calls the dialContext function again and starts sending and receiving packets
// on the new net.PacketConn. RedialPacketConn's own ReadFrom and WriteTo
// methods return an error only when the dialContext function returns an error.
//
// RedialPacketConn uses static local and remote addresses that are independent
// of those of any dialed net.PacketConn.
type RedialPacketConn struct {
	localAddr   net.Addr
	remoteAddr  net.Addr
	dialContext func(context.Context) (net.PacketConn, error)
	recvQueue   chan []byte
	sendQueue   chan []byte
	closed      chan struct{}
	closeOnce   sync.Once
	// The first dial error, which causes the RedialPacketConn to be
	// closed and is returned from future read/write operations. Compare to
	// the rerr and werr in io.Pipe.
	err atomic.Value
}

// NewRedialPacketConn makes a new RedialPacketConn, with the given static local
// and remote addresses, and dialContext function.
func NewRedialPacketConn(
	localAddr, remoteAddr net.Addr,
	dialContext func(context.Context) (net.PacketConn, error),
) *RedialPacketConn {
	c := &RedialPacketConn{
		localAddr:   localAddr,
		remoteAddr:  remoteAddr,
		dialContext: dialContext,
		recvQueue:   make(chan []byte, queueSize),
		sendQueue:   make(chan []byte, queueSize),
		closed:      make(chan struct{}),
		err:         atomic.Value{},
	}
	go c.dialLoop()
	return c
}

// dialLoop repeatedly calls c.dialContext and passes the resulting
// net.PacketConn to c.exchange. It returns only when c is closed or dialContext
// returns an error.
func (c *RedialPacketConn) dialLoop() {
	ctx, cancel := context.WithCancel(context.Background())
	for {
		select {
		case <-c.closed:
			cancel()
			return
		default:
		}
		conn, err := c.dialContext(ctx)
		if err != nil {
			c.closeWithError(err)
			cancel()
			return
		}
		c.exchange(conn)
		conn.Close()
	}
}

// exchange calls ReadFrom on the given net.PacketConn and places the resulting
// packets in the receive queue, and takes packets from the send queue and calls
// WriteTo on them, making the current net.PacketConn active.
func (c *RedialPacketConn) exchange(conn net.PacketConn) {
	readErrCh := make(chan error)
	writeErrCh := make(chan error)

	go func() {
		defer close(readErrCh)
		for {
			select {
			case <-c.closed:
				return
			case <-writeErrCh:
				return
			default:
			}

			var buf [1500]byte
			n, _, err := conn.ReadFrom(buf[:])
			if err != nil {
				readErrCh <- err
				return
			}
			p := make([]byte, n)
			copy(p, buf[:])
			select {
			case c.recvQueue <- p:
			default: // OK to drop packets.
			}
		}
	}()

	go func() {
		defer close(writeErrCh)
		for {
			select {
			case <-c.closed:
				return
			case <-readErrCh:
				return
			case p := <-c.sendQueue:
				_, err := conn.WriteTo(p, c.remoteAddr)
				if err != nil {
					writeErrCh <- err
					return
				}
			}
		}
	}()

	select {
	case <-readErrCh:
	case <-writeErrCh:
	}
}

// ReadFrom reads a packet from the currently active net.PacketConn. The
// packet's original remote address is replaced with the RedialPacketConn's own
// remote address.
func (c *RedialPacketConn) ReadFrom(p []byte) (int, net.Addr, error) {
	select {
	case <-c.closed:
		return 0, nil, &net.OpError{Op: "read", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Addr: c.remoteAddr, Err: c.err.Load().(error)}
	default:
	}
	select {
	case <-c.closed:
		return 0, nil, &net.OpError{Op: "read", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Addr: c.remoteAddr, Err: c.err.Load().(error)}
	case buf := <-c.recvQueue:
		return copy(p, buf), c.remoteAddr, nil
	}
}

// WriteTo writes a packet to the currently active net.PacketConn. The addr
// argument is ignored and instead replaced with the RedialPacketConn's own
// remote address.
func (c *RedialPacketConn) WriteTo(p []byte, addr net.Addr) (int, error) {
	// addr is ignored.
	select {
	case <-c.closed:
		return 0, &net.OpError{Op: "write", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Addr: c.remoteAddr, Err: c.err.Load().(error)}
	default:
	}
	buf := make([]byte, len(p))
	copy(buf, p)
	select {
	case c.sendQueue <- buf:
		return len(buf), nil
	default:
		// Drop the outgoing packet if the send queue is full.
		return len(buf), nil
	}
}

// closeWithError unblocks pending operations and makes future operations fail
// with the given error. If err is nil, a generic "operation on closed
// connection" error is used instead.
func (c *RedialPacketConn) closeWithError(err error) error {
	var once bool
	c.closeOnce.Do(func() {
		// Store the error to be returned by future read/write
		// operations.
		if err == nil {
			err = errors.New("operation on closed connection")
		}
		c.err.Store(err)
		close(c.closed)
		once = true
	})
	if !once {
		return &net.OpError{Op: "close", Net: c.LocalAddr().Network(), Addr: c.LocalAddr(), Err: c.err.Load().(error)}
	}
	return nil
}

// Close unblocks pending operations and makes future operations fail with a
// "closed connection" error.
func (c *RedialPacketConn) Close() error {
	return c.closeWithError(nil)
}

// LocalAddr returns the localAddr value that was passed to NewRedialPacketConn.
func (c *RedialPacketConn) LocalAddr() net.Addr { return c.localAddr }

func (c *RedialPacketConn) SetDeadline(t time.Time) error      { return errNotImplemented }
func (c *RedialPacketConn) SetReadDeadline(t time.Time) error  { return errNotImplemented }
func (c *RedialPacketConn) SetWriteDeadline(t time.Time) error { return errNotImplemented }
07070100000061000041ED00000000000000000000000267D9BD4E00000000000000000000000000000000000000000000001D00000000snowflake-2.11.0/common/util07070100000062000081A400000000000000000000000167D9BD4E00001537000000000000000000000000000000000000002500000000snowflake-2.11.0/common/util/util.gopackage util

import (
	"encoding/json"
	"errors"
	"log"
	"net"
	"net/http"
	"slices"
	"sort"

	"github.com/pion/ice/v4"
	"github.com/pion/sdp/v3"
	"github.com/pion/webrtc/v4"
	"github.com/realclientip/realclientip-go"
)

func SerializeSessionDescription(desc *webrtc.SessionDescription) (string, error) {
	bytes, err := json.Marshal(*desc)
	if err != nil {
		return "", err
	}
	return string(bytes), nil
}

func DeserializeSessionDescription(msg string) (*webrtc.SessionDescription, error) {
	var parsed map[string]interface{}
	err := json.Unmarshal([]byte(msg), &parsed)
	if err != nil {
		return nil, err
	}
	if _, ok := parsed["type"]; !ok {
		return nil, errors.New("cannot deserialize SessionDescription without type field")
	}
	if _, ok := parsed["sdp"]; !ok {
		return nil, errors.New("cannot deserialize SessionDescription without sdp field")
	}

	var stype webrtc.SDPType
	switch parsed["type"].(string) {
	default:
		return nil, errors.New("Unknown SDP type")
	case "offer":
		stype = webrtc.SDPTypeOffer
	case "pranswer":
		stype = webrtc.SDPTypePranswer
	case "answer":
		stype = webrtc.SDPTypeAnswer
	case "rollback":
		stype = webrtc.SDPTypeRollback
	}

	return &webrtc.SessionDescription{
		Type: stype,
		SDP:  parsed["sdp"].(string),
	}, nil
}

func IsLocal(ip net.IP) bool {
	if ip.IsPrivate() {
		return true
	}
	// Dynamic Configuration as per https://tools.ietf.org/html/rfc3927
	if ip.IsLinkLocalUnicast() {
		return true
	}
	if ip4 := ip.To4(); ip4 != nil {
		// Carrier-Grade NAT as per https://tools.ietf.org/html/rfc6598
		if ip4[0] == 100 && ip4[1]&0xc0 == 64 {
			return true
		}
	}
	return false
}

// Removes local LAN address ICE candidates
//
// This is unused after https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/merge_requests/442,
// but may come in handy later for https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/issues/40322
// Also this is exported, so let's not remove it at least until
// the next major release.
func StripLocalAddresses(str string) string {
	var desc sdp.SessionDescription
	err := desc.Unmarshal([]byte(str))
	if err != nil {
		return str
	}
	for _, m := range desc.MediaDescriptions {
		attrs := make([]sdp.Attribute, 0)
		for _, a := range m.Attributes {
			if a.IsICECandidate() {
				c, err := ice.UnmarshalCandidate(a.Value)
				if err == nil && c.Type() == ice.CandidateTypeHost {
					ip := net.ParseIP(c.Address())
					if ip != nil && (IsLocal(ip) || ip.IsUnspecified() || ip.IsLoopback()) {
						/* no append in this case */
						continue
					}
				}
			}
			attrs = append(attrs, a)
		}
		m.Attributes = attrs
	}
	bts, err := desc.Marshal()
	if err != nil {
		return str
	}
	return string(bts)
}

// Attempts to determine the IP address of the client from which the HTTP request originated.
// There is no standard way to do this, since the original client IP can be included in a number of different headers,
// depending on the proxies and load balancers between the client and the server. We attempt to check as many of these
// headers as possible to determine a "best guess" of the client IP.
// Using this as a reference: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Forwarded
func GetClientIp(req *http.Request) string {
	// We check the "Forwarded" header first, followed by the "X-Forwarded-For" header, and then use the "RemoteAddr" as
	// a last resort. We use the leftmost address since it is the closest one to the client.
	strat := realclientip.NewChainStrategy(
		realclientip.Must(realclientip.NewLeftmostNonPrivateStrategy("Forwarded")),
		realclientip.Must(realclientip.NewLeftmostNonPrivateStrategy("X-Forwarded-For")),
		realclientip.RemoteAddrStrategy{},
	)
	clientIp := strat.ClientIP(req.Header, req.RemoteAddr)
	return clientIp
}

// Returns a list of IP addresses of ICE candidates, roughly in descending order of accuracy for geolocation
func GetCandidateAddrs(sdpStr string) []net.IP {
	var desc sdp.SessionDescription
	err := desc.Unmarshal([]byte(sdpStr))
	if err != nil {
		log.Printf("GetCandidateAddrs: failed to unmarshal SDP: %v\n", err)
		return []net.IP{}
	}

	iceCandidates := make([]ice.Candidate, 0)

	for _, m := range desc.MediaDescriptions {
		for _, a := range m.Attributes {
			if a.IsICECandidate() {
				c, err := ice.UnmarshalCandidate(a.Value)
				if err == nil {
					iceCandidates = append(iceCandidates, c)
				}
			}
		}
	}

	// ICE candidates are first sorted in ascending order of priority, following the convention of providing a custom
	// Less function to sort.Slice; the slice is then reversed to obtain descending order.
	sort.Slice(iceCandidates, func(i, j int) bool {
		if iceCandidates[i].Type() != iceCandidates[j].Type() {
			// Sort by candidate type first, in the order specified in https://datatracker.ietf.org/doc/html/rfc8445#section-5.1.2.2
			// Higher priority candidate types are more efficient, which likely means they are closer to the client
			// itself, providing a more accurate result for geolocation
			return ice.CandidateType(iceCandidates[i].Type().Preference()) < ice.CandidateType(iceCandidates[j].Type().Preference())
		}
		// Break ties with the ICE candidate's priority property
		return iceCandidates[i].Priority() < iceCandidates[j].Priority()
	})
	slices.Reverse(iceCandidates)

	sortedIpAddr := make([]net.IP, 0)
	for _, c := range iceCandidates {
		ip := net.ParseIP(c.Address())
		if ip != nil {
			sortedIpAddr = append(sortedIpAddr, ip)
		}
	}
	return sortedIpAddr
}
07070100000063000081A400000000000000000000000167D9BD4E00001160000000000000000000000000000000000000002A00000000snowflake-2.11.0/common/util/util_test.gopackage util

import (
	"net"
	"net/http"
	"testing"

	. "github.com/smartystreets/goconvey/convey"
)

func TestUtil(t *testing.T) {
	Convey("Strip", t, func() {
		const offerStart = "v=0\r\no=- 4358805017720277108 2 IN IP4 8.8.8.8\r\ns=-\r\nt=0 0\r\na=group:BUNDLE data\r\na=msid-semantic: WMS\r\nm=application 56688 DTLS/SCTP 5000\r\nc=IN IP4 8.8.8.8\r\n"
		const goodCandidate = "a=candidate:3769337065 1 udp 2122260223 8.8.8.8 56688 typ host generation 0 network-id 1 network-cost 50\r\n"
		const offerEnd = "a=ice-ufrag:aMAZ\r\na=ice-pwd:jcHb08Jjgrazp2dzjdrvPPvV\r\na=ice-options:trickle\r\na=fingerprint:sha-256 C8:88:EE:B9:E7:02:2E:21:37:ED:7A:D1:EB:2B:A3:15:A2:3B:5B:1C:3D:D4:D5:1F:06:CF:52:40:03:F8:DD:66\r\na=setup:actpass\r\na=mid:data\r\na=sctpmap:5000 webrtc-datachannel 1024\r\n"

		offer := offerStart + goodCandidate +
			"a=candidate:3769337065 1 udp 2122260223 192.168.0.100 56688 typ host generation 0 network-id 1 network-cost 50\r\n" + // IsLocal IPv4
			"a=candidate:3769337065 1 udp 2122260223 100.127.50.5 56688 typ host generation 0 network-id 1 network-cost 50\r\n" + // IsLocal IPv4
			"a=candidate:3769337065 1 udp 2122260223 169.254.250.88 56688 typ host generation 0 network-id 1 network-cost 50\r\n" + // IsLocal IPv4
			"a=candidate:3769337065 1 udp 2122260223 fdf8:f53b:82e4::53 56688 typ host generation 0 network-id 1 network-cost 50\r\n" + // IsLocal IPv6
			"a=candidate:3769337065 1 udp 2122260223 0.0.0.0 56688 typ host generation 0 network-id 1 network-cost 50\r\n" + // IsUnspecified IPv4
			"a=candidate:3769337065 1 udp 2122260223 :: 56688 typ host generation 0 network-id 1 network-cost 50\r\n" + // IsUnspecified IPv6
			"a=candidate:3769337065 1 udp 2122260223 127.0.0.1 56688 typ host generation 0 network-id 1 network-cost 50\r\n" + // IsLoopback IPv4
			"a=candidate:3769337065 1 udp 2122260223 ::1 56688 typ host generation 0 network-id 1 network-cost 50\r\n" + // IsLoopback IPv6
			offerEnd

		So(StripLocalAddresses(offer), ShouldEqual, offerStart+goodCandidate+offerEnd)
	})

	Convey("GetClientIp", t, func() {
		// Should use Forwarded header
		req1, _ := http.NewRequest("GET", "https://example.com", nil)
		req1.Header.Add("X-Forwarded-For", "1.1.1.1, 2001:db8:cafe::99%eth0, 3.3.3.3, 192.168.1.1")
		req1.Header.Add("Forwarded", `For=fe80::abcd;By=fe80::1234, Proto=https;For=::ffff:188.0.2.128, For="[2001:db8:cafe::17]:4848", For=fc00::1`)
		req1.RemoteAddr = "192.168.1.2:8888"
		So(GetClientIp(req1), ShouldEqual, "188.0.2.128")

		// Should use X-Forwarded-For header
		req2, _ := http.NewRequest("GET", "https://example.com", nil)
		req2.Header.Add("X-Forwarded-For", "1.1.1.1, 2001:db8:cafe::99%eth0, 3.3.3.3, 192.168.1.1")
		req2.RemoteAddr = "192.168.1.2:8888"
		So(GetClientIp(req2), ShouldEqual, "1.1.1.1")

		// Should use RemoteAddr
		req3, _ := http.NewRequest("GET", "https://example.com", nil)
		req3.RemoteAddr = "192.168.1.2:8888"
		So(GetClientIp(req3), ShouldEqual, "192.168.1.2")

		// Should return empty client IP
		req4, _ := http.NewRequest("GET", "https://example.com", nil)
		So(GetClientIp(req4), ShouldEqual, "")
	})

	Convey("GetCandidateAddrs", t, func() {
		// Should prioritize type in the following order: https://datatracker.ietf.org/doc/html/rfc8445#section-5.1.2.2
		// Break ties using priority value
		const offerStart = "v=0\r\no=- 4358805017720277108 2 IN IP4 8.8.8.8\r\ns=-\r\nt=0 0\r\na=group:BUNDLE data\r\na=msid-semantic: WMS\r\nm=application 56688 DTLS/SCTP 5000\r\nc=IN IP4 8.8.8.8\r\n"
		const offerEnd = "a=ice-ufrag:aMAZ\r\na=ice-pwd:jcHb08Jjgrazp2dzjdrvPPvV\r\na=ice-options:trickle\r\na=fingerprint:sha-256 C8:88:EE:B9:E7:02:2E:21:37:ED:7A:D1:EB:2B:A3:15:A2:3B:5B:1C:3D:D4:D5:1F:06:CF:52:40:03:F8:DD:66\r\na=setup:actpass\r\na=mid:data\r\na=sctpmap:5000 webrtc-datachannel 1024\r\n"

		const sdp = offerStart + "a=candidate:3769337065 1 udp 2122260223 8.8.8.8 56688 typ prflx\r\n" +
			"a=candidate:3769337065 1 udp 2122260223 129.97.124.13 56688 typ relay\r\n" +
			"a=candidate:3769337065 1 udp 2122260223 129.97.124.14 56688 typ srflx\r\n" +
			"a=candidate:3769337065 1 udp 2122260223 129.97.124.15 56688 typ host\r\n" +
			"a=candidate:3769337065 1 udp 2122260224 129.97.124.16 56688 typ host\r\n" + offerEnd

		So(GetCandidateAddrs(sdp), ShouldEqual, []net.IP{
			net.ParseIP("129.97.124.16"),
			net.ParseIP("129.97.124.15"),
			net.ParseIP("8.8.8.8"),
			net.ParseIP("129.97.124.14"),
			net.ParseIP("129.97.124.13"),
		})
	})
}
07070100000064000041ED00000000000000000000000267D9BD4E00000000000000000000000000000000000000000000002000000000snowflake-2.11.0/common/version07070100000065000081A400000000000000000000000167D9BD4E00000064000000000000000000000000000000000000002C00000000snowflake-2.11.0/common/version/combined.gopackage version

func ConstructResult() string {
	return GetVersion() + "\n" + GetVersionDetail()
}
07070100000066000081A400000000000000000000000167D9BD4E000000D5000000000000000000000000000000000000002A00000000snowflake-2.11.0/common/version/detail.gopackage version

import "strings"

var detailBuilder strings.Builder

func AddVersionDetail(detail string) {
	detailBuilder.WriteString(detail)
}

func GetVersionDetail() string {
	return detailBuilder.String()
}
07070100000067000081A400000000000000000000000167D9BD4E0000022A000000000000000000000000000000000000002B00000000snowflake-2.11.0/common/version/version.gopackage version

import (
	"fmt"
	"runtime/debug"
)

var version = func() string {
	ver := "2.11.0"
	if info, ok := debug.ReadBuildInfo(); ok {
		var revision string
		var modified string
		for _, setting := range info.Settings {
			switch setting.Key {
			case "vcs.revision":
				revision = setting.Value[:8]
			case "vcs.modified":
				if setting.Value == "true" {
					modified = "*"
				}
			}
		}
		if revision != "" {
			return fmt.Sprintf("%v (%v%v)", ver, revision, modified)
		}
	}
	return ver
}()

func GetVersion() string {
	return version
}
07070100000068000041ED00000000000000000000000267D9BD4E00000000000000000000000000000000000000000000002600000000snowflake-2.11.0/common/websocketconn07070100000069000081A400000000000000000000000167D9BD4E00000B67000000000000000000000000000000000000003700000000snowflake-2.11.0/common/websocketconn/websocketconn.gopackage websocketconn

import (
	"io"
	"time"

	"github.com/gorilla/websocket"
)

// An abstraction that makes an underlying WebSocket connection look like a
// net.Conn.
type Conn struct {
	*websocket.Conn
	Reader io.Reader
	Writer io.Writer
}

func (conn *Conn) Read(b []byte) (n int, err error) {
	return conn.Reader.Read(b)
}

func (conn *Conn) Write(b []byte) (n int, err error) {
	return conn.Writer.Write(b)
}

func (conn *Conn) Close() error {
	conn.Reader.(*io.PipeReader).Close()
	conn.Writer.(*io.PipeWriter).Close()
	// Ignore any error in trying to write a Close frame.
	_ = conn.Conn.WriteControl(websocket.CloseMessage, []byte{}, time.Now().Add(time.Second))
	return conn.Conn.Close()
}

func (conn *Conn) SetDeadline(t time.Time) error {
	errRead := conn.Conn.SetReadDeadline(t)
	errWrite := conn.Conn.SetWriteDeadline(t)
	err := errRead
	if err == nil {
		err = errWrite
	}
	return err
}

func readLoop(w io.Writer, ws *websocket.Conn) error {
	var buf [2048]byte
	for {
		messageType, r, err := ws.NextReader()
		if err != nil {
			return err
		}
		if messageType != websocket.BinaryMessage && messageType != websocket.TextMessage {
			continue
		}
		_, err = io.CopyBuffer(w, r, buf[:])
		if err != nil {
			return err
		}
	}
}

func writeLoop(ws *websocket.Conn, r io.Reader) error {
	var buf [2048]byte
	for {
		n, err := r.Read(buf[:])
		if err != nil {
			return err
		}
		err = ws.WriteMessage(websocket.BinaryMessage, buf[:n])
		if err != nil {
			return err
		}
	}
}

// websocket.Conn methods start returning websocket.CloseError after the
// connection has been closed. We want to instead interpret that as io.EOF, just
// as you would find with a normal net.Conn. This only converts
// websocket.CloseErrors with known codes; other codes like CloseProtocolError
// and CloseAbnormalClosure will still be reported as anomalous.
func closeErrorToEOF(err error) error {
	if websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseNoStatusReceived) {
		err = io.EOF
	}
	return err
}

// Create a new Conn.
func New(ws *websocket.Conn) *Conn {
	// Set up synchronous pipes to serialize reads and writes to the
	// underlying websocket.Conn.
	//
	// https://godoc.org/github.com/gorilla/websocket#hdr-Concurrency
	// "Connections support one concurrent reader and one concurrent writer.
	// Applications are responsible for ensuring that no more than one
	// goroutine calls the write methods (WriteMessage, etc.) concurrently
	// and that no more than one goroutine calls the read methods
	// (NextReader, etc.) concurrently. The Close and WriteControl methods
	// can be called concurrently with all other methods."
	pr1, pw1 := io.Pipe()
	go func() {
		pw1.CloseWithError(closeErrorToEOF(readLoop(pw1, ws)))
	}()
	pr2, pw2 := io.Pipe()
	go func() {
		pr2.CloseWithError(closeErrorToEOF(writeLoop(ws, pr2)))
	}()
	return &Conn{
		Conn:   ws,
		Reader: pr1,
		Writer: pw2,
	}
}
0707010000006A000081A400000000000000000000000167D9BD4E00002133000000000000000000000000000000000000003C00000000snowflake-2.11.0/common/websocketconn/websocketconn_test.gopackage websocketconn

import (
	"bytes"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"sync"
	"testing"
	"time"

	"github.com/gorilla/websocket"
)

// Returns a (server, client) pair of websocketconn.Conns.
func connPair() (*Conn, *Conn, error) {
	// Will be assigned inside server.Handler.
	var serverConn *Conn

	// Start up a web server to receive the request.
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return nil, nil, err
	}
	defer ln.Close()
	errCh := make(chan error)
	server := http.Server{
		Handler: http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
			upgrader := websocket.Upgrader{
				CheckOrigin: func(*http.Request) bool { return true },
			}
			ws, err := upgrader.Upgrade(rw, req, nil)
			if err != nil {
				errCh <- err
				return
			}
			serverConn = New(ws)
			close(errCh)
		}),
	}
	defer server.Close()
	go func() {
		err := server.Serve(ln)
		if err != nil && err != http.ErrServerClosed {
			errCh <- err
		}
	}()

	// Make a request to the web server.
	urlStr := (&url.URL{Scheme: "ws", Host: ln.Addr().String()}).String()
	ws, _, err := (&websocket.Dialer{}).Dial(urlStr, nil)
	if err != nil {
		return nil, nil, err
	}
	clientConn := New(ws)

	// The server is finished when errCh is written to or closed.
	err = <-errCh
	if err != nil {
		return nil, nil, err
	}
	return serverConn, clientConn, nil
}

// Test that you can write in chunks and read the result concatenated.
func TestWrite(t *testing.T) {
	tests := [][][]byte{
		{},
		{[]byte("foo")},
		{[]byte("foo"), []byte("bar")},
		{{}, []byte("foo"), {}, {}, []byte("bar")},
	}

	for _, test := range tests {
		s, c, err := connPair()
		if err != nil {
			t.Fatal(err)
		}

		// This is a little awkward because we need to read to and write
		// from both ends of the Conn, and we need to do it in separate
		// goroutines because otherwise a Write may block waiting for
		// someone to Read it. Here we set up a loop in a separate
		// goroutine, reading from the Conn s and writing to the dataCh
		// and errCh channels, whose ultimate effect in the select loop
		// below is like
		//   data, err := io.ReadAll(s)
		dataCh := make(chan []byte)
		errCh := make(chan error)
		go func() {
			for {
				var buf [1024]byte
				n, err := s.Read(buf[:])
				if err != nil {
					errCh <- err
					return
				}
				p := make([]byte, n)
				copy(p, buf[:])
				dataCh <- p
			}
		}()

		// Write the data to the client side of the Conn, one chunk at a
		// time.
		for i, chunk := range test {
			n, err := c.Write(chunk)
			if err != nil || n != len(chunk) {
				t.Fatalf("%+q Write chunk %d: got (%d, %v), expected (%d, %v)",
					test, i, n, err, len(chunk), nil)
			}
		}
		// We cannot immediately c.Close here, because that closes the
		// connection right away, without waiting for buffered data to
		// be sent.

		// Pull data and err from the server goroutine above.
		var data []byte
		err = nil
	loop:
		for {
			select {
			case p := <-dataCh:
				data = append(data, p...)
			case err = <-errCh:
				break loop
			case <-time.After(100 * time.Millisecond):
				break loop
			}
		}
		s.Close()
		c.Close()

		// Now data and err contain the result of reading everything
		// from s.
		expected := bytes.Join(test, []byte{})
		if err != nil || !bytes.Equal(data, expected) {
			t.Fatalf("%+q ReadAll: got (%+q, %v), expected (%+q, %v)",
				test, data, err, expected, nil)
		}
	}
}

// Test that multiple goroutines may call Read on a Conn simultaneously. Run
// this with
//
//	go test -race
func TestConcurrentRead(t *testing.T) {
	s, c, err := connPair()
	if err != nil {
		t.Fatal(err)
	}
	defer s.Close()

	// Set up multiple threads reading from the same conn.
	errCh := make(chan error, 2)
	var wg sync.WaitGroup
	wg.Add(2)
	for i := 0; i < 2; i++ {
		go func() {
			defer wg.Done()
			_, err := io.Copy(io.Discard, s)
			if err != nil {
				errCh <- err
			}
		}()
	}

	// Write a bunch of data to the other end.
	for i := 0; i < 2000; i++ {
		_, err := fmt.Fprintf(c, "%d", i)
		if err != nil {
			c.Close()
			t.Fatalf("Write: %v", err)
		}
	}
	c.Close()

	wg.Wait()
	close(errCh)

	err = <-errCh
	if err != nil {
		t.Fatalf("Read: %v", err)
	}
}

// Test that multiple goroutines may call Write on a Conn simultaneously. Run
// this with
//
//	go test -race
func TestConcurrentWrite(t *testing.T) {
	s, c, err := connPair()
	if err != nil {
		t.Fatal(err)
	}

	// Set up multiple threads writing to the same conn.
	errCh := make(chan error, 3)
	var wg sync.WaitGroup
	wg.Add(2)
	for i := 0; i < 2; i++ {
		go func() {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				_, err := fmt.Fprintf(s, "%d", j)
				if err != nil {
					errCh <- err
					break
				}
			}
		}()
	}
	go func() {
		wg.Wait()
		err := s.Close()
		if err != nil {
			errCh <- err
		}
		close(errCh)
	}()

	// Read from the other end.
	_, err = io.Copy(io.Discard, c)
	c.Close()
	if err != nil {
		t.Fatalf("Read: %v", err)
	}

	err = <-errCh
	if err != nil {
		t.Fatalf("Write: %v", err)
	}
}

// Test that Read and Write methods return errors after Close.
func TestClose(t *testing.T) {
	s, c, err := connPair()
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	err = s.Close()
	if err != nil {
		t.Fatal(err)
	}

	var buf [10]byte
	n, err := s.Read(buf[:])
	if n != 0 || err == nil {
		t.Fatalf("Read after Close returned (%v, %v), expected (%v, non-nil)", n, err, 0)
	}

	_, err = s.Write([]byte{1, 2, 3})
	// Here we break the abstraction a little and look for a specific error,
	// io.ErrClosedPipe. This is because we know the Conn uses an io.Pipe
	// internally.
	if err != io.ErrClosedPipe {
		t.Fatalf("Write after Close returned %v, expected %v", err, io.ErrClosedPipe)
	}
}

// Benchmark creating a server websocket.Conn (without the websocketconn.Conn
// wrapper) for different read/write buffer sizes.
func BenchmarkUpgradeBufferSize(b *testing.B) {
	// Buffer size of 0 would mean the default of 4096:
	// https://github.com/gorilla/websocket/blob/v1.5.0/conn.go#L37
	// But a size of zero also has the effect of causing reuse of the HTTP
	// server's buffers. So we test 4096 separately from 0.
	// https://github.com/gorilla/websocket/blob/v1.5.0/server.go#L32
	for _, bufSize := range []int{0, 128, 1024, 2048, 4096, 8192} {
		upgrader := websocket.Upgrader{
			CheckOrigin:     func(*http.Request) bool { return true },
			ReadBufferSize:  bufSize,
			WriteBufferSize: bufSize,
		}
		b.Run(fmt.Sprintf("%d", bufSize), func(b *testing.B) {
			// Start up a web server to receive the request.
			ln, err := net.Listen("tcp", "127.0.0.1:0")
			if err != nil {
				b.Fatal(err)
			}
			defer ln.Close()
			wsCh := make(chan *websocket.Conn)
			errCh := make(chan error)
			server := http.Server{
				Handler: http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
					ws, err := upgrader.Upgrade(rw, req, nil)
					if err != nil {
						errCh <- err
						return
					}
					wsCh <- ws
				}),
			}
			defer server.Close()
			go func() {
				err := server.Serve(ln)
				if err != nil && err != http.ErrServerClosed {
					errCh <- err
				}
			}()

			// Make a request to the web server.
			dialer := &websocket.Dialer{
				ReadBufferSize:  bufSize,
				WriteBufferSize: bufSize,
			}
			urlStr := (&url.URL{Scheme: "ws", Host: ln.Addr().String()}).String()

			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				ws, _, err := dialer.Dial(urlStr, nil)
				if err != nil {
					b.Fatal(err)
				}
				ws.Close()

				select {
				case <-wsCh:
				case err := <-errCh:
					b.Fatal(err)
				}
			}
			b.StopTimer()
		})
	}
}

// Benchmark read/write in the client←server and server←client directions, with
// messages of different sizes. Run with -benchmem to see memory allocations.
func BenchmarkReadWrite(b *testing.B) {
	trial := func(b *testing.B, readConn, writeConn *Conn, msgSize int) {
		go func() {
			io.Copy(io.Discard, readConn)
		}()
		data := make([]byte, msgSize)
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			n, err := writeConn.Write(data[:])
			b.SetBytes(int64(n))
			if err != nil {
				b.Fatal(err)
			}
		}
	}
	for _, msgSize := range []int{150, 3000} {
		s, c, err := connPair()
		if err != nil {
			b.Fatal(err)
		}

		b.Run(fmt.Sprintf("c←s %d", msgSize), func(b *testing.B) {
			trial(b, c, s, msgSize)
		})
		b.Run(fmt.Sprintf("s←c %d", msgSize), func(b *testing.B) {
			trial(b, s, c, msgSize)
		})

		err = s.Close()
		if err != nil {
			b.Fatal(err)
		}
		err = c.Close()
		if err != nil {
			b.Fatal(err)
		}
	}
}
0707010000006B000041ED00000000000000000000000267D9BD4E00000000000000000000000000000000000000000000001500000000snowflake-2.11.0/doc0707010000006C000081A400000000000000000000000167D9BD4E000026E5000000000000000000000000000000000000002500000000snowflake-2.11.0/doc/broker-spec.txt

                            Snowflake broker protocol

0. Scope and Preliminaries

The Snowflake broker is used to hand out Snowflake proxies to clients using the Snowflake pluggable transport. There are some similarities between the function of the broker and the way BridgeDB hands out Tor bridges.

This document specifies how the Snowflake broker interacts with other parts of the Tor ecosystem, starting with the metrics CollecTor module; it will be expanded upon later.

1. Metrics Reporting (version 1.1)

Metrics data from the Snowflake broker can be retrieved by sending an HTTP GET request to https://[Snowflake broker URL]/metrics and consists of the following items:

    "snowflake-stats-end" YYYY-MM-DD HH:MM:SS (NSEC s) NL
        [At start, exactly once.]

        YYYY-MM-DD HH:MM:SS defines the end of the included measurement
        interval of length NSEC seconds (86400 seconds by default).

    "snowflake-ips" [CC=NUM,CC=NUM,...,CC=NUM] NL
        [At most once.]

        List of mappings from two-letter country codes to the number of
        unique IP addresses of Snowflake proxies that have polled. Each
        country code only appears once.

    "snowflake-ips-total" NUM NL
        [At most once.]

        A count of the total number of unique IP addresses of Snowflake
        proxies that have polled.

    "snowflake-ips-standalone" NUM NL
        [At most once.]

        A count of the total number of unique IP addresses of snowflake
        proxies of type "standalone" that have polled.

    "snowflake-ips-badge" NUM NL
        [At most once.]

        A count of the total number of unique IP addresses of snowflake
        proxies of type "badge" that have polled.

    "snowflake-ips-webext" NUM NL
        [At most once.]

        A count of the total number of unique IP addresses of snowflake
        proxies of type "webext" that have polled.

    "snowflake-idle-count" NUM NL
        [At most once.]

        A count of the number of times a proxy has polled but received
        no client offer, rounded up to the nearest multiple of 8.

    "client-denied-count" NUM NL
        [At most once.]

        A count of the number of times a client has requested a proxy
        from the broker but no proxies were available, rounded up to
        the nearest multiple of 8.

    "client-restricted-denied-count" NUM NL
        [At most once.]

        A count of the number of times a client with a restricted or
        unknown NAT type has requested a proxy from the broker but no
        proxies were available, rounded up to the nearest multiple of 8.

    "client-unrestricted-denied-count" NUM NL
        [At most once.]

        A count of the number of times a client with an unrestricted NAT
        type has requested a proxy from the broker but no proxies were
        available, rounded up to the nearest multiple of 8.

    "client-snowflake-match-count" NUM NL
        [At most once.]

        A count of the number of times a client successfully received a
        proxy from the broker, rounded up to the nearest multiple of 8.

    "client-http-count" NUM NL
        [At most once.]

        A count of the number of times a client has requested a proxy using
        the HTTP rendezvous method from the broker, rounded up to the nearest 
        multiple of 8.
    
    "client-http-ips" [CC=NUM,CC=NUM,...,CC=NUM] NL
        [At most once.]

        List of mappings from two-letter country codes to the number of
        times a client has requested a proxy using the HTTP rendezvous method, 
        rounded up to the nearest multiple of 8.  Each country code only appears 
        once.

    "client-ampcache-count" NUM NL
        [At most once.]

        A count of the number of times a client has requested a proxy using
        the ampcache rendezvous method from the broker, rounded up to the 
        nearest multiple of 8.
    
    "client-ampcache-ips" [CC=NUM,CC=NUM,...,CC=NUM] NL
        [At most once.]

        List of mappings from two-letter country codes to the number of
        times a client has requested a proxy using the ampcache rendezvous 
        method, rounded up to the nearest multiple of 8.  Each country code only 
        appears once.

    "client-sqs-count" NUM NL
        [At most once.]

        A count of the number of times a client has requested a proxy using
        the sqs rendezvous method from the broker, rounded up to the nearest 
        multiple of 8.

    "client-sqs-ips" [CC=NUM,CC=NUM,...,CC=NUM] NL
        [At most once.]

        List of mappings from two-letter country codes to the number of
        times a client has requested a proxy using the sqs rendezvous method, 
        rounded up to the nearest multiple of 8.  Each country code only appears 
        once.

    "snowflake-ips-nat-restricted" NUM NL
        [At most once.]

        A count of the total number of unique IP addresses of snowflake
        proxies that have a restricted NAT type.

    "snowflake-ips-nat-unrestricted" NUM NL
        [At most once.]

        A count of the total number of unique IP addresses of snowflake
        proxies that have an unrestricted NAT type.

    "snowflake-ips-nat-unknown" NUM NL
        [At most once.]

        A count of the total number of unique IP addresses of snowflake
        proxies that have an unknown NAT type.

    "snowflake-proxy-poll-with-relay-url-count" NUM NL
        [At most once.]

        A count of snowflake proxy polls with the relay url extension
        present. This means the proxy understands the relay url extension
        and is sending its allowed prefix.

    "snowflake-proxy-poll-without-relay-url-count" NUM NL
        [At most once.]

        A count of snowflake proxy polls with the relay url extension
        absent. This means the proxy has not yet been updated.

    "snowflake-proxy-rejected-for-relay-url-count" NUM NL
        [At most once.]

        A count of snowflake proxy polls whose relay url extension was
        rejected based on the broker's relay url extension policy. This
        means an incompatible allowed relay pattern was included in the
        proxy poll message.
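
    For illustration, a metrics response body might look like the following
    (all values are made up):

        snowflake-stats-end 2024-01-01 00:00:00 (86400 s)
        snowflake-ips CA=8,DE=16,US=32
        snowflake-ips-total 56
        snowflake-ips-standalone 24
        snowflake-idle-count 480
        client-denied-count 8
        client-snowflake-match-count 104
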
2. Broker messaging specification and endpoints

The broker facilitates the connection of snowflake clients and snowflake proxies
through the exchange of WebRTC SDP information with its endpoints.

2.1. Client interactions with the broker

The broker offers multiple ways for clients to exchange registration
messages.

2.1.1. HTTPS POST

Clients interact with the broker by making a POST request to `/client` with the
offer SDP in the request body:
```
POST /client HTTP

[offer SDP]
```
If the broker is accessed through a domain-fronted connection, this request is
accompanied by the necessary HOST information.

If the client is matched up with a proxy, they receive a 200 OK response with
the proxy's answer SDP in the response body:
```
HTTP 200 OK

[answer SDP]
```

If no proxies were available, they receive a 503 status code:
```
HTTP 503 Service Unavailable
```
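
For illustration, a minimal client-side exchange with this endpoint might look
like the following Go sketch. The broker URL and content type are placeholders;
a real client additionally supports domain fronting and includes more
information in its poll message.

```
package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	offerSDP := "..." // the client's WebRTC offer SDP
	resp, err := http.Post("https://broker.example/client", "text/plain",
		strings.NewReader(offerSDP))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	switch resp.StatusCode {
	case http.StatusOK:
		answer, _ := io.ReadAll(resp.Body)
		fmt.Printf("answer SDP: %s\n", answer)
	case http.StatusServiceUnavailable:
		fmt.Println("no proxies available")
	}
}
```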

2.1.2. AMP

The broker's /amp/client endpoint receives client poll messages encoded
into the URL path, and sends client poll responses encoded as HTML that
conforms to the requirements of AMP (Accelerated Mobile Pages). This
endpoint is intended to be accessed through an AMP cache, using the
-ampcache option of snowflake-client.

The client encodes its poll message into a GET request as follows:
```
GET /amp/client/0[0 or more bytes]/[base64 of client poll message]
```
The components of the path are as follows:
* "/amp/client/", the root of the endpoint.
* "0", a format version number, which controls the interpretation of the
  rest of the path. Only the first byte matters as a version indicator
  (not the whole first path component).
* Any number of slash or non-slash bytes. These may be used as padding
  or to prevent cache collisions in the AMP cache.
* A final slash.
* base64 encoding of the client poll message, using the URL-safe
  alphabet (which does not include slash).

The broker returns a client poll response message in the HTTP response.
The message is encoded using AMP armor, an AMP-compatible HTML encoding.
The data stream is notionally a "0" byte (a format version indicator)
followed by the base64 encoding of the message (using the standard
alphabet, with "=" padding). This stream is broken into
whitespace-separated chunks, which are then bundled into HTML <pre>
elements. The <pre> elements are then surrounded by AMP boilerplate. To
decode, search the HTML for <pre> elements, concatenate their contents
and join on whitespace, discard the "0" prefix, and base64 decode.
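
For illustration, the path encoding and response decoding described above can
be sketched in Go as follows. The poll message and HTML response are
placeholders, and a robust decoder would use a real HTML parser rather than a
regular expression.

```
package main

import (
	"encoding/base64"
	"fmt"
	"regexp"
	"strings"
)

func main() {
	pollMsg := []byte("...") // client poll message

	// Encode: "0" version byte, optional padding, a final slash, then the
	// URL-safe base64 encoding of the poll message.
	path := "/amp/client/0/" + base64.URLEncoding.EncodeToString(pollMsg)
	fmt.Println(path)

	// Decode: concatenate the contents of all <pre> elements, remove
	// whitespace, drop the leading "0", and base64 decode the rest using
	// the standard alphabet.
	html := "<pre>0aGVs</pre>\n<pre>bG8=</pre>" // placeholder AMP response
	var b strings.Builder
	for _, m := range regexp.MustCompile(`(?s)<pre>(.*?)</pre>`).FindAllStringSubmatch(html, -1) {
		b.WriteString(m[1])
	}
	joined := strings.Join(strings.Fields(b.String()), "")
	decoded, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(joined, "0"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", decoded) // "hello"
}
```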

2.2 Proxy interactions with the broker

Proxies poll the broker with a proxy poll request to `/proxy`:

```
POST /proxy HTTP

{
  Sid: [generated session id of proxy],
  Version: 1.3,
  Type: ["badge"|"webext"|"standalone"|"mobile"],
  NAT: ["unknown"|"restricted"|"unrestricted"],
  Clients: [number of current clients, rounded down to multiples of 8],
  AcceptedRelayPattern: [a pattern representing accepted set of relay domains]
}
```

If the request is well-formed, they receive a 200 OK response.

If a client is matched:
```
HTTP 200 OK

{
  Status: "client match",
  {
    type: offer,
    sdp: [WebRTC SDP]
  },
  RelayURL: [the WebSocket URL proxy should connect to relay Snowflake traffic]
}
```

If a client is not matched:
```
HTTP 200 OK

{
    Status: "no match"
}
```

If the request is malformed:
```
HTTP 400 BadRequest
```
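
For illustration, the poll request and response handling might look like the
following Go sketch. The field values, content type, and broker URL are
placeholders; real proxies also carry out the answer exchange described below.

```
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

// pollRequest mirrors the fields shown above; names and types are illustrative.
type pollRequest struct {
	Sid                  string
	Version              string
	Type                 string
	NAT                  string
	Clients              int
	AcceptedRelayPattern string
}

func main() {
	body, _ := json.Marshal(pollRequest{
		Sid:                  "example-session-id",
		Version:              "1.3",
		Type:                 "standalone",
		NAT:                  "unrestricted",
		Clients:              0,
		AcceptedRelayPattern: "snowflake.torproject.net$",
	})
	resp, err := http.Post("https://broker.example/proxy", "application/json",
		bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	answer, _ := io.ReadAll(resp.Body)
	fmt.Printf("%s\n", answer) // either a "client match" or "no match" response
}
```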

If they are matched with a client, they provide their SDP answer with a POST
request to `/answer`:
```
POST /answer HTTP

{
  Sid: [generated session id of proxy],
  Version: 1.3,
  Answer:
  {
    type: answer,
    sdp: [WebRTC SDP]
  }
}
```

If the request is well-formed, they receive a 200 OK response.

If the client retrieved the answer:
```
HTTP 200 OK

{
  Status: "success"
}
```

If the client left:
```
HTTP 200 OK

{
  Status: "client gone"
}
```

If the request is malformed:
```
HTTP 400 BadRequest
```
0707010000006D000081A400000000000000000000000167D9BD4E00001095000000000000000000000000000000000000002C00000000snowflake-2.11.0/doc/rendezvous-with-sqs.md# Rendezvous with Amazon SQS
This is a new experimental rendezvous method (in addition to the existing HTTPS and AMP cache methods).
It leverages the Amazon SQS Queue service for a client to communicate with the broker server.

## Broker
To run the broker with this rendezvous method, use the following CLI flags (they are both required):
- `broker-sqs-name` - name of the broker SQS queue to listen for incoming messages
- `broker-sqs-region` - name of AWS region of the SQS queue

These two parameters determine the SQS queue URL that the client must be given (via a CLI flag) in order to communicate with the broker. For example, the following values can be used:

`-broker-sqs-name snowflake-broker -broker-sqs-region us-east-1`

The machine on which the broker is being run must be equipped with the correct AWS configs and credentials that would allow the broker program to create, read from, and write to the SQS queue. These are typically stored at `~/.aws/config` and `~/.aws/credentials`. However, environment variables may also be used as described in the [AWS Docs](https://docs.aws.amazon.com/sdkref/latest/guide/creds-config-files.html).

## Client
To run the client with this rendezvous method, use the following CLI flags (they are all required):
- `sqsqueue` - URL of the SQS queue to use as a proxy for signalling
- `sqscreds` - Encoded credentials for accessing the SQS queue

`sqsqueue` should correspond to the URL of the SQS queue that the broker is listening on. 
For the example above, the following value can be used:

`-sqsqueue https://sqs.us-east-1.amazonaws.com/893902434899/snowflake-broker -sqscreds some-encoded-sqs-creds`

*Public access to SQS queues is not allowed, so there needs to be some form of authentication to be able to access the queue. Limited permission credentials will be provided by the Snowflake team to access the corresponding SQS queue.*

## Implementation Details
```
╭――――――――――――――――――╮     ╭――――――――――――――――――╮     ╭――――――――――――――――――╮     ╭―――――――――――――――――-―╮
│      Client      │ <=> │    Amazon SQS    │ <=> │      Broker      │ <=> │  Snowflake Proxy  │
╰――――――――――――――――――╯     ╰――――――――――――――――――╯     ╰――――――――――――――――――╯     ╰――――――――――――――――――-╯
```

1. On startup, the **broker** ensures that an SQS queue with the name of the `broker-sqs-name` parameter exists. It will create such a queue if it doesn’t exist. Afterwards, it will enter a loop of continuously:
    - polling for new messages
    - cleaning up client queues
2. The **client** sends its SDP offer to the SQS queue at the URL provided by the `sqsqueue` parameter, in a message that contains a unique ID (clientID) for the client along with the contents of the SDP offer. The client randomly generates a new clientID for each rendezvous attempt.
3. The **broker** will receive this message during its polling and process it.
    -  A client SQS queue with the name `"snowflake-client" + clientID` will be created for the broker to send messages to the client. This is needed because if a queue shared between all clients was used for outgoing messages from the server, then clients would have to pick off the top message, check if it is addressed to them, and then process the message if it is. This means clients would possibly have to check many messages before they find the one addressed to them.
    - When the broker has a response for the client, it will send a message to the client queue with the details of the SDP answer.
    - The SDP offer message from the client is then deleted from the broker queue.
4. The **client** will continuously poll its client queue and eventually receive the message with the SDP answer from the broker.
5. The broker server will periodically clean up the unique SQS queues it has created for each client once the queues are no longer needed (it deletes queues that have not been modified within a certain amount of time).0707010000006E000081A400000000000000000000000167D9BD4E00000442000000000000000000000000000000000000002800000000snowflake-2.11.0/doc/snowflake-client.1.TH SNOWFLAKE-CLIENT "1" "July 2021" "snowflake-client" "User Commands"
.SH NAME
snowflake-client \- WebRTC pluggable transport client for Tor
.SH DESCRIPTION
Snowflake helps users circumvent censorship by making a WebRTC
connection to volunteer proxies. These proxies relay Tor traffic to a
Snowflake bridge and then through the Tor network.
.SS "Usage of snowflake-client:"
.HP
\fB\-ampcache\fR string
.IP
URL of AMP cache to use as a proxy for signaling
.HP
\fB\-front\fR string
.IP
front domain
.HP
\fB\-ice\fR string
.IP
comma\-separated list of ICE servers
.HP
\fB\-keep\-local\-addresses\fR
.IP
keep local LAN address ICE candidates
.HP
\fB\-log\fR string
.IP
name of log file
.HP
\fB\-log\-to\-state\-dir\fR
.IP
resolve the log file relative to tor's pt state dir
.HP
\fB\-logToStateDir\fR
.IP
use \fB\-log\-to\-state\-dir\fR instead
.HP
\fB\-max\fR int
.IP
capacity for number of multiplexed WebRTC peers (default 1)
.HP
\fB\-unsafe\-logging\fR
.IP
prevent logs from being scrubbed
.HP
\fB\-url\fR string
.IP
URL of signaling broker
.SH "SEE ALSO"
https://snowflake.torproject.org
0707010000006F000081A400000000000000000000000167D9BD4E000003A9000000000000000000000000000000000000002700000000snowflake-2.11.0/doc/snowflake-proxy.1.TH SNOWFLAKE-PROXY "1" "June 2021" "snowflake-proxy" "User Commands"
.SH NAME
snowflake-proxy \- WebRTC pluggable transport proxy for Tor
.SH DESCRIPTION
Snowflake helps users circumvent censorship by making a WebRTC
connection to volunteer proxies. These proxies relay Tor traffic to a
Snowflake bridge and then through the Tor network.
.SS "Usage of snowflake-proxy:"
.HP
\fB\-broker\fR string
.IP
broker URL (default "https://snowflake\-broker.torproject.net/")
.HP
\fB\-capacity\fR uint
.IP
maximum concurrent clients (default 10)
.HP
\fB\-keep\-local\-addresses\fR
.IP
keep local LAN address ICE candidates
.HP
\fB\-log\fR string
.IP
log filename
.HP
\fB\-relay\fR string
.IP
websocket relay URL (default "wss://snowflake.torproject.net/")
.HP
\fB\-stun\fR string
.IP
stun URL (default "stun:stun.l.google.com:19302")
.HP
\fB\-unsafe\-logging\fR
.IP
prevent logs from being scrubbed
.SH "SEE ALSO"
https://snowflake.torproject.org
07070100000070000081A400000000000000000000000167D9BD4E000011E3000000000000000000000000000000000000003400000000snowflake-2.11.0/doc/using-the-snowflake-library.mdSnowflake is available as a general-purpose pluggable transports library and adheres to the [pluggable transports v2.1 Go API](https://github.com/Pluggable-Transports/Pluggable-Transports-spec/blob/master/releases/PTSpecV2.1/Pluggable%20Transport%20Specification%20v2.1%20-%20Go%20Transport%20API.pdf).

### Client library

The Snowflake client library contains functions for running a Snowflake client.

Example usage:

```Golang
package main

import (
    "log"

    sf "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/client/lib"
)

func main() {

    config := sf.ClientConfig{
        BrokerURL:   "https://snowflake-broker.example.com",
        FrontDomain: "https://friendlyfrontdomain.net",
        ICEAddresses: []string{
            "stun:stun.voip.blackberry.com:3478",
            },
        Max: 1,
    }
    transport, err := sf.NewSnowflakeClient(config)
    if err != nil {
        log.Fatal("Failed to start snowflake transport: ", err)
    }

    // transport implements the ClientFactory interface and returns a net.Conn
    conn, err := transport.Dial()
    if err != nil {
        log.Printf("dial error: %s", err)
        return
    }
    defer conn.Close()

    // ...

}
```

#### Using your own rendezvous method

You can define and use your own rendezvous method to communicate with a Snowflake broker by implementing the `RendezvousMethod` interface.

```Golang

package main

import (
    "log"

    sf "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/client/lib"
)

type StubMethod struct {
}

func (m *StubMethod) Exchange(pollReq []byte) ([]byte, error) {
    var brokerResponse []byte
    var err error

    //Implement the logic you need to communicate with the Snowflake broker here

    return brokerResponse, err
}

func main() {
    config := sf.ClientConfig{
        ICEAddresses:       []string{
            "stun:stun.voip.blackberry.com:3478",
            },
    }
    transport, err := sf.NewSnowflakeClient(config)
    if err != nil {
        log.Fatal("Failed to start snowflake transport: ", err)
    }

    // custom rendezvous methods can be set with `SetRendezvousMethod`
    rendezvous := &StubMethod{}
    transport.SetRendezvousMethod(rendezvous)

    // transport implements the ClientFactory interface and returns a net.Conn
    conn, err := transport.Dial()
    if err != nil {
        log.Printf("dial error: %s", err)
        return
    }
    defer conn.Close()

    // ...

}
```

### Server library

The Snowflake server library contains functions for running a Snowflake server.

Example usage:
```Golang

package main

import (
    "log"
    "net"

    sf "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/server/lib"
    "golang.org/x/crypto/acme/autocert"
)

func main() {

    // The snowflake server runs a websocket server. To run this securely, you will
    // need a valid certificate.
    certManager := &autocert.Manager{
        Prompt:     autocert.AcceptTOS,
        HostPolicy: autocert.HostWhitelist("snowflake.yourdomain.com"),
        Email:      "you@yourdomain.com",
    }

    transport := sf.NewSnowflakeServer(certManager.GetCertificate)

    addr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:443")
    if err != nil {
        log.Printf("error resolving bind address: %s", err.Error())
    }
    numKCPInstances := 1
    ln, err := transport.Listen(addr, numKCPInstances)
    if err != nil {
        log.Printf("error opening listener: %s", err.Error())
    }
    for {
        conn, err := ln.Accept()
        if err != nil {
            if err, ok := err.(net.Error); ok && err.Temporary() {
                continue
            }
            log.Printf("Snowflake accept error: %s", err)
            break
        }
        go func() {
            // ...

            defer conn.Close()
        }()
    }

    // ...

}

```
### Running your own Snowflake infrastructure

At the moment we do not have the ability to share Snowflake infrastructure between different types of applications. If you are planning on using Snowflake as a transport for your application, you will need to:

- Run a Snowflake broker. See our [broker documentation](../broker/) and [installation guide](https://gitlab.torproject.org/tpo/anti-censorship/team/-/wikis/Survival-Guides/Snowflake-Broker-Installation-Guide) for more information

- Run Snowflake proxies. These can be run as [standalone Go proxies](../proxy/) or [browser-based proxies](https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake-webext).
07070100000071000081A400000000000000000000000167D9BD4E00000F4C000000000000000000000000000000000000001800000000snowflake-2.11.0/go.modmodule gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2

go 1.21

require (
	github.com/aws/aws-sdk-go-v2 v1.36.1
	github.com/aws/aws-sdk-go-v2/config v1.29.6
	github.com/aws/aws-sdk-go-v2/credentials v1.17.59
	github.com/aws/aws-sdk-go-v2/service/sqs v1.37.14
	github.com/golang/mock v1.6.0
	github.com/gorilla/websocket v1.5.3
	github.com/miekg/dns v1.1.63
	github.com/pion/ice/v4 v4.0.7
	github.com/pion/sdp/v3 v3.0.11
	github.com/pion/stun/v3 v3.0.0
	github.com/pion/transport/v3 v3.0.7
	github.com/pion/webrtc/v4 v4.0.13
	github.com/prometheus/client_golang v1.21.0
	github.com/realclientip/realclientip-go v1.0.0
	github.com/refraction-networking/utls v1.6.7
	github.com/smartystreets/goconvey v1.8.1
	github.com/stretchr/testify v1.10.0
	github.com/txthinking/socks5 v0.0.0-20230325130024-4230056ae301
	github.com/xtaci/kcp-go/v5 v5.6.8
	github.com/xtaci/smux v1.5.34
	gitlab.torproject.org/tpo/anti-censorship/geoip v0.0.0-20210928150955-7ce4b3d98d01
	gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/goptlib v1.6.0
	gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/ptutil v0.0.0-20250130151315-efaf4e0ec0d3
	golang.org/x/crypto v0.33.0
	golang.org/x/net v0.35.0
	golang.org/x/sys v0.30.0
)

require (
	github.com/andybalholm/brotli v1.0.6 // indirect
	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.28 // indirect
	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32 // indirect
	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32 // indirect
	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.13 // indirect
	github.com/aws/aws-sdk-go-v2/service/sso v1.24.15 // indirect
	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.14 // indirect
	github.com/aws/aws-sdk-go-v2/service/sts v1.33.14 // indirect
	github.com/aws/smithy-go v1.22.2 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/cloudflare/circl v1.3.7 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/google/uuid v1.6.0 // indirect
	github.com/gopherjs/gopherjs v1.17.2 // indirect
	github.com/jtolds/gls v4.20.0+incompatible // indirect
	github.com/klauspost/compress v1.17.11 // indirect
	github.com/klauspost/cpuid/v2 v2.2.6 // indirect
	github.com/klauspost/reedsolomon v1.12.0 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
	github.com/pion/datachannel v1.5.10 // indirect
	github.com/pion/dtls/v3 v3.0.4 // indirect
	github.com/pion/interceptor v0.1.37 // indirect
	github.com/pion/logging v0.2.3 // indirect
	github.com/pion/mdns/v2 v2.0.7 // indirect
	github.com/pion/randutil v0.1.0 // indirect
	github.com/pion/rtcp v1.2.15 // indirect
	github.com/pion/rtp v1.8.12 // indirect
	github.com/pion/sctp v1.8.37 // indirect
	github.com/pion/srtp/v3 v3.0.4 // indirect
	github.com/pion/turn/v4 v4.0.0 // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/prometheus/client_model v0.6.1 // indirect
	github.com/prometheus/common v0.62.0 // indirect
	github.com/prometheus/procfs v0.15.1 // indirect
	github.com/smarty/assertions v1.15.0 // indirect
	github.com/templexxx/cpu v0.1.0 // indirect
	github.com/templexxx/xorsimd v0.4.2 // indirect
	github.com/tjfoc/gmsm v1.4.1 // indirect
	github.com/txthinking/runnergroup v0.0.0-20210608031112-152c7c4432bf // indirect
	github.com/wlynxg/anet v0.0.5 // indirect
	golang.org/x/mod v0.18.0 // indirect
	golang.org/x/sync v0.11.0 // indirect
	golang.org/x/text v0.22.0 // indirect
	golang.org/x/tools v0.22.0 // indirect
	google.golang.org/protobuf v1.36.1 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
)
07070100000072000081A400000000000000000000000167D9BD4E000067A6000000000000000000000000000000000000001800000000snowflake-2.11.0/go.sumcloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/andybalholm/brotli v1.0.6 h1:Yf9fFpf49Zrxb9NlQaluyE92/+X7UVHlhMNJN2sxfOI=
github.com/andybalholm/brotli v1.0.6/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/aws/aws-sdk-go-v2 v1.36.1 h1:iTDl5U6oAhkNPba0e1t1hrwAo02ZMqbrGq4k5JBWM5E=
github.com/aws/aws-sdk-go-v2 v1.36.1/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM=
github.com/aws/aws-sdk-go-v2/config v1.29.6 h1:fqgqEKK5HaZVWLQoLiC9Q+xDlSp+1LYidp6ybGE2OGg=
github.com/aws/aws-sdk-go-v2/config v1.29.6/go.mod h1:Ft+WLODzDQmCTHDvqAH1JfC2xxbZ0MxpZAcJqmE1LTQ=
github.com/aws/aws-sdk-go-v2/credentials v1.17.59 h1:9btwmrt//Q6JcSdgJOLI98sdr5p7tssS9yAsGe8aKP4=
github.com/aws/aws-sdk-go-v2/credentials v1.17.59/go.mod h1:NM8fM6ovI3zak23UISdWidyZuI1ghNe2xjzUZAyT+08=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.28 h1:KwsodFKVQTlI5EyhRSugALzsV6mG/SGrdjlMXSZSdso=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.28/go.mod h1:EY3APf9MzygVhKuPXAc5H+MkGb8k/DOSQjWS0LgkKqI=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32 h1:BjUcr3X3K0wZPGFg2bxOWW3VPN8rkE3/61zhP+IHviA=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32/go.mod h1:80+OGC/bgzzFFTUmcuwD0lb4YutwQeKLFpmt6hoWapU=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32 h1:m1GeXHVMJsRsUAqG6HjZWx9dj7F5TR+cF1bjyfYyBd4=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32/go.mod h1:IitoQxGfaKdVLNg0hD8/DXmAqNy0H4K2H2Sf91ti8sI=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.13 h1:SYVGSFQHlchIcy6e7x12bsrxClCXSP5et8cqVhL8cuw=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.13/go.mod h1:kizuDaLX37bG5WZaoxGPQR/LNFXpxp0vsUnqfkWXfNE=
github.com/aws/aws-sdk-go-v2/service/sqs v1.37.14 h1:KSVbQW2umLp7i4Lo6mvBUz5PqV+Ze/IL6LCTasxQWEk=
github.com/aws/aws-sdk-go-v2/service/sqs v1.37.14/go.mod h1:jiaEkIw2Bb6IsoY9PDAZqVXJjNaKSxQGGj10CiloDWU=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.15 h1:/eE3DogBjYlvlbhd2ssWyeuovWunHLxfgw3s/OJa4GQ=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.15/go.mod h1:2PCJYpi7EKeA5SkStAmZlF6fi0uUABuhtF8ILHjGc3Y=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.14 h1:M/zwXiL2iXUrHputuXgmO94TVNmcenPHxgLXLutodKE=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.14/go.mod h1:RVwIw3y/IqxC2YEXSIkAzRDdEU1iRabDPaYjpGCbCGQ=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.14 h1:TzeR06UCMUq+KA3bDkujxK1GVGy+G8qQN/QVYzGLkQE=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.14/go.mod h1:dspXf/oYWGWo6DEvj98wpaTeqt5+DMidZD0A9BYTizc=
github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ=
github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc=
github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/klauspost/reedsolomon v1.12.0 h1:I5FEp3xSwVCcEh3F5A7dofEfhXdF/bWhQWPH+XwBFno=
github.com/klauspost/reedsolomon v1.12.0/go.mod h1:EPLZJeh4l27pUGC3aXOjheaoh1I9yut7xTURiW3LQ9Y=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/miekg/dns v1.1.51/go.mod h1:2Z9d3CP1LQWihRZUf29mQ19yDThaI4DAYzte2CaQW5c=
github.com/miekg/dns v1.1.63 h1:8M5aAw6OMZfFXTT7K5V0Eu5YiiL8l7nUAkyN6C9YwaY=
github.com/miekg/dns v1.1.63/go.mod h1:6NGHfjhpmr5lt3XPLuyfDJi5AXbNIPM9PY6H6sF1Nfs=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o=
github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M=
github.com/pion/dtls/v3 v3.0.4 h1:44CZekewMzfrn9pmGrj5BNnTMDCFwr+6sLH+cCuLM7U=
github.com/pion/dtls/v3 v3.0.4/go.mod h1:R373CsjxWqNPf6MEkfdy3aSe9niZvL/JaKlGeFphtMg=
github.com/pion/ice/v4 v4.0.7 h1:mnwuT3n3RE/9va41/9QJqN5+Bhc0H/x/ZyiVlWMw35M=
github.com/pion/ice/v4 v4.0.7/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw=
github.com/pion/interceptor v0.1.37 h1:aRA8Zpab/wE7/c0O3fh1PqY0AJI3fCSEM5lRWJVorwI=
github.com/pion/interceptor v0.1.37/go.mod h1:JzxbJ4umVTlZAf+/utHzNesY8tmRkM2lVmkS82TTj8Y=
github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI=
github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90=
github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM=
github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA=
github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo=
github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0=
github.com/pion/rtp v1.8.12 h1:nsKs8Wi0jQyBFHU3qmn/OvtZrhktVfJY0vRxwACsL5U=
github.com/pion/rtp v1.8.12/go.mod h1:8uMBJj32Pa1wwx8Fuv/AsFhn8jsgw+3rUC2PfoBZ8p4=
github.com/pion/sctp v1.8.37 h1:ZDmGPtRPX9mKCiVXtMbTWybFw3z/hVKAZgU81wcOrqs=
github.com/pion/sctp v1.8.37/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE=
github.com/pion/sdp/v3 v3.0.11 h1:VhgVSopdsBKwhCFoyyPmT1fKMeV9nLMrEKxNOdy3IVI=
github.com/pion/sdp/v3 v3.0.11/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E=
github.com/pion/srtp/v3 v3.0.4 h1:2Z6vDVxzrX3UHEgrUyIGM4rRouoC7v+NiF1IHtp9B5M=
github.com/pion/srtp/v3 v3.0.4/go.mod h1:1Jx3FwDoxpRaTh1oRV8A/6G1BnFL+QI82eK4ms8EEJQ=
github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw=
github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU=
github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0=
github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
github.com/pion/turn/v4 v4.0.0 h1:qxplo3Rxa9Yg1xXDxxH8xaqcyGUtbHYw4QSCvmFWvhM=
github.com/pion/turn/v4 v4.0.0/go.mod h1:MuPDkm15nYSklKpN8vWJ9W2M0PlyQZqYt1McGuxG7mA=
github.com/pion/webrtc/v4 v4.0.13 h1:XuUaWTjRufsiGJRC+G71OgiSMe7tl7mQ0kkd4bAqIaQ=
github.com/pion/webrtc/v4 v4.0.13/go.mod h1:Fadzxm0CbY99YdCEfxrgiVr0L4jN1l8bf8DBkPPpJbs=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.21.0 h1:DIsaGmiaBkSangBgMtWdNfxbMNdku5IK6iNhrEqWvdA=
github.com/prometheus/client_golang v1.21.0/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/realclientip/realclientip-go v1.0.0 h1:+yPxeC0mEaJzq1BfCt2h4BxlyrvIIBzR6suDc3BEF1U=
github.com/realclientip/realclientip-go v1.0.0/go.mod h1:CXnUdVwFRcXFJIRb/dTYqbT7ud48+Pi2pFm80bxDmcI=
github.com/refraction-networking/utls v1.6.7 h1:zVJ7sP1dJx/WtVuITug3qYUq034cDq9B2MR1K67ULZM=
github.com/refraction-networking/utls v1.6.7/go.mod h1:BC3O4vQzye5hqpmDTWUqi4P5DDhzJfkV1tdqtawQIH0=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY=
github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY=
github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/templexxx/cpu v0.1.0 h1:wVM+WIJP2nYaxVxqgHPD4wGA2aJ9rvrQRV8CvFzNb40=
github.com/templexxx/cpu v0.1.0/go.mod h1:w7Tb+7qgcAlIyX4NhLuDKt78AHA5SzPmq0Wj6HiEnnk=
github.com/templexxx/xorsimd v0.4.2 h1:ocZZ+Nvu65LGHmCLZ7OoCtg8Fx8jnHKK37SjvngUoVI=
github.com/templexxx/xorsimd v0.4.2/go.mod h1:HgwaPoDREdi6OnULpSfxhzaiiSUY4Fi3JPn1wpt28NI=
github.com/tjfoc/gmsm v1.4.1 h1:aMe1GlZb+0bLjn+cKTPEvvn9oUEBlJitaZiiBwsbgho=
github.com/tjfoc/gmsm v1.4.1/go.mod h1:j4INPkHWMrhJb38G+J6W4Tw0AbuN8Thu3PbdVYhVcTE=
github.com/txthinking/runnergroup v0.0.0-20210608031112-152c7c4432bf h1:7PflaKRtU4np/epFxRXlFhlzLXZzKFrH5/I4so5Ove0=
github.com/txthinking/runnergroup v0.0.0-20210608031112-152c7c4432bf/go.mod h1:CLUSJbazqETbaR+i0YAhXBICV9TrKH93pziccMhmhpM=
github.com/txthinking/socks5 v0.0.0-20230325130024-4230056ae301 h1:d/Wr/Vl/wiJHc3AHYbYs5I3PucJvRuw3SvbmlIRf+oM=
github.com/txthinking/socks5 v0.0.0-20230325130024-4230056ae301/go.mod h1:ntmMHL/xPq1WLeKiw8p/eRATaae6PiVRNipHFJxI8PM=
github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU=
github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
github.com/xtaci/kcp-go/v5 v5.6.8 h1:jlI/0jAyjoOjT/SaGB58s4bQMJiNS41A2RKzR6TMWeI=
github.com/xtaci/kcp-go/v5 v5.6.8/go.mod h1:oE9j2NVqAkuKO5o8ByKGch3vgVX3BNf8zqP8JiGq0bM=
github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae h1:J0GxkO96kL4WF+AIT3M4mfUVinOCPgf2uUWYFUzN0sM=
github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae/go.mod h1:gXtu8J62kEgmN++bm9BVICuT/e8yiLI2KFobd/TRFsE=
github.com/xtaci/smux v1.5.34 h1:OUA9JaDFHJDT8ZT3ebwLWPAgEfE6sWo2LaTy3anXqwg=
github.com/xtaci/smux v1.5.34/go.mod h1:OMlQbT5vcgl2gb49mFkYo6SMf+zP3rcjcwQz7ZU7IGY=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
gitlab.torproject.org/tpo/anti-censorship/geoip v0.0.0-20210928150955-7ce4b3d98d01 h1:4949mHh9Vj2/okk48yG8nhP6TosFWOUfSfSr502sKGE=
gitlab.torproject.org/tpo/anti-censorship/geoip v0.0.0-20210928150955-7ce4b3d98d01/go.mod h1:K3LOI4H8fa6j+7E10ViHeGEQV10304FG4j94ypmKLjY=
gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/goptlib v1.6.0 h1:KD9m+mRBwtEdqe94Sv72uiedMWeRdIr4sXbrRyzRiIo=
gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/goptlib v1.6.0/go.mod h1:70bhd4JKW/+1HLfm+TMrgHJsUHG4coelMWwiVEJ2gAg=
gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/ptutil v0.0.0-20250130151315-efaf4e0ec0d3 h1:pwWCiqrB6b3SynILsv3M+76utmcgMiTZ2aqfccjWmxo=
gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/ptutil v0.0.0-20250130151315-efaf4e0ec0d3/go.mod h1:PK7EvweKeypdelDyh1m7N922aldSeCAG8n0lJ7RAXWQ=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus=
golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
07070100000073000041ED00000000000000000000000267D9BD4E00000000000000000000000000000000000000000000001B00000000snowflake-2.11.0/probetest07070100000074000081A400000000000000000000000167D9BD4E00000029000000000000000000000000000000000000002600000000snowflake-2.11.0/probetest/DockerfileFROM golang:1.23

COPY probetest /go/bin
07070100000075000081A400000000000000000000000167D9BD4E0000074B000000000000000000000000000000000000002500000000snowflake-2.11.0/probetest/README.md<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents**

- [Overview](#overview)
- [Running your own](#running-your-own)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->

This is code for a remote probe test component of Snowflake.

### Overview

This is a probe test server to allow proxies to test their compatibility
with Snowflake. Right now the only type of test implemented is a
compatibility check for clients with symmetric NATs.

### Running your own

The server uses TLS by default.
There is a `--disable-tls` option for testing purposes,
but you should use TLS in production.

To build the probe server, run
```go build```
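
For quick local testing without TLS (using the `-disable-tls` and `-addr` flags defined in `probetest.go`; not for production use), the server can also be started directly with
```./probetest -addr :8443 -disable-tls```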

To deploy the probe server, first set the necessary env variables with
```
export HOSTNAMES=${YOUR HOSTNAMES}
export EMAIL=${YOUR EMAIL}
```
then run ```docker-compose up```

Setting up a symmetric NAT configuration requires a few extra steps. After
bringing up the Docker container, run
```docker inspect snowflake-probetest```
to find the subnet used by the probetest container. Then run
```sudo iptables -L -t nat``` to find the POSTROUTING rules for that subnet.
It should look something like this:
```
Chain POSTROUTING (policy ACCEPT)
target     prot opt source               destination
MASQUERADE  all  --  172.19.0.0/16        anywhere
```
To modify this rule, execute the command
```sudo iptables -t nat -R POSTROUTING $RULE_NUM -s 172.19.0.0/16 -j MASQUERADE --random```
where `RULE_NUM` is the number of the POSTROUTING rule that corresponds to your Docker container's subnet masquerade rule.
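If you are unsure which rule number to use, listing the chain with line numbers (a standard iptables option) will show it:
```sudo iptables -L POSTROUTING -t nat --line-numbers```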
Afterwards, you should see the rule changed to be:
```
Chain POSTROUTING (policy ACCEPT)
target     prot opt source               destination
MASQUERADE  all  --  172.19.0.0/16        anywhere      random
```
07070100000076000081A400000000000000000000000167D9BD4E0000018E000000000000000000000000000000000000002E00000000snowflake-2.11.0/probetest/docker-compose.yml version: "3.8"

 services:
    snowflake-probetest:
        build: .
        container_name: snowflake-probetest
        ports:
         - "8443:8443"
        volumes:
        - /home/snowflake-broker/acme-cert-cache:/go/bin/acme-cert-cache
        entrypoint: [ "probetest" , "-addr", ":8443" , "-acme-hostnames", $HOSTNAMES, "-acme-email", $EMAIL, "-acme-cert-cache", "/go/bin/acme-cert-cache"]
07070100000077000081A400000000000000000000000167D9BD4E0000273B000000000000000000000000000000000000002800000000snowflake-2.11.0/probetest/probetest.go/*
Probe test server to check the reachability of Snowflake proxies from
clients with symmetric NATs.

The probe server receives an offer from a proxy, returns an answer, and then
attempts to establish a datachannel connection to that proxy. The proxy will
self-determine whether the connection opened successfully.
*/
package main

import (
	"crypto/tls"
	"flag"
	"fmt"
	"io"
	"log"
	"net"
	"net/http"
	"os"
	"strings"
	"time"

	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/ptutil/safelog"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/messages"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/util"

	"github.com/pion/transport/v3/stdnet"
	"github.com/pion/webrtc/v4"
	"golang.org/x/crypto/acme/autocert"
)

const (
	// Maximum number of bytes to be read from an HTTP request
	readLimit = 100000
	// Time after which we assume proxy data channel will not open
	dataChannelOpenTimeout = 20 * time.Second
	// How long to wait after the data channel has been open before closing the peer connection.
	dataChannelCloseTimeout = 5 * time.Second
	// Default STUN URL
	defaultStunUrls = "stun:stun.l.google.com:19302,stun:stun.voip.blackberry.com:3478"
)

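// ProbeHandler pairs the probe HTTP handler function with the STUN URL(s)
// it should use when creating peer connections.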
type ProbeHandler struct {
	stunURL string
	handle  func(string, http.ResponseWriter, *http.Request)
}

func (h ProbeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	h.handle(h.stunURL, w, r)
}

// Create a PeerConnection from an SDP offer. Blocks until the gathering of ICE
// candidates is complete and the answer is available in LocalDescription.
func makePeerConnectionFromOffer(stunURL string, sdp *webrtc.SessionDescription,
	dataChanOpen chan struct{}, dataChanClosed chan struct{}, iceGatheringTimeout time.Duration) (*webrtc.PeerConnection, error) {

	settingsEngine := webrtc.SettingEngine{}

	settingsEngine.SetIPFilter(func(ip net.IP) (keep bool) {
		// `IsLoopback()` and `IsUnspecified()` are likely not needed here,
		// but let's keep them just in case.
		// FYI there is similar code in other files in this project.
		keep = !util.IsLocal(ip) && !ip.IsLoopback() && !ip.IsUnspecified()
		return
	})
	// FYI this is `false` by default anyway as of pion/webrtc@4
	settingsEngine.SetIncludeLoopbackCandidate(false)

	// Use the SetNet setting https://pkg.go.dev/github.com/pion/webrtc/v3#SettingEngine.SetNet
	// to functionally revert a new change in pion by silently ignoring
	// when net.Interfaces() fails, rather than throwing an error
	vnet, _ := stdnet.NewNet()
	settingsEngine.SetNet(vnet)
	api := webrtc.NewAPI(webrtc.WithSettingEngine(settingsEngine))

	config := webrtc.Configuration{
		ICEServers: []webrtc.ICEServer{
			{
				URLs: strings.Split(stunURL, ","),
			},
		},
	}
	pc, err := api.NewPeerConnection(config)
	if err != nil {
		return nil, fmt.Errorf("accept: NewPeerConnection: %s", err)
	}
	pc.OnDataChannel(func(dc *webrtc.DataChannel) {
		dc.OnOpen(func() {
			close(dataChanOpen)
		})
		dc.OnClose(func() {
			close(dataChanClosed)
			dc.Close()
		})
	})
	// As of v3.0.0, pion-webrtc uses trickle ICE by default.
	// We have to wait for candidate gathering to complete
	// before we send the offer
	done := webrtc.GatheringCompletePromise(pc)
	err = pc.SetRemoteDescription(*sdp)
	if err != nil {
		if inerr := pc.Close(); inerr != nil {
			log.Printf("unable to call pc.Close after pc.SetRemoteDescription with error: %v", inerr)
		}
		return nil, fmt.Errorf("accept: SetRemoteDescription: %s", err)
	}

	answer, err := pc.CreateAnswer(nil)
	if err != nil {
		if inerr := pc.Close(); inerr != nil {
			log.Printf("ICE gathering has generated an error when calling pc.Close: %v", inerr)
		}
		return nil, err
	}

	err = pc.SetLocalDescription(answer)
	if err != nil {
		if err = pc.Close(); err != nil {
			log.Printf("pc.Close after setting local description returned: %v", err)
		}
		return nil, err
	}

	// Wait for ICE candidate gathering to complete,
	// or for whatever we managed to gather before the client times out.
	// See https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/issues/40230
	select {
	case <-done:
	case <-time.After(iceGatheringTimeout):
	}
	return pc, nil
}

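// probeHandler reads an SDP offer from the request body, responds with an SDP
// answer, and then waits in a separate goroutine to learn whether the proxy
// manages to open a data channel before closing the peer connection.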
func probeHandler(stunURL string, w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Access-Control-Allow-Origin", "*")
	resp, err := io.ReadAll(http.MaxBytesReader(w, r.Body, readLimit))
	if nil != err {
		log.Println("Invalid data.")
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	offer, _, err := messages.DecodePollResponse(resp)
	if err != nil {
		log.Printf("Error reading offer: %s", err.Error())
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	if offer == "" {
		log.Printf("Error processing session description: received an empty offer")
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	sdp, err := util.DeserializeSessionDescription(offer)
	if err != nil {
		log.Printf("Error processing session description: %s", err.Error())
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	dataChanOpen := make(chan struct{})
	dataChanClosed := make(chan struct{})
	// TODO refactor (DRY): this value must be kept below `ResponseHeaderTimeout` in the proxy
	// https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/blob/e1d9b4ace69897521cc29585b5084c5f4d1ce874/proxy/lib/snowflake.go#L207
	iceGatheringTimeout := 10 * time.Second
	pc, err := makePeerConnectionFromOffer(stunURL, sdp, dataChanOpen, dataChanClosed, iceGatheringTimeout)
	if err != nil {
		log.Printf("Error making WebRTC connection: %s", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	// We'll set this to `false` if the signaling (this function) succeeds.
	closePcOnReturn := true
	defer func() {
		if closePcOnReturn {
			if err := pc.Close(); err != nil {
				log.Printf("Error calling pc.Close: %v", err)
			}
		}
		// Otherwise it must be closed below, wherever `closePcOnReturn` is set to `false`.
	}()

	answer, err := util.SerializeSessionDescription(pc.LocalDescription())
	if err != nil {
		log.Printf("Error making WebRTC connection: %s", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	body, err := messages.EncodeAnswerRequest(answer, "stub-sid")
	if err != nil {
		log.Printf("Error making WebRTC connection: %s", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	w.Write(body)
	// Set a timeout on peerconnection. If the connection state has not
	// advanced to PeerConnectionStateConnected in this time,
	// destroy the peer connection and return the token.
	closePcOnReturn = false
	go func() {
		timer := time.NewTimer(dataChannelOpenTimeout)
		defer timer.Stop()

		select {
		case <-dataChanOpen:
			// Let's not close the `PeerConnection` immediately now,
			// instead let's wait for the peer (or timeout)
			// to close the connection,
			// in order to ensure that the DataChannel also gets opened
			// on the proxy's side.
			// Otherwise the proxy might receive the "close PeerConnection"
			// "event" before they receive "dataChannel.OnOpen",
			// which would wrongly result in a "restricted" NAT.
			// See https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/issues/40387
			select {
			case <-dataChanClosed:
			case <-time.After(dataChannelCloseTimeout):
			}
		case <-timer.C:
		}

		if err := pc.Close(); err != nil {
			log.Printf("Error calling pc.Close: %v", err)
		}
	}()
}

func main() {
	var acmeEmail string
	var acmeHostnamesCommas string
	var acmeCertCacheDir string
	var addr string
	var disableTLS bool
	var certFilename, keyFilename string
	var unsafeLogging bool
	var stunURL string

	flag.StringVar(&acmeEmail, "acme-email", "", "optional contact email for Let's Encrypt notifications")
	flag.StringVar(&acmeHostnamesCommas, "acme-hostnames", "", "comma-separated hostnames for TLS certificate")
	flag.StringVar(&acmeCertCacheDir, "acme-cert-cache", "acme-cert-cache", "directory in which certificates should be cached")
	flag.StringVar(&certFilename, "cert", "", "TLS certificate file")
	flag.StringVar(&keyFilename, "key", "", "TLS private key file")
	flag.StringVar(&addr, "addr", ":8443", "address to listen on")
	flag.BoolVar(&disableTLS, "disable-tls", false, "don't use HTTPS")
	flag.BoolVar(&unsafeLogging, "unsafe-logging", false, "prevent logs from being scrubbed")
	flag.StringVar(&stunURL, "stun", defaultStunUrls, "STUN servers to use for NAT traversal (comma-separated)")
	flag.Parse()

	var logOutput io.Writer = os.Stderr
	if unsafeLogging {
		log.SetOutput(logOutput)
	} else {
		// Scrub log output just in case an address ends up there
		log.SetOutput(&safelog.LogScrubber{Output: logOutput})
	}

	log.SetFlags(log.LstdFlags | log.LUTC)

	http.Handle("/probe", ProbeHandler{stunURL, probeHandler})

	server := http.Server{
		Addr: addr,
	}

	var err error
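	// Three serving modes: ACME/Let's Encrypt when -acme-hostnames is set,
	// a static certificate when -cert and -key are given, or plain HTTP
	// when -disable-tls is set.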
	if acmeHostnamesCommas != "" {
		acmeHostnames := strings.Split(acmeHostnamesCommas, ",")
		log.Printf("ACME hostnames: %q", acmeHostnames)

		var cache autocert.Cache
		if err = os.MkdirAll(acmeCertCacheDir, 0700); err != nil {
			log.Printf("Warning: Couldn't create cache directory %q (reason: %s) so we're *not* using our certificate cache.", acmeCertCacheDir, err)
		} else {
			cache = autocert.DirCache(acmeCertCacheDir)
		}

		certManager := autocert.Manager{
			Cache:      cache,
			Prompt:     autocert.AcceptTOS,
			HostPolicy: autocert.HostWhitelist(acmeHostnames...),
			Email:      acmeEmail,
		}
		// start certificate manager handler
		go func() {
			log.Printf("Starting HTTP-01 listener")
			log.Fatal(http.ListenAndServe(":80", certManager.HTTPHandler(nil)))
		}()

		server.TLSConfig = &tls.Config{GetCertificate: certManager.GetCertificate}
		err = server.ListenAndServeTLS("", "")
	} else if certFilename != "" && keyFilename != "" {
		err = server.ListenAndServeTLS(certFilename, keyFilename)
	} else if disableTLS {
		err = server.ListenAndServe()
	} else {
		log.Fatal("the --cert and --key, --acme-hostnames, or --disable-tls option is required")
	}

	if err != nil {
		log.Println(err)
	}
}
07070100000078000041ED00000000000000000000000267D9BD4E00000000000000000000000000000000000000000000001700000000snowflake-2.11.0/proxy07070100000079000081A400000000000000000000000167D9BD4E000013FB000000000000000000000000000000000000002100000000snowflake-2.11.0/proxy/README.md<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents**

- [Dependencies](#dependencies)
- [Building the standalone Snowflake proxy](#building-the-standalone-snowflake-proxy)
- [Running a standalone Snowflake proxy](#running-a-standalone-snowflake-proxy)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->

This is a standalone (not browser-based) version of the Snowflake proxy. For browser-based versions of the Snowflake proxy, see https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake-webext.

### Dependencies

- Go 1.15+
- We use the [pion/webrtc](https://github.com/pion/webrtc) library for WebRTC communication with Snowflake clients. Note: running `go get` will fetch this dependency automatically during the build process.

### Building the standalone Snowflake proxy

To build the Snowflake proxy, make sure you are in the `proxy/` directory, and then run:

```
go get
go build
```

### Running a standalone Snowflake proxy

The Snowflake proxy can be run with the following options:

<!-- These are generated with `go run . --help` -->

```
Usage of ./proxy:
  -allow-non-tls-relay
        allow this proxy to pass client's data to the relay in an unencrypted form.
        This is only useful if the relay doesn't support encryption, e.g. for testing / development purposes.
  -allow-proxying-to-private-addresses
        allow forwarding client connections to private IP addresses.
        Useful when a Snowflake server (relay) is hosted on the same private network as this proxy.
  -allowed-relay-hostname-pattern string
        this proxy will only be allowed to forward client connections to relays (servers) whose URL matches this pattern.
        Note that a pattern "example.com$" will match "subdomain.example.com" as well as "other-domain-example.com".
        In order to only match "example.com", prefix the pattern with "^": "^example.com$" (default "snowflake.torproject.net$")
  -broker URL
        The URL of the broker server that the proxy will be using to find clients (default "https://snowflake-broker.torproject.net/")
  -capacity uint
        maximum concurrent clients (default is to accept an unlimited number of clients)
  -disable-stats-logger
        disable the exposing mechanism for stats using logs
  -ephemeral-ports-range range
        Set the range of ports used for client connections (format:"<min>:<max>").
        Useful in conjunction with port forwarding, in order to make the proxy NAT type "unrestricted".
        If omitted, the ports will be chosen automatically from a wide range.
        When specifying the range, make sure it's at least 2x as wide as the amount of clients that you are hoping to serve concurrently (see the "capacity" flag).
  -keep-local-addresses
        keep local LAN address ICE candidates.
        This is usually pointless because Snowflake clients don't usually reside on the same local network as the proxy.
  -log filename
        log filename. If not specified, logs will be output to stderr (console).
  -metrics
        enable the exposing mechanism for stats using metrics
  -metrics-address address
        set listen address for metrics service (default "localhost")
  -metrics-port int
        set port for the metrics service (default 9999)
  -nat-probe-server URL
        The URL of the server that this proxy will use to check its network NAT type.
        Determining NAT type helps to understand whether this proxy is compatible with certain clients' NAT (default "https://snowflake-broker.torproject.net:8443/probe")
  -nat-retest-interval duration
        the time interval between NAT type retests (see "nat-probe-server"). 0s disables retesting. Valid time units are "s", "m", "h". (default 24h0m0s)
  -outbound-address address
        prefer the given address as outbound address for client connections
  -poll-interval duration
        how often to ask the broker for a new client. Keep in mind that asking for a client will not always result in getting one. Minimum value is 2s. Valid time units are "ms", "s", "m", "h". (default 5s)
  -relay URL
        The default URL of the server (relay) that this proxy will forward client connections to, in case the broker itself did not specify the said URL (default "wss://snowflake.torproject.net/")
  -stun URL
        STUN server `URL` that this proxy will use to, among other things, determine its public IP address (default "stun:stun.l.google.com:19302")
  -summary-interval duration
        the time interval between summary log outputs, 0s disables summaries. Valid time units are "s", "m", "h". (default 1h0m0s)
  -unsafe-logging
        keep IP addresses and other sensitive info in the logs
  -verbose
        increase log verbosity
  -version
        display version info to stderr and quit
```
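
As a minimal example (the flag values below are purely illustrative), a proxy capped at 10 concurrent clients, writing its logs to a file and printing a summary every 30 minutes, could be started with:

```
./proxy -capacity 10 -summary-interval 30m -log snowflake-proxy.log
```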

For more information on how to run a Snowflake proxy in deployment, see our [community documentation](https://community.torproject.org/relay/setup/snowflake/standalone/).
0707010000007A000041ED00000000000000000000000267D9BD4E00000000000000000000000000000000000000000000001B00000000snowflake-2.11.0/proxy/lib0707010000007B000081A400000000000000000000000167D9BD4E00000B43000000000000000000000000000000000000002600000000snowflake-2.11.0/proxy/lib/metrics.gopackage snowflake_proxy

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

const (
	// metricNamespace represent prometheus namespace
	metricNamespace = "tor_snowflake_proxy"
)

type Metrics struct {
	totalInBoundTraffic    prometheus.Counter
	totalOutBoundTraffic   prometheus.Counter
	totalConnections       *prometheus.CounterVec
	totalFailedConnections prometheus.Counter
}

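// NewMetrics initialises the set of Prometheus counters exported by the
// standalone proxy.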
func NewMetrics() *Metrics {
	return &Metrics{
		totalConnections: prometheus.NewCounterVec(prometheus.CounterOpts{
			Namespace: metricNamespace,
			Name:      "connections_total",
			Help:      "The total number of successful connections handled by the snowflake proxy",
		},
			[]string{"country"},
		),
		totalFailedConnections: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: metricNamespace,
			Name:      "connection_timeouts_total",
			Help:      "The total number of client connection attempts that failed after successful rendezvous. Note that failures can occur for reasons outside of the proxy's control, such as the client's NAT and censorship situation.",
		}),
		totalInBoundTraffic: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: metricNamespace,
			Name:      "traffic_inbound_bytes_total",
			Help:      "The total in bound traffic by the snowflake proxy (KB)",
		}),
		totalOutBoundTraffic: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: metricNamespace,
			Name:      "traffic_outbound_bytes_total",
			Help:      "The total out bound traffic by the snowflake proxy (KB)",
		}),
	}
}

// Start registers the metrics collector and serves the metrics endpoint on the given address
func (m *Metrics) Start(addr string) error {
	go func() {
		http.Handle("/internal/metrics", promhttp.Handler())
		if err := http.ListenAndServe(addr, nil); err != nil {
			panic(err)
		}
	}()

	return prometheus.Register(m)
}

func (m *Metrics) Collect(ch chan<- prometheus.Metric) {
	m.totalConnections.Collect(ch)
	m.totalFailedConnections.Collect(ch)
	m.totalInBoundTraffic.Collect(ch)
	m.totalOutBoundTraffic.Collect(ch)
}

func (m *Metrics) Describe(descs chan<- *prometheus.Desc) {
	prometheus.DescribeByCollect(m, descs)
}

// TrackInBoundTraffic counts the traffic received by the snowflake proxy
func (m *Metrics) TrackInBoundTraffic(value int64) {
	m.totalInBoundTraffic.Add(float64(value))
}

// TrackOutBoundTraffic counts the transmitted traffic by the snowflake proxy
func (m *Metrics) TrackOutBoundTraffic(value int64) {
	m.totalOutBoundTraffic.Add(float64(value))
}

// TrackNewConnection counts the new connections
func (m *Metrics) TrackNewConnection(country string) {
	m.totalConnections.
		With(prometheus.Labels{"country": country}).
		Inc()
}

// TrackFailedConnection counts failed connection attempts
func (m *Metrics) TrackFailedConnection() {
	m.totalFailedConnections.Inc()
}
0707010000007C000081A400000000000000000000000167D9BD4E00004956000000000000000000000000000000000000002C00000000snowflake-2.11.0/proxy/lib/proxy-go_test.gopackage snowflake_proxy

import (
	"bytes"
	"fmt"
	"io"
	"net"
	"net/http"
	"strconv"
	"strings"
	"testing"

	"github.com/pion/webrtc/v4"
	. "github.com/smartystreets/goconvey/convey"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/messages"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/util"
)

// Set up a mock broker to communicate with
type MockTransport struct {
	statusOverride int
	body           []byte
}

// Just returns a response with a fake SDP answer.
func (m *MockTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	s := io.NopCloser(bytes.NewReader(m.body))
	r := &http.Response{
		StatusCode: m.statusOverride,
		Body:       s,
	}
	return r, nil
}

// Set up a mock faulty transport
type FaultyTransport struct {
	statusOverride int
	body           []byte
}

// Always returns an error, simulating a transport failure.
func (f *FaultyTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	return nil, fmt.Errorf("TransportFailed")
}

func TestRemoteIPFromSDP(t *testing.T) {
	tests := []struct {
		sdp      string
		expected net.IP
	}{
		// https://tools.ietf.org/html/rfc4566#section-5
		{`v=0
o=jdoe 2890844526 2890842807 IN IP4 10.47.16.5
s=SDP Seminar
i=A Seminar on the session description protocol
u=http://www.example.com/seminars/sdp.pdf
e=j.doe@example.com (Jane Doe)
c=IN IP4 224.2.17.12/127
t=2873397496 2873404696
a=recvonly
m=audio 49170 RTP/AVP 0
m=video 51372 RTP/AVP 99
a=rtpmap:99 h263-1998/90000
`, net.ParseIP("224.2.17.12")},
		// local addresses only
		{`v=0
o=jdoe 2890844526 2890842807 IN IP4 10.47.16.5
s=SDP Seminar
i=A Seminar on the session description protocol
u=http://www.example.com/seminars/sdp.pdf
e=j.doe@example.com (Jane Doe)
c=IN IP4 10.47.16.5/127
t=2873397496 2873404696
a=recvonly
m=audio 49170 RTP/AVP 0
m=video 51372 RTP/AVP 99
a=rtpmap:99 h263-1998/90000
`, nil},
		// Remote IP in candidate attribute only
		{`v=0
o=- 4358805017720277108 2 IN IP4 0.0.0.0
s=-
t=0 0
a=group:BUNDLE data
a=msid-semantic: WMS
m=application 56688 DTLS/SCTP 5000
c=IN IP4 0.0.0.0
a=candidate:3769337065 1 udp 2122260223 1.2.3.4 56688 typ host generation 0 network-id 1 network-cost 50
a=ice-ufrag:aMAZ
a=ice-pwd:jcHb08Jjgrazp2dzjdrvPPvV
a=ice-options:trickle
a=fingerprint:sha-256 C8:88:EE:B9:E7:02:2E:21:37:ED:7A:D1:EB:2B:A3:15:A2:3B:5B:1C:3D:D4:D5:1F:06:CF:52:40:03:F8:DD:66
a=setup:actpass
a=mid:data
a=sctpmap:5000 webrtc-datachannel 1024
`, net.ParseIP("1.2.3.4")},
		// Unspecified address
		{`v=0
o=jdoe 2890844526 2890842807 IN IP4 0.0.0.0
s=SDP Seminar
i=A Seminar on the session description protocol
u=http://www.example.com/seminars/sdp.pdf
e=j.doe@example.com (Jane Doe)
t=2873397496 2873404696
a=recvonly
m=audio 49170 RTP/AVP 0
m=video 51372 RTP/AVP 99
a=rtpmap:99 h263-1998/90000
`, nil},
		// Missing c= line
		{`v=0
o=jdoe 2890844526 2890842807 IN IP4 10.47.16.5
s=SDP Seminar
i=A Seminar on the session description protocol
u=http://www.example.com/seminars/sdp.pdf
e=j.doe@example.com (Jane Doe)
t=2873397496 2873404696
a=recvonly
m=audio 49170 RTP/AVP 0
m=video 51372 RTP/AVP 99
a=rtpmap:99 h263-1998/90000
`, nil},
		// Single line, IP address only
		{`v=0
o=- 4358805017720277108 2 IN IP4 0.0.0.0
s=-
t=0 0
a=group:BUNDLE data
a=msid-semantic: WMS
m=application 56688 DTLS/SCTP 5000
c=IN IP4 224.2.1.1
`, net.ParseIP("224.2.1.1")},
		// Same, with TTL
		{`v=0
o=- 4358805017720277108 2 IN IP4 0.0.0.0
s=-
t=0 0
a=group:BUNDLE data
a=msid-semantic: WMS
m=application 56688 DTLS/SCTP 5000
c=IN IP4 224.2.1.1/127
`, net.ParseIP("224.2.1.1")},
		// Same, with TTL and multicast addresses
		{`v=0
o=- 4358805017720277108 2 IN IP4 0.0.0.0
s=-
t=0 0
a=group:BUNDLE data
a=msid-semantic: WMS
m=application 56688 DTLS/SCTP 5000
c=IN IP4 224.2.1.1/127/3
`, net.ParseIP("224.2.1.1")},
		// IPv6, address only
		{`v=0
o=- 4358805017720277108 2 IN IP4 0.0.0.0
s=-
t=0 0
a=group:BUNDLE data
a=msid-semantic: WMS
m=application 56688 DTLS/SCTP 5000
c=IN IP6 FF15::101
`, net.ParseIP("ff15::101")},
		// Same, with multicast addresses
		{`v=0
o=- 4358805017720277108 2 IN IP4 0.0.0.0
s=-
t=0 0
a=group:BUNDLE data
a=msid-semantic: WMS
m=application 56688 DTLS/SCTP 5000
c=IN IP6 FF15::101/3
`, net.ParseIP("ff15::101")},
		// Multiple c= lines
		{`v=0
o=- 4358805017720277108 2 IN IP4 0.0.0.0
s=-
t=0 0
a=group:BUNDLE data
a=msid-semantic: WMS
m=application 56688 DTLS/SCTP 5000
c=IN IP4 1.2.3.4
c=IN IP4 5.6.7.8
`, net.ParseIP("1.2.3.4")},
		// Modified from SDP sent by snowflake-client.
		{`v=0
o=- 7860378660295630295 2 IN IP4 127.0.0.1
s=-
t=0 0
a=group:BUNDLE data
a=msid-semantic: WMS
m=application 54653 DTLS/SCTP 5000
c=IN IP4 1.2.3.4
a=candidate:3581707038 1 udp 2122260223 192.168.0.1 54653 typ host generation 0 network-id 1 network-cost 50
a=candidate:2617212910 1 tcp 1518280447 192.168.0.1 59673 typ host tcptype passive generation 0 network-id 1 network-cost 50
a=candidate:2082671819 1 udp 1686052607 1.2.3.4 54653 typ srflx raddr 192.168.0.1 rport 54653 generation 0 network-id 1 network-cost 50
a=ice-ufrag:IBdf
a=ice-pwd:G3lTrrC9gmhQx481AowtkhYz
a=fingerprint:sha-256 53:F8:84:D9:3C:1F:A0:44:AA:D6:3C:65:80:D3:CB:6F:23:90:17:41:06:F9:9C:10:D8:48:4A:A8:B6:FA:14:A1
a=setup:actpass
a=mid:data
a=sctpmap:5000 webrtc-datachannel 1024
`, net.ParseIP("1.2.3.4")},
		// Improper character within IPv4
		{`v=0
o=- 4358805017720277108 2 IN IP4 0.0.0.0
s=-
t=0 0
a=group:BUNDLE data
a=msid-semantic: WMS
m=application 56688 DTLS/SCTP 5000
c=IN IP4 224.2z.1.1
`, nil},
		// Improper character within IPv6
		{`v=0
o=- 4358805017720277108 2 IN IP4 0.0.0.0
s=-
t=0 0
a=group:BUNDLE data
a=msid-semantic: WMS
m=application 56688 DTLS/SCTP 5000
c=IN IP6 ff15:g::101
`, nil},
		// Bogus "IP7" addrtype
		{`v=0
o=- 4358805017720277108 2 IN IP4 0.0.0.0
s=-
t=0 0
a=group:BUNDLE data
a=msid-semantic: WMS
m=application 56688 DTLS/SCTP 5000
c=IN IP7 1.2.3.4
`, nil},
	}

	for _, test := range tests {
		// https://tools.ietf.org/html/rfc4566#section-5: "The sequence
		// CRLF (0x0d0a) is used to end a record, although parsers
		// SHOULD be tolerant and also accept records terminated with a
		// single newline character." We represent the test cases with
		// LF line endings for convenience, and test them both that way
		// and with CRLF line endings.
		lfSDP := test.sdp
		crlfSDP := strings.Replace(lfSDP, "\n", "\r\n", -1)

		ip := remoteIPFromSDP(lfSDP)
		if !ip.Equal(test.expected) {
			t.Errorf("expected %q, got %q from %q", test.expected, ip, lfSDP)
		}
		ip = remoteIPFromSDP(crlfSDP)
		if !ip.Equal(test.expected) {
			t.Errorf("expected %q, got %q from %q", test.expected, ip, crlfSDP)
		}
	}
}

func TestSessionDescriptions(t *testing.T) {
	Convey("Session description deserialization", t, func() {
		for _, test := range []struct {
			msg string
			ret *webrtc.SessionDescription
		}{
			{
				"test",
				nil,
			},
			{
				`{"type":"answer"}`,
				nil,
			},
			{
				`{"sdp":"test"}`,
				nil,
			},
			{
				`{"type":"test", "sdp":"test"}`,
				nil,
			},
			{
				`{"type":"answer", "sdp":"test"}`,
				&webrtc.SessionDescription{
					Type: webrtc.SDPTypeAnswer,
					SDP:  "test",
				},
			},
			{
				`{"type":"pranswer", "sdp":"test"}`,
				&webrtc.SessionDescription{
					Type: webrtc.SDPTypePranswer,
					SDP:  "test",
				},
			},
			{
				`{"type":"rollback", "sdp":"test"}`,
				&webrtc.SessionDescription{
					Type: webrtc.SDPTypeRollback,
					SDP:  "test",
				},
			},
			{
				`{"type":"offer", "sdp":"test"}`,
				&webrtc.SessionDescription{
					Type: webrtc.SDPTypeOffer,
					SDP:  "test",
				},
			},
		} {
			desc, _ := util.DeserializeSessionDescription(test.msg)
			So(desc, ShouldResemble, test.ret)
		}
	})
	Convey("Session description serialization", t, func() {
		for _, test := range []struct {
			desc *webrtc.SessionDescription
			ret  string
		}{
			{
				&webrtc.SessionDescription{
					Type: webrtc.SDPTypeOffer,
					SDP:  "test",
				},
				`{"type":"offer","sdp":"test"}`,
			},
		} {
			msg, err := util.SerializeSessionDescription(test.desc)
			So(msg, ShouldResemble, test.ret)
			So(err, ShouldBeNil)
		}
	})
}

func TestBrokerInteractions(t *testing.T) {
	const sampleSDP = `"v=0\r\no=- 4358805017720277108 2 IN IP4 8.8.8.8\r\ns=-\r\nt=0 0\r\na=group:BUNDLE data\r\na=msid-semantic: WMS\r\nm=application 56688 DTLS/SCTP 5000\r\nc=IN IP4 8.8.8.8\r\na=candidate:3769337065 1 udp 2122260223 8.8.8.8 56688 typ host generation 0 network-id 1 network-cost 50\r\na=candidate:2921887769 1 tcp 1518280447 8.8.8.8 35441 typ host tcptype passive generation 0 network-id 1 network-cost 50\r\na=ice-ufrag:aMAZ\r\na=ice-pwd:jcHb08Jjgrazp2dzjdrvPPvV\r\na=ice-options:trickle\r\na=fingerprint:sha-256 C8:88:EE:B9:E7:02:2E:21:37:ED:7A:D1:EB:2B:A3:15:A2:3B:5B:1C:3D:D4:D5:1F:06:CF:52:40:03:F8:DD:66\r\na=setup:actpass\r\na=mid:data\r\na=sctpmap:5000 webrtc-datachannel 1024\r\n"`

	const sampleOffer = `{"type":"offer","sdp":` + sampleSDP + `}`
	const sampleAnswer = `{"type":"answer","sdp":` + sampleSDP + `}`

	Convey("Proxy connections to broker", t, func() {
		var err error
		broker, err = newSignalingServer("localhost")
		So(err, ShouldBeNil)
		tokens = newTokens(0)

		//Mock peerConnection
		config = webrtc.Configuration{
			ICEServers: []webrtc.ICEServer{
				{
					URLs: []string{"stun:stun.l.google.com:19302"},
				},
			},
		}
		pc, _ := webrtc.NewPeerConnection(config)
		offer, _ := util.DeserializeSessionDescription(sampleOffer)
		pc.SetRemoteDescription(*offer)
		answer, _ := pc.CreateAnswer(nil)
		pc.SetLocalDescription(answer)

		Convey("polls broker correctly", func() {
			var err error

			b, err := messages.EncodePollResponse(sampleOffer, true, "unknown")
			So(err, ShouldBeNil)
			broker.transport = &MockTransport{
				http.StatusOK,
				b,
			}

			sdp, _ := broker.pollOffer(sampleOffer, DefaultProxyType, "")
			expectedSDP, _ := strconv.Unquote(sampleSDP)
			So(sdp.SDP, ShouldResemble, expectedSDP)
		})
		Convey("handles poll error", func() {
			var err error

			b := []byte("test")
			So(err, ShouldBeNil)
			broker.transport = &MockTransport{
				http.StatusOK,
				b,
			}

			sdp, _ := broker.pollOffer(sampleOffer, DefaultProxyType, "")
			So(sdp, ShouldBeNil)
		})
		Convey("sends answer to broker", func() {
			var err error

			b, err := messages.EncodeAnswerResponse(true)
			So(err, ShouldBeNil)
			broker.transport = &MockTransport{
				http.StatusOK,
				b,
			}

			err = broker.sendAnswer(sampleAnswer, pc)
			So(err, ShouldBeNil)

			b, err = messages.EncodeAnswerResponse(false)
			So(err, ShouldBeNil)
			broker.transport = &MockTransport{
				http.StatusOK,
				b,
			}

			err = broker.sendAnswer(sampleAnswer, pc)
			So(err, ShouldNotBeNil)
		})
		Convey("handles answer error", func() {
			//Error if faulty transport
			broker.transport = &FaultyTransport{}
			err := broker.sendAnswer(sampleAnswer, pc)
			So(err, ShouldNotBeNil)

			//Error if status code is not ok
			broker.transport = &MockTransport{
				http.StatusGone,
				[]byte(""),
			}
			err = broker.sendAnswer("test", pc)
			So(err, ShouldNotEqual, nil)
			So(err.Error(), ShouldResemble,
				"error sending answer to broker: remote returned status code 410")

			//Error if we can't parse broker message
			broker.transport = &MockTransport{
				http.StatusOK,
				[]byte("test"),
			}
			err = broker.sendAnswer("test", pc)
			So(err, ShouldNotBeNil)

			//Error if broker message surpasses read limit
			broker.transport = &MockTransport{
				http.StatusOK,
				make([]byte, 100001),
			}
			err = broker.sendAnswer("test", pc)
			So(err, ShouldNotBeNil)
		})
	})
}

func TestUtilityFuncs(t *testing.T) {
	Convey("LimitedRead", t, func() {
		c, s := net.Pipe()
		Convey("Successful read", func() {
			go func() {
				bytes := make([]byte, 50)
				c.Write(bytes)
				c.Close()
			}()
			bytes, err := limitedRead(s, 60)
			So(len(bytes), ShouldEqual, 50)
			So(err, ShouldBeNil)
		})
		Convey("Large read", func() {
			go func() {
				bytes := make([]byte, 50)
				c.Write(bytes)
				c.Close()
			}()
			bytes, err := limitedRead(s, 49)
			So(len(bytes), ShouldEqual, 49)
			So(err, ShouldEqual, io.ErrUnexpectedEOF)
		})
		Convey("Failed read", func() {
			s.Close()
			bytes, err := limitedRead(s, 49)
			So(len(bytes), ShouldEqual, 0)
			So(err, ShouldEqual, io.ErrClosedPipe)
		})
	})
	Convey("SessionID Generation", t, func() {
		sid1 := genSessionID()
		sid2 := genSessionID()
		So(sid1, ShouldNotEqual, sid2)
	})
	Convey("CopyLoop", t, func() {
		c1, s1 := net.Pipe()
		c2, s2 := net.Pipe()
		go copyLoop(s1, s2, nil)
		go func() {
			bytes := []byte("Hello!")
			c1.Write(bytes)
		}()
		bytes := make([]byte, 6)
		n, err := c2.Read(bytes)
		So(n, ShouldEqual, 6)
		So(err, ShouldBeNil)
		So(bytes, ShouldResemble, []byte("Hello!"))
		s1.Close()

		//Check that copy loop has closed other connection
		_, err = s2.Write(bytes)
		So(err, ShouldNotBeNil)
	})
	Convey("isRelayURLAcceptable", t, func() {
		testingVector := []struct {
			pattern               string
			allowPrivateAddresses bool
			allowNonTLS           bool
			targetURL             string
			expects               error
		}{
			// These are copied from `TestMatchMember`.
			{pattern: "^snowflake.torproject.net$", allowNonTLS: false, targetURL: "wss://snowflake.torproject.net", expects: nil},
			{pattern: "^snowflake.torproject.net$", allowNonTLS: false, targetURL: "wss://faketorproject.net", expects: fmt.Errorf("")},
			{pattern: "snowflake.torproject.net$", allowNonTLS: false, targetURL: "wss://faketorproject.net", expects: fmt.Errorf("")},
			{pattern: "snowflake.torproject.net$", allowNonTLS: false, targetURL: "wss://snowflake.torproject.net", expects: nil},
			{pattern: "snowflake.torproject.net$", allowNonTLS: false, targetURL: "wss://imaginary-01-snowflake.torproject.net", expects: nil},
			{pattern: "snowflake.torproject.net$", allowNonTLS: false, targetURL: "wss://imaginary-aaa-snowflake.torproject.net", expects: nil},
			{pattern: "snowflake.torproject.net$", allowNonTLS: false, targetURL: "wss://imaginary-aaa-snowflake.faketorproject.net", expects: fmt.Errorf("")},

			{pattern: "^torproject.net$", allowNonTLS: false, targetURL: "wss://faketorproject.net", expects: fmt.Errorf("")},
			// Yes, this is how it works if there is no "^".
			{pattern: "torproject.net$", allowNonTLS: false, targetURL: "wss://faketorproject.net", expects: nil},

			// NonTLS
			{pattern: "snowflake.torproject.net$", allowNonTLS: false, targetURL: "ws://snowflake.torproject.net", expects: fmt.Errorf("")},
			{pattern: "snowflake.torproject.net$", allowNonTLS: true, targetURL: "ws://snowflake.torproject.net", expects: nil},

			// Sneaky attempt to use path
			{pattern: "snowflake.torproject.net$", allowNonTLS: false, targetURL: "wss://evil.com/snowflake.torproject.net", expects: fmt.Errorf("")},
			{pattern: "snowflake.torproject.net$", allowNonTLS: false, targetURL: "wss://evil.com/?test=snowflake.torproject.net", expects: fmt.Errorf("")},

			// IP address
			{pattern: "^1.1.1.1$", allowNonTLS: true, targetURL: "ws://1.1.1.1/test?test=test#test", expects: nil},
			{pattern: "^1.1.1.1$", allowNonTLS: true, targetURL: "ws://231.1.1.1/test?test=test#test", expects: fmt.Errorf("")},
			{pattern: "1.1.1.1$", allowNonTLS: true, targetURL: "ws://231.1.1.1/test?test=test#test", expects: nil},
			// Private IP address
			{pattern: "$", allowNonTLS: true, targetURL: "ws://192.168.1.1", expects: fmt.Errorf("")},
			{pattern: "$", allowNonTLS: true, targetURL: "ws://127.0.0.1", expects: fmt.Errorf("")},
			{pattern: "$", allowNonTLS: true, targetURL: "ws://[fc00::]/", expects: fmt.Errorf("")},
			{pattern: "$", allowNonTLS: true, targetURL: "ws://[::1]/", expects: fmt.Errorf("")},
			{pattern: "$", allowNonTLS: true, targetURL: "ws://0.0.0.0/", expects: fmt.Errorf("")},
			{pattern: "$", allowNonTLS: true, targetURL: "ws://169.254.1.1/", expects: fmt.Errorf("")},
			{pattern: "$", allowNonTLS: true, targetURL: "ws://100.111.1.1/", expects: fmt.Errorf("")},
			{pattern: "192.168.1.100$", allowPrivateAddresses: true, allowNonTLS: true, targetURL: "ws://192.168.1.100/test?test=test", expects: nil},
			{pattern: "localhost$", allowPrivateAddresses: true, allowNonTLS: true, targetURL: "ws://localhost/test?test=test", expects: nil},
			{pattern: "::1$", allowPrivateAddresses: true, allowNonTLS: true, targetURL: "ws://[::1]/test?test=test", expects: nil},
			// Broadcast IP address (255.255.255.255). `checkIsRelayURLAcceptable` allows it,
			// but it's not valid in the context of WebSocket
			{pattern: "255.255.255.255$", allowPrivateAddresses: true, allowNonTLS: true, targetURL: "ws://255.255.255.255/test?test=test", expects: nil},

			// Port
			{pattern: "^snowflake.torproject.net$", allowNonTLS: false, targetURL: "wss://snowflake.torproject.net:8080/test?test=test#test", expects: nil},
			// This currently doesn't work as we only check hostname.
			// {pattern: "^snowflake.torproject.net:443$", allowNonTLS: false, targetURL: "wss://snowflake.torproject.net:443", expects: nil},
			// {pattern: "^snowflake.torproject.net:443$", allowNonTLS: false, targetURL: "wss://snowflake.torproject.net:9999", expects: fmt.Errorf("")},

			// Any URL
			{pattern: "$", allowNonTLS: false, targetURL: "wss://any.com/test?test=test#test", expects: nil},
			{pattern: "$", allowNonTLS: false, targetURL: "wss://1.1.1.1/test?test=test#test", expects: nil},

			// Weird / invalid / ambiguous URL
			{pattern: "$", allowNonTLS: true, targetURL: "snowflake.torproject.net", expects: fmt.Errorf("")},
			{pattern: "$", allowNonTLS: true, targetURL: "//snowflake.torproject.net", expects: fmt.Errorf("")},
			{pattern: "$", allowNonTLS: true, targetURL: "/path", expects: fmt.Errorf("")},
			{pattern: "$", allowNonTLS: true, targetURL: "wss://snowflake.torproject .net", expects: fmt.Errorf("")},
			{pattern: "$", allowNonTLS: true, targetURL: "wss://😀", expects: nil},
			{pattern: "$", allowNonTLS: true, targetURL: "wss://пример.рф", expects: nil},

			// Non-websocket protocols
			{pattern: "snowflake.torproject.net$", allowNonTLS: false, targetURL: "https://snowflake.torproject.net", expects: fmt.Errorf("")},
			{pattern: "snowflake.torproject.net$", allowNonTLS: false, targetURL: "ftp://snowflake.torproject.net", expects: fmt.Errorf("")},
			{pattern: "snowflake.torproject.net$", allowNonTLS: true, targetURL: "https://snowflake.torproject.net", expects: fmt.Errorf("")},
			{pattern: "snowflake.torproject.net$", allowNonTLS: true, targetURL: "ftp://snowflake.torproject.net", expects: fmt.Errorf("")},
		}
		for _, v := range testingVector {
			err := checkIsRelayURLAcceptable(v.pattern, v.allowPrivateAddresses, v.allowNonTLS, v.targetURL)
			if v.expects != nil {
				So(err, ShouldNotBeNil)
			} else {
				So(err, ShouldBeNil)
			}
		}
	})
}
0707010000007D000081A400000000000000000000000167D9BD4E00000ADD000000000000000000000000000000000000002E00000000snowflake-2.11.0/proxy/lib/pt_event_logger.gopackage snowflake_proxy

import (
	"io"
	"log"
	"sync/atomic"
	"time"

	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/event"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/task"
)

func NewProxyEventLogger(output io.Writer, disableStats bool) event.SnowflakeEventReceiver {
	logger := log.New(output, "", log.Flags())
	return &proxyEventLogger{logger: logger, disableStats: disableStats}
}

type proxyEventLogger struct {
	logger       *log.Logger
	disableStats bool
}

func (p *proxyEventLogger) OnNewSnowflakeEvent(e event.SnowflakeEvent) {
	switch e.(type) {
	case event.EventOnProxyStarting:
		p.logger.Println(e.String())

		if p.logger.Flags()&log.LUTC == 0 {
			p.logger.Println("Local time is being used for logging. If you want to " +
				"share your log, consider to modify the date/time for more anonymity.")
		}
	case event.EventOnProxyStats:
		if !p.disableStats {
			p.logger.Println(e.String())
		}
	case event.EventOnCurrentNATTypeDetermined:
		p.logger.Println(e.String())
	default:
		// Suppress logs of these events
		// https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/issues/40310
		// https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/issues/40413
	}
}

type periodicProxyStats struct {
	bytesLogger bytesLogger
	// Completed successful connections.
	connectionCount atomic.Int32
	// Connections that failed to establish.
	failedConnectionCount atomic.Uint32
	logPeriod             time.Duration
	task                  *task.Periodic
	dispatcher            event.SnowflakeEventDispatcher
}

func newPeriodicProxyStats(logPeriod time.Duration, dispatcher event.SnowflakeEventDispatcher, bytesLogger bytesLogger) *periodicProxyStats {
	el := &periodicProxyStats{logPeriod: logPeriod, dispatcher: dispatcher, bytesLogger: bytesLogger}
	el.task = &task.Periodic{Interval: logPeriod, Execute: el.logTick}
	el.task.WaitThenStart()
	return el
}

func (p *periodicProxyStats) OnNewSnowflakeEvent(e event.SnowflakeEvent) {
	switch e.(type) {
	case event.EventOnProxyConnectionOver:
		p.connectionCount.Add(1)
	case event.EventOnProxyConnectionFailed:
		p.failedConnectionCount.Add(1)
	}
}

func (p *periodicProxyStats) logTick() error {
	inboundSum, outboundSum := p.bytesLogger.GetStat()
	e := event.EventOnProxyStats{
		SummaryInterval:       p.logPeriod,
		ConnectionCount:       int(p.connectionCount.Swap(0)),
		FailedConnectionCount: uint(p.failedConnectionCount.Swap(0)),
	}
	e.InboundBytes, e.InboundUnit = formatTraffic(inboundSum)
	e.OutboundBytes, e.OutboundUnit = formatTraffic(outboundSum)
	p.dispatcher.OnNewSnowflakeEvent(e)
	return nil
}

func (p *periodicProxyStats) Close() error {
	return p.task.Close()
}
0707010000007E000081A400000000000000000000000167D9BD4E000003A6000000000000000000000000000000000000002F00000000snowflake-2.11.0/proxy/lib/pt_event_metrics.gopackage snowflake_proxy

import (
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/event"
)

type EventCollector interface {
	TrackInBoundTraffic(value int64)
	TrackOutBoundTraffic(value int64)
	TrackNewConnection(country string)
	TrackFailedConnection()
}

type EventMetrics struct {
	collector EventCollector
}

func NewEventMetrics(collector EventCollector) *EventMetrics {
	return &EventMetrics{collector: collector}
}

func (em *EventMetrics) OnNewSnowflakeEvent(e event.SnowflakeEvent) {
	switch e.(type) {
	case event.EventOnProxyStats:
		e := e.(event.EventOnProxyStats)
		em.collector.TrackInBoundTraffic(e.InboundBytes)
		em.collector.TrackOutBoundTraffic(e.OutboundBytes)
	case event.EventOnProxyConnectionOver:
		e := e.(event.EventOnProxyConnectionOver)
		em.collector.TrackNewConnection(e.Country)
	case event.EventOnProxyConnectionFailed:
		em.collector.TrackFailedConnection()
	}
}
0707010000007F000081A400000000000000000000000167D9BD4E00007457000000000000000000000000000000000000002800000000snowflake-2.11.0/proxy/lib/snowflake.go/*
Package snowflake_proxy provides functionality for creating, starting, and stopping a snowflake
proxy.

To run a proxy, you must first create a proxy configuration. Unconfigured fields
will be set to the defined defaults.

	proxy := snowflake_proxy.SnowflakeProxy{
		BrokerURL: "https://snowflake-broker.example.com",
		STUNURL: "stun:stun.l.google.com:19302",
		// ...
	}

You may then start and stop the proxy. Stopping the proxy will close existing connections and
the proxy will not poll for more clients.

	go func() {
		err := proxy.Start()
		// handle error
	}

	// ...

	proxy.Stop()
*/
package snowflake_proxy

import (
	"bytes"
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"io"
	"log"
	"net"
	"net/http"
	"net/url"
	"reflect"
	"strings"
	"sync"
	"time"

	"github.com/pion/ice/v4"

	"github.com/gorilla/websocket"
	"github.com/pion/transport/v3/stdnet"
	"github.com/pion/webrtc/v4"

	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/constants"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/event"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/messages"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/namematcher"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/task"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/util"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/websocketconn"
)

const (
	DefaultPollInterval = 5 * time.Second
	DefaultBrokerURL    = "https://snowflake-broker.torproject.net/"
	DefaultNATProbeURL  = "https://snowflake-broker.torproject.net:8443/probe"
	// This is rather a "DefaultDefaultRelayURL"
	DefaultRelayURL  = "wss://snowflake.torproject.net/"
	DefaultSTUNURL   = "stun:stun.l.google.com:19302,stun:stun.voip.blackberry.com:3478"
	DefaultProxyType = "standalone"
)

const (
	// NATUnknown is set if the proxy cannot connect to probetest.
	NATUnknown = "unknown"

	// NATRestricted is set if the proxy times out when connecting to a symmetric NAT.
	NATRestricted = "restricted"

	// NATUnrestricted is set if the proxy successfully connects to a symmetric NAT.
	NATUnrestricted = "unrestricted"
)

const (
	// Amount of time after sending an SDP answer before the proxy assumes the
	// client is not going to connect
	dataChannelTimeout = 20 * time.Second

	// Maximum number of bytes to be read from an HTTP request
	readLimit = 100000

	sessionIDLength = 16
)

const bufferedAmountLowThreshold uint64 = 256 * 1024 // 256 KB

var broker *SignalingServer

var currentNATTypeAccess = &sync.RWMutex{}

// currentNATType describes the local network environment.
// Acquire currentNATTypeAccess before accessing it.
var currentNATType = NATUnknown

func getCurrentNATType() string {
	currentNATTypeAccess.RLock()
	defer currentNATTypeAccess.RUnlock()
	return currentNATType
}

func setCurrentNATType(newType string) {
	currentNATTypeAccess.Lock()
	defer currentNATTypeAccess.Unlock()
	currentNATType = newType
}

var (
	tokens *tokens_t
	config webrtc.Configuration
	client http.Client
)

type GeoIP interface {
	GetCountryByAddr(net.IP) (string, bool)
}

// SnowflakeProxy is used to configure an embedded
// Snowflake in another Go application.
// For some more info also see CLI parameter descriptions in README.
type SnowflakeProxy struct {
	// How often to ask the broker for a new client
	PollInterval time.Duration
	// Capacity is the maximum number of clients a Snowflake will serve.
	// Proxies with a capacity of 0 will accept an unlimited number of clients.
	Capacity uint
	// STUNURL is a comma-separated list of STUN server URLs that the proxy will use
	STUNURL string
	// BrokerURL is the URL of the Snowflake broker
	BrokerURL string
	// KeepLocalAddresses indicates whether local SDP candidates will be sent to the broker
	KeepLocalAddresses bool
	// RelayURL is the default `URL` of the server (relay)
	// that this proxy will forward client connections to,
	// in case the broker itself did not specify the said URL
	RelayURL string
	// OutboundAddress specifies an IP address to use as the SDP host candidate
	OutboundAddress string
	// EphemeralMinPort and EphemeralMaxPort limit the range of ports that
	// ICE UDP connections may allocate from.
	// When specifying the range, make sure it's at least 2x as wide
	// as the number of clients that you are hoping to serve concurrently
	// (see the `Capacity` property).
	EphemeralMinPort uint16
	EphemeralMaxPort uint16
	// RelayDomainNamePattern is the pattern that specifies which domain names are allowed for the relay.
	// If the pattern starts with ^ then an exact match is required.
	// Otherwise the pattern is matched as a suffix of the domain name.
	// There is no lookahead assertion when matching the suffix,
	// so the string preceding the suffix does not need to be empty or end with a dot.
	RelayDomainNamePattern string
	// AllowProxyingToPrivateAddresses determines whether to allow forwarding
	// client connections to private IP addresses.
	// Useful when a Snowflake server (relay) is hosted on the same private network
	// as this proxy.
	AllowProxyingToPrivateAddresses bool
	AllowNonTLSRelay                bool
	// NATProbeURL is the URL of the probe service we use for NAT checks
	NATProbeURL string
	// NATTypeMeasurementInterval is time before NAT type is retested
	NATTypeMeasurementInterval time.Duration
	// ProxyType is the type reported to the broker; if not provided, "standalone" will be used
	ProxyType       string
	EventDispatcher event.SnowflakeEventDispatcher
	shutdown        chan struct{}

	// SummaryInterval is the time interval at which proxy stats will be logged
	SummaryInterval time.Duration

	// GeoIP will be used to detect the country of the clients if provided
	GeoIP GeoIP

	periodicProxyStats *periodicProxyStats
	bytesLogger        bytesLogger
}

// Checks whether an IP address is a remote address for the client
func isRemoteAddress(ip net.IP) bool {
	return !(util.IsLocal(ip) || ip.IsUnspecified() || ip.IsLoopback())
}

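// genSessionID returns a random session identifier: sessionIDLength random
// bytes, base64-encoded with the trailing '=' padding removed.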
func genSessionID() string {
	buf := make([]byte, sessionIDLength)
	_, err := rand.Read(buf)
	if err != nil {
		panic(err.Error())
	}
	return strings.TrimRight(base64.StdEncoding.EncodeToString(buf), "=")
}

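// limitedRead reads at most limit bytes from r. If the underlying reader
// yields more than limit bytes, the result is truncated to limit bytes and
// io.ErrUnexpectedEOF is returned.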
func limitedRead(r io.Reader, limit int64) ([]byte, error) {
	p, err := io.ReadAll(&io.LimitedReader{R: r, N: limit + 1})
	if err != nil {
		return p, err
	} else if int64(len(p)) == limit+1 {
		return p[0:limit], io.ErrUnexpectedEOF
	}
	return p, err
}

// SignalingServer keeps track of the SignalingServer in use by the Snowflake
type SignalingServer struct {
	url       *url.URL
	transport http.RoundTripper
}

func newSignalingServer(rawURL string) (*SignalingServer, error) {
	var err error
	s := new(SignalingServer)
	s.url, err = url.Parse(rawURL)
	if err != nil {
		return nil, fmt.Errorf("invalid broker url: %s", err)
	}

	s.transport = http.DefaultTransport.(*http.Transport)
	s.transport.(*http.Transport).ResponseHeaderTimeout = 30 * time.Second

	return s, nil
}

// Post sends a POST request to the SignalingServer
func (s *SignalingServer) Post(path string, payload io.Reader) ([]byte, error) {
	req, err := http.NewRequest("POST", path, payload)
	if err != nil {
		return nil, err
	}

	resp, err := s.transport.RoundTrip(req)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("remote returned status code %d", resp.StatusCode)
	}

	defer resp.Body.Close()
	return limitedRead(resp.Body, readLimit)
}

// pollOffer communicates the proxy's capabilities with broker
// and retrieves a compatible SDP offer and relay URL.
func (s *SignalingServer) pollOffer(sid string, proxyType string, acceptedRelayPattern string) (*webrtc.SessionDescription, string) {
	brokerPath := s.url.ResolveReference(&url.URL{Path: "proxy"})

	numClients := int((tokens.count() / 8) * 8) // Round down to a multiple of 8
	currentNATTypeLoaded := getCurrentNATType()
	body, err := messages.EncodeProxyPollRequestWithRelayPrefix(sid, proxyType, currentNATTypeLoaded, numClients, acceptedRelayPattern)
	if err != nil {
		log.Printf("Error encoding poll message: %s", err.Error())
		return nil, ""
	}

	resp, err := s.Post(brokerPath.String(), bytes.NewBuffer(body))
	if err != nil {
		log.Printf("error polling broker: %s", err.Error())
	}

	offer, _, relayURL, err := messages.DecodePollResponseWithRelayURL(resp)
	if err != nil {
		log.Printf("Error reading broker response: %s", err.Error())
		log.Printf("body: %s", resp)
		return nil, ""
	}
	if offer != "" {
		offer, err := util.DeserializeSessionDescription(offer)
		if err != nil {
			log.Printf("Error processing session description: %s", err.Error())
			return nil, ""
		}
		return offer, relayURL
	}
	return nil, ""
}

// sendAnswer encodes an SDP answer, sends it to the broker
// and waits for its response
func (s *SignalingServer) sendAnswer(sid string, pc *webrtc.PeerConnection) error {
	ld := pc.LocalDescription()
	answer, err := util.SerializeSessionDescription(ld)
	if err != nil {
		return err
	}

	body, err := messages.EncodeAnswerRequest(answer, sid)
	if err != nil {
		return err
	}

	brokerPath := s.url.ResolveReference(&url.URL{Path: "answer"})
	resp, err := s.Post(brokerPath.String(), bytes.NewBuffer(body))
	if err != nil {
		return fmt.Errorf("error sending answer to broker: %s", err.Error())
	}

	success, err := messages.DecodeAnswerResponse(resp)
	if err != nil {
		return err
	}
	if !success {
		return fmt.Errorf("broker returned client timeout")
	}

	return nil
}

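// copyLoop copies data between c1 and c2 in both directions until either
// direction finishes or shutdown is closed. Both connections are closed
// before returning.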
func copyLoop(c1 io.ReadWriteCloser, c2 io.ReadWriteCloser, shutdown chan struct{}) {
	var once sync.Once
	defer c2.Close()
	defer c1.Close()
	done := make(chan struct{})
	copyer := func(dst io.ReadWriteCloser, src io.ReadWriteCloser) {
		// Experimentally, buffer usage has been observed to stay below 2 KB,
		// whereas io.Copy defaults to 32 KB.
		// This is probably determined by the MTU in the server's `newHTTPHandler`.
		size := 2 * 1024
		buffer := make([]byte, size)
		// Ignore io.ErrClosedPipe because it is likely caused by the
		// termination of copyer in the other direction.
		if _, err := io.CopyBuffer(dst, src, buffer); err != nil && err != io.ErrClosedPipe {
			log.Printf("io.CopyBuffer inside CopyLoop generated an error: %v", err)
		}
		once.Do(func() {
			close(done)
		})
	}

	go copyer(c1, c2)
	go copyer(c2, c1)

	select {
	case <-done:
	case <-shutdown:
	}
	log.Println("copy loop ended")
}

// We pass conn.RemoteAddr() as an additional parameter, rather than calling
// conn.RemoteAddr() inside this function, as a workaround for a hang that
// otherwise occurs inside conn.pc.RemoteDescription() (called by RemoteAddr).
// https://bugs.torproject.org/18628#comment:8
func (sf *SnowflakeProxy) datachannelHandler(conn *webRTCConn, remoteIP net.IP, relayURL string) {
	defer conn.Close()
	defer tokens.ret()

	if relayURL == "" {
		relayURL = sf.RelayURL
	}

	wsConn, err := connectToRelay(relayURL, remoteIP)
	if err != nil {
		log.Print(err)
		return
	}
	defer wsConn.Close()

	copyLoop(conn, wsConn, sf.shutdown)
	log.Printf("datachannelHandler ends")
}

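// connectToRelay opens a WebSocket connection to the relay at relayURL,
// encoding the client's IP address (if known) in the client_ip query
// parameter.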
func connectToRelay(relayURL string, remoteIP net.IP) (*websocketconn.Conn, error) {
	u, err := url.Parse(relayURL)
	if err != nil {
		return nil, fmt.Errorf("invalid relay url: %s", err)
	}

	if remoteIP != nil {
		// Encode client IP address in relay URL
		q := u.Query()
		q.Set("client_ip", remoteIP.String())
		u.RawQuery = q.Encode()
	} else {
		log.Printf("no remote address given in websocket")
	}

	ws, _, err := websocket.DefaultDialer.Dial(u.String(), nil)
	if err != nil {
		return nil, fmt.Errorf("error dialing relay: %s = %s", u.String(), err)
	}

	wsConn := websocketconn.New(ws)
	log.Printf("Connected to relay: %v", relayURL)
	return wsConn, nil
}

type dataChannelHandlerWithRelayURL struct {
	RelayURL string
	sf       *SnowflakeProxy
}

func (d dataChannelHandlerWithRelayURL) datachannelHandler(conn *webRTCConn, remoteIP net.IP) {
	d.sf.datachannelHandler(conn, remoteIP, d.RelayURL)
}

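// makeWebRTCAPI builds a webrtc.API configured for this proxy: it filters out
// local ICE candidates unless KeepLocalAddresses is set, restricts the
// ephemeral UDP port range if one was given, optionally maps host candidates
// to OutboundAddress, disables mDNS candidates, and skips DTLS hello
// verification.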
func (sf *SnowflakeProxy) makeWebRTCAPI() *webrtc.API {
	settingsEngine := webrtc.SettingEngine{}

	if !sf.KeepLocalAddresses {
		settingsEngine.SetIPFilter(func(ip net.IP) (keep bool) {
			// `IsLoopback()` and `IsUnspecified()` are likely not needed here,
			// but let's keep them just in case.
			// FYI there is similar code in other files in this project.
			keep = !util.IsLocal(ip) && !ip.IsLoopback() && !ip.IsUnspecified()
			return
		})
	}
	settingsEngine.SetIncludeLoopbackCandidate(sf.KeepLocalAddresses)

	// Use the SetNet setting https://pkg.go.dev/github.com/pion/webrtc/v3#SettingEngine.SetNet
	// to get snowflake working in shadow (where the AF_NETLINK family is not implemented).
	// These two lines of code functionally revert a new change in pion by silently ignoring
	// when net.Interfaces() fails, rather than throwing an error
	vnet, _ := stdnet.NewNet()
	settingsEngine.SetNet(vnet)

	if sf.EphemeralMinPort != 0 && sf.EphemeralMaxPort != 0 {
		err := settingsEngine.SetEphemeralUDPPortRange(sf.EphemeralMinPort, sf.EphemeralMaxPort)
		if err != nil {
			log.Fatal("Invalid port range: min > max")
		}
	}

	if sf.OutboundAddress != "" {
		// replace SDP host candidates with the given IP without validation
		// still have server reflexive candidates to fall back on
		settingsEngine.SetNAT1To1IPs([]string{sf.OutboundAddress}, webrtc.ICECandidateTypeHost)
	}

	settingsEngine.SetICEMulticastDNSMode(ice.MulticastDNSModeDisabled)

	settingsEngine.SetDTLSInsecureSkipHelloVerify(true)

	return webrtc.NewAPI(webrtc.WithSettingEngine(settingsEngine))
}

// Create a PeerConnection from an SDP offer. Blocks until the gathering of ICE
// candidates is complete and the answer is available in LocalDescription.
// Installs an OnDataChannel callback that creates a webRTCConn and passes it to
// datachannelHandler.
func (sf *SnowflakeProxy) makePeerConnectionFromOffer(
	sdp *webrtc.SessionDescription,
	config webrtc.Configuration, dataChan chan struct{},
	handler func(conn *webRTCConn, remoteIP net.IP),
) (*webrtc.PeerConnection, error) {
	api := sf.makeWebRTCAPI()
	pc, err := api.NewPeerConnection(config)
	if err != nil {
		return nil, fmt.Errorf("accept: NewPeerConnection: %s", err)
	}

	pc.OnDataChannel(func(dc *webrtc.DataChannel) {
		log.Printf("New Data Channel %s-%d\n", dc.Label(), dc.ID())
		close(dataChan)

		pr, pw := io.Pipe()
		conn := newWebRTCConn(pc, dc, pr, sf.bytesLogger)
		remoteIP := conn.RemoteIP()

		dc.SetBufferedAmountLowThreshold(bufferedAmountLowThreshold)

		dc.OnBufferedAmountLow(func() {
			select {
			case conn.sendMoreCh <- struct{}{}:
			default:
			}
		})

		dc.OnOpen(func() {
			log.Printf("Data Channel %s-%d open\n", dc.Label(), dc.ID())
			sf.EventDispatcher.OnNewSnowflakeEvent(event.EventOnProxyClientConnected{})

			if sf.OutboundAddress != "" {
				selectedCandidatePair, err := pc.SCTP().Transport().ICETransport().GetSelectedCandidatePair()
				if err != nil || selectedCandidatePair == nil {
					// Avoid dereferencing a nil candidate pair below.
					log.Printf("Warning: couldn't get the selected candidate pair")
				} else {
					log.Printf("Selected Local Candidate: %s:%d", selectedCandidatePair.Local.Address, selectedCandidatePair.Local.Port)
					if sf.OutboundAddress != selectedCandidatePair.Local.Address {
						log.Printf("Warning: the IP address provided by --outbound-address is not used for establishing the peer connection")
					}
				}
			}
		})
		dc.OnClose(func() {
			// Make sure that the `Write()`s are not blocked any more.
			dc.OnBufferedAmountLow(func() {})
			close(conn.sendMoreCh)

			conn.lock.Lock()
			defer conn.lock.Unlock()
			log.Printf("Data Channel %s-%d close\n", dc.Label(), dc.ID())

			country := ""
			if sf.GeoIP != nil && !reflect.ValueOf(sf.GeoIP).IsNil() && remoteIP != nil {
				country, _ = sf.GeoIP.GetCountryByAddr(remoteIP)
			}
			sf.EventDispatcher.OnNewSnowflakeEvent(event.EventOnProxyConnectionOver{Country: country})

			conn.dc = nil
			dc.Close()
			pw.Close()
		})
		dc.OnMessage(func(msg webrtc.DataChannelMessage) {
			n, err := pw.Write(msg.Data)
			if err != nil {
				if inErr := pw.CloseWithError(err); inErr != nil {
					log.Printf("close with error generated an error: %v", inErr)
				}

				return
			}

			conn.bytesLogger.AddOutbound(int64(n))

			if n != len(msg.Data) {
				// XXX: Maybe don't panic here and log an error instead?
				panic("short write")
			}
		})

		go handler(conn, remoteIP)
	})
	// As of v3.0.0, pion-webrtc uses trickle ICE by default.
	// We have to wait for candidate gathering to complete
	// before we send the offer
	done := webrtc.GatheringCompletePromise(pc)
	err = pc.SetRemoteDescription(*sdp)
	if err != nil {
		if inerr := pc.Close(); inerr != nil {
			log.Printf("unable to call pc.Close after pc.SetRemoteDescription with error: %v", inerr)
		}
		return nil, fmt.Errorf("accept: SetRemoteDescription: %s", err)
	}

	log.Println("Generating answer...")
	answer, err := pc.CreateAnswer(nil)
	if err != nil {
		if inerr := pc.Close(); inerr != nil {
			log.Printf("ICE gathering has generated an error when calling pc.Close: %v", inerr)
		}
		return nil, err
	}

	err = pc.SetLocalDescription(answer)
	if err != nil {
		if err = pc.Close(); err != nil {
			log.Printf("pc.Close after setting local description returned : %v", err)
		}
		return nil, err
	}

	// Wait for ICE candidate gathering to complete,
	// or for whatever we managed to gather before the broker
	// responds with an error to the client offer.
	// See https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/issues/40230
	select {
	case <-done:
	case <-time.After(constants.BrokerClientTimeout * time.Second * 3 / 4):
		log.Print("ICE gathering is not yet complete, but let's send the answer" +
			" before the client times out")
	}

	log.Printf("Answer: \n\t%s", strings.ReplaceAll(pc.LocalDescription().SDP, "\n", "\n\t"))

	return pc, nil
}

// Create a new PeerConnection. Blocks until the gathering of ICE
// candidates is complete and the answer is available in LocalDescription.
func (sf *SnowflakeProxy) makeNewPeerConnection(
	config webrtc.Configuration, dataChan chan struct{},
) (*webrtc.PeerConnection, error) {
	api := sf.makeWebRTCAPI()
	pc, err := api.NewPeerConnection(config)
	if err != nil {
		return nil, fmt.Errorf("accept: NewPeerConnection: %s", err)
	}
	pc.OnConnectionStateChange(func(pcs webrtc.PeerConnectionState) {
		log.Printf("NAT check: WebRTC: OnConnectionStateChange: %v", pcs)
	})

	// Must create a data channel before creating an offer
	// https://github.com/pion/webrtc/wiki/Release-WebRTC@v3.0.0#a-data-channel-is-no-longer-implicitly-created-with-a-peerconnection
	dc, err := pc.CreateDataChannel("test", &webrtc.DataChannelInit{})
	if err != nil {
		log.Printf("CreateDataChannel ERROR: %s", err)
		return nil, err
	}
	dc.OnOpen(func() {
		log.Println("WebRTC: DataChannel.OnOpen")
		close(dataChan)
	})
	dc.OnClose(func() {
		log.Println("WebRTC: DataChannel.OnClose")
		go func() {
			// A hack to make NAT testing more reliable and not mis-identify
			// as "restricted".
			// See https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/issues/40419#note_3141855.
			// Instead we should just `dc.Close()` without waiting
			// and without a goroutine.
			// (or, perhaps, `dc.Close()` is not needed at all
			// in the OnClose callback?)
			<-time.After(5 * time.Second)

			log.Print("NAT check: WebRTC: dc.Close()")
			dc.Close()
		}()
	})

	offer, err := pc.CreateOffer(nil)
	// TODO: Potentially timeout and retry if ICE isn't working.
	if err != nil {
		log.Println("Failed to prepare offer", err)
		pc.Close()
		return nil, err
	}
	log.Println("Probetest: Created Offer")

	// As of v3.0.0, pion-webrtc uses trickle ICE by default.
	// We have to wait for candidate gathering to complete
	// before we send the offer
	done := webrtc.GatheringCompletePromise(pc)
	// start the gathering of ICE candidates
	err = pc.SetLocalDescription(offer)
	if err != nil {
		log.Println("Failed to apply offer", err)
		pc.Close()
		return nil, err
	}
	log.Println("Probetest: Set local description")

	// Wait for ICE candidate gathering to complete
	<-done

	return pc, nil
}

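// runSession polls the broker once for a client offer and, if one is received
// and its relay URL is acceptable, answers it and establishes a WebRTC
// connection with the client. The caller's token is returned either here (if
// no client connects) or by datachannelHandler when the connection finishes.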
func (sf *SnowflakeProxy) runSession(sid string) {
	connectedToClient := false
	defer func() {
		if !connectedToClient {
			tokens.ret()
		}
		// Otherwise we'll `tokens.ret()` when the connection finishes.
	}()

	offer, relayURL := broker.pollOffer(sid, sf.ProxyType, sf.RelayDomainNamePattern)
	if offer == nil {
		return
	}
	log.Printf("Received Offer From Broker: \n\t%s", strings.ReplaceAll(offer.SDP, "\n", "\n\t"))

	if relayURL != "" {
		if err := checkIsRelayURLAcceptable(sf.RelayDomainNamePattern, sf.AllowProxyingToPrivateAddresses, sf.AllowNonTLSRelay, relayURL); err != nil {
			log.Printf("bad offer from broker: %v", err)
			return
		}
	}

	dataChan := make(chan struct{})
	dataChannelAdaptor := dataChannelHandlerWithRelayURL{RelayURL: relayURL, sf: sf}
	pc, err := sf.makePeerConnectionFromOffer(offer, config, dataChan, dataChannelAdaptor.datachannelHandler)
	if err != nil {
		log.Printf("error making WebRTC connection: %s", err)
		return
	}

	err = broker.sendAnswer(sid, pc)
	if err != nil {
		log.Printf("error sending answer to client through broker: %s", err)
		if inerr := pc.Close(); inerr != nil {
			log.Printf("error calling pc.Close: %v", inerr)
		}
		return
	}
	// Set a timeout on peerconnection. If the connection state has not
	// advanced to PeerConnectionStateConnected in this time,
	// destroy the peer connection and return the token.
	select {
	case <-dataChan:
		log.Println("Connection successful")
		connectedToClient = true
	case <-time.After(dataChannelTimeout):
		log.Println("Timed out waiting for client to open data channel.")
		sf.EventDispatcher.OnNewSnowflakeEvent(
			event.EventOnProxyConnectionFailed{},
		)
		if err := pc.Close(); err != nil {
			log.Printf("error calling pc.Close: %v", err)
		}
	}
}

// Returns nil if the relayURL is acceptable.
// This is a pure function.
// If the hostname in the `relayURL` is not an IP address
// (but a name instead, e.g. `localhost`),
// this function will _not_ perform a DNS request to figure out
// if the name resolves to a private IP address,
// i.e. the private / public check will effectively be skipped.
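// For example (mirroring the test vectors in TestUtilityFuncs):
//
//	checkIsRelayURLAcceptable("snowflake.torproject.net$", false, false, "wss://snowflake.torproject.net") // nil
//	checkIsRelayURLAcceptable("snowflake.torproject.net$", false, false, "ws://snowflake.torproject.net")  // error: non-TLS relay not allowed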
func checkIsRelayURLAcceptable(
	allowedHostNamePattern string,
	allowPrivateIPs bool,
	allowNonTLSRelay bool,
	relayURL string,
) error {
	parsedRelayURL, err := url.Parse(relayURL)
	if err != nil {
		return fmt.Errorf("bad Relay URL %w", err)
	}
	if !allowPrivateIPs {
		ip := net.ParseIP(parsedRelayURL.Hostname())
		// Otherwise it's a domain name, or an invalid IP.
		if ip != nil {
			// We should probably use a ready library for this.
			if !isRemoteAddress(ip) {
				return fmt.Errorf("rejected Relay URL: private IPs are not allowed")
			}
		}
	}
	if !allowNonTLSRelay && parsedRelayURL.Scheme != "wss" {
		return fmt.Errorf("rejected Relay URL protocol: non-TLS not allowed")
	}
	// FYI our websocket library also rejects other protocols
	// https://github.com/gorilla/websocket/blob/5e002381133d322c5f1305d171f3bdd07decf229/client.go#L174-L181
	if parsedRelayURL.Scheme != "wss" && parsedRelayURL.Scheme != "ws" {
		return fmt.Errorf("rejected Relay URL protocol: only WebSocket is allowed")
	}
	matcher := namematcher.NewNameMatcher(allowedHostNamePattern)
	if !matcher.IsMember(parsedRelayURL.Hostname()) {
		return fmt.Errorf("rejected Relay URL: hostname does not match allowed pattern \"%v\"", allowedHostNamePattern)
	}
	return nil
}

// Start configures and starts a Snowflake, fully formed and special. Configuration
// values that are unset will default to their corresponding default values.
func (sf *SnowflakeProxy) Start() error {
	var err error

	sf.EventDispatcher.OnNewSnowflakeEvent(event.EventOnProxyStarting{})
	sf.shutdown = make(chan struct{})

	// blank configurations revert to default
	if sf.PollInterval == 0 {
		sf.PollInterval = DefaultPollInterval
	}
	if sf.BrokerURL == "" {
		sf.BrokerURL = DefaultBrokerURL
	}
	if sf.RelayURL == "" {
		sf.RelayURL = DefaultRelayURL
	}
	if sf.STUNURL == "" {
		sf.STUNURL = DefaultSTUNURL
	}
	if sf.NATProbeURL == "" {
		sf.NATProbeURL = DefaultNATProbeURL
	}
	if sf.ProxyType == "" {
		sf.ProxyType = DefaultProxyType
	}
	if sf.EventDispatcher == nil {
		sf.EventDispatcher = event.NewSnowflakeEventDispatcher()
	}

	sf.bytesLogger = newBytesSyncLogger()
	sf.periodicProxyStats = newPeriodicProxyStats(sf.SummaryInterval, sf.EventDispatcher, sf.bytesLogger)
	sf.EventDispatcher.AddSnowflakeEventListener(sf.periodicProxyStats)

	broker, err = newSignalingServer(sf.BrokerURL)
	if err != nil {
		return fmt.Errorf("error configuring broker: %s", err)
	}

	_, err = url.Parse(sf.STUNURL)
	if err != nil {
		return fmt.Errorf("invalid stun url: %s", err)
	}
	_, err = url.Parse(sf.RelayURL)
	if err != nil {
		return fmt.Errorf("invalid default relay url: %s", err)
	}

	if !namematcher.IsValidRule(sf.RelayDomainNamePattern) {
		return fmt.Errorf("invalid relay domain name pattern")
	}

	if sf.EphemeralMaxPort != 0 {
		rangeWidth := sf.EphemeralMaxPort - sf.EphemeralMinPort
		expectedNumConcurrentClients := sf.Capacity
		if sf.Capacity == 0 {
			// Just a guess, since 0 means "unlimited".
			expectedNumConcurrentClients = 10
		}
		// See https://forum.torproject.org/t/remote-returned-status-code-400/15026/9?u=wofwca
		if uint(rangeWidth) < expectedNumConcurrentClients*2 {
			log.Printf(
				"Warning: ephemeral ports range seems narrow (%v-%v) "+
					"for the client capacity (%v). "+
					"Some client connections might fail. "+
					"Please widen the port range, or limit the 'capacity'.",
				sf.EphemeralMinPort,
				sf.EphemeralMaxPort,
				sf.Capacity,
			)
			// Instead of simply printing a warning, we could look into
			// utilizing [SetICEUDPMux](https://pkg.go.dev/github.com/pion/webrtc/v4#SettingEngine.SetICEUDPMux)
			// to multiplex multiple connections over one (or more?) ports.
		}
	}

	config = webrtc.Configuration{
		ICEServers: []webrtc.ICEServer{
			{
				URLs: strings.Split(sf.STUNURL, ","),
			},
		},
	}
	tokens = newTokens(sf.Capacity)

	err = sf.checkNATType(config, sf.NATProbeURL)
	if err != nil {
		// non-fatal error. Log it and continue
		log.Print(err.Error())
		setCurrentNATType(NATUnknown)
	}
	sf.EventDispatcher.OnNewSnowflakeEvent(event.EventOnCurrentNATTypeDetermined{CurNATType: getCurrentNATType()})

	NatRetestTask := task.Periodic{
		Interval: sf.NATTypeMeasurementInterval,
		Execute: func() error {
			return sf.checkNATType(config, sf.NATProbeURL)
		},
		// Not setting OnError would shut down the periodic task on error by default.
		OnError: func(err error) {
			log.Printf("Periodic probetest failed: %s, retaining current NAT type: %s", err.Error(), getCurrentNATType())
		},
	}

	if sf.NATTypeMeasurementInterval != 0 {
		NatRetestTask.WaitThenStart()
		defer NatRetestTask.Close()
	}

	ticker := time.NewTicker(sf.PollInterval)
	defer ticker.Stop()

	for ; true; <-ticker.C {
		select {
		case <-sf.shutdown:
			return nil
		default:
			tokens.get()
			sessionID := genSessionID()
			sf.runSession(sessionID)
		}
	}
	return nil
}

// Stop closes all existing connections and shuts down the Snowflake.
func (sf *SnowflakeProxy) Stop() {
	close(sf.shutdown)
}

// checkNATType uses probetest to determine NAT compatibility by
// attempting to connect with a known symmetric NAT. If it succeeds,
// the NAT is considered "unrestricted". If it times out, it is considered "restricted".
func (sf *SnowflakeProxy) checkNATType(config webrtc.Configuration, probeURL string) error {
	log.Printf("Checking our NAT type, contacting NAT check probe server at \"%v\"...", probeURL)

	probe, err := newSignalingServer(probeURL)
	if err != nil {
		return fmt.Errorf("Error parsing url: %w", err)
	}

	dataChan := make(chan struct{})
	pc, err := sf.makeNewPeerConnection(config, dataChan)
	if err != nil {
		return fmt.Errorf("Error making WebRTC connection: %w", err)
	}
	defer func() {
		if err := pc.Close(); err != nil {
			log.Printf("Probetest: error calling pc.Close: %v", err)
		}
	}()

	offer := pc.LocalDescription()
	log.Printf("Probetest offer: \n\t%s", strings.ReplaceAll(offer.SDP, "\n", "\n\t"))
	sdp, err := util.SerializeSessionDescription(offer)
	if err != nil {
		return fmt.Errorf("Error encoding probe message: %w", err)
	}

	// send offer
	body, err := messages.EncodePollResponse(sdp, true, "")
	if err != nil {
		return fmt.Errorf("Error encoding probe message: %w", err)
	}

	resp, err := probe.Post(probe.url.String(), bytes.NewBuffer(body))
	if err != nil {
		return fmt.Errorf("Error polling probe: %w", err)
	}

	sdp, _, err = messages.DecodeAnswerRequest(resp)
	if err != nil {
		return fmt.Errorf("Error reading probe response: %w", err)
	}

	answer, err := util.DeserializeSessionDescription(sdp)
	if err != nil {
		return fmt.Errorf("Error setting answer: %w", err)
	}
	log.Printf("Probetest answer: \n\t%s", strings.ReplaceAll(answer.SDP, "\n", "\n\t"))

	err = pc.SetRemoteDescription(*answer)
	if err != nil {
		return fmt.Errorf("Error setting answer: %w", err)
	}

	prevNATType := getCurrentNATType()

	log.Printf("Waiting for a test WebRTC connection with NAT check probe server to establish...")
	select {
	case <-dataChan:
		log.Printf(
			"Test WebRTC connection with NAT check probe server established!"+
				" This means our NAT is %v!",
			NATUnrestricted,
		)
		setCurrentNATType(NATUnrestricted)
	case <-time.After(dataChannelTimeout):
		log.Printf(
			"Test WebRTC connection with NAT check probe server timed out."+
				" This means our NAT is %v.",
			NATRestricted,
		)
		setCurrentNATType(NATRestricted)
	}

	log.Printf("NAT Type measurement: %v -> %v\n", prevNATType, getCurrentNATType())

	return nil
}
07070100000080000081A400000000000000000000000167D9BD4E00000255000000000000000000000000000000000000002500000000snowflake-2.11.0/proxy/lib/tokens.gopackage snowflake_proxy

import (
	"sync/atomic"
)

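// tokens_t tracks the number of clients currently being served and, when a
// non-zero capacity is configured, blocks get() until a token is available.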
type tokens_t struct {
	ch       chan struct{}
	capacity uint
	clients  atomic.Int64
}

func newTokens(capacity uint) *tokens_t {
	var ch chan struct{}
	if capacity != 0 {
		ch = make(chan struct{}, capacity)
	}

	return &tokens_t{
		ch:       ch,
		capacity: capacity,
		clients:  atomic.Int64{},
	}
}

func (t *tokens_t) get() {
	t.clients.Add(1)

	if t.capacity != 0 {
		t.ch <- struct{}{}
	}
}

func (t *tokens_t) ret() {
	t.clients.Add(-1)

	if t.capacity != 0 {
		<-t.ch
	}
}

func (t *tokens_t) count() int64 {
	return t.clients.Load()
}
07070100000081000081A400000000000000000000000167D9BD4E0000023F000000000000000000000000000000000000002A00000000snowflake-2.11.0/proxy/lib/tokens_test.gopackage snowflake_proxy

import (
	"testing"

	. "github.com/smartystreets/goconvey/convey"
)

func TestTokens(t *testing.T) {
	Convey("Tokens", t, func() {
		tokens := newTokens(2)
		So(tokens.count(), ShouldEqual, 0)
		tokens.get()
		So(tokens.count(), ShouldEqual, 1)
		tokens.ret()
		So(tokens.count(), ShouldEqual, 0)
	})
	Convey("Tokens capacity 0", t, func() {
		tokens := newTokens(0)
		So(tokens.count(), ShouldEqual, 0)
		for i := 0; i < 20; i++ {
			tokens.get()
		}
		So(tokens.count(), ShouldEqual, 20)
		tokens.ret()
		So(tokens.count(), ShouldEqual, 19)
	})
}
07070100000082000081A400000000000000000000000167D9BD4E0000093B000000000000000000000000000000000000002300000000snowflake-2.11.0/proxy/lib/util.gopackage snowflake_proxy

import (
	"time"
)

// bytesLogger is an interface used to log the throughput
// of the Snowflake. The default bytesLogger (bytesNullLogger) does nothing.
type bytesLogger interface {
	AddOutbound(int64)
	AddInbound(int64)
	GetStat() (in int64, out int64)
}

// bytesNullLogger is the default bytesLogger; it does nothing.
type bytesNullLogger struct{}

// AddOutbound in bytesNullLogger does nothing
func (b bytesNullLogger) AddOutbound(amount int64) {}

// AddInbound in bytesNullLogger does nothing
func (b bytesNullLogger) AddInbound(amount int64) {}

func (b bytesNullLogger) GetStat() (in int64, out int64) { return -1, -1 }

// bytesSyncLogger uses channels to safely log from multiple sources with output
// occurring at reasonable intervals.
type bytesSyncLogger struct {
	outboundChan, inboundChan chan int64
	statsChan                 chan bytesLoggerStats
	stats                     bytesLoggerStats
	outEvents, inEvents       int
	start                     time.Time
}

type bytesLoggerStats struct {
	outbound, inbound int64
}

// newBytesSyncLogger returns a new bytesSyncLogger and starts its logging loop.
func newBytesSyncLogger() *bytesSyncLogger {
	b := &bytesSyncLogger{
		outboundChan: make(chan int64, 5),
		inboundChan:  make(chan int64, 5),
		statsChan:    make(chan bytesLoggerStats),
	}
	go b.log()
	b.start = time.Now()
	return b
}

func (b *bytesSyncLogger) log() {
	for {
		select {
		case amount := <-b.outboundChan:
			b.stats.outbound += amount
			b.outEvents++
		case amount := <-b.inboundChan:
			b.stats.inbound += amount
			b.inEvents++
		case b.statsChan <- b.stats:
			b.stats.inbound = 0
			b.stats.outbound = 0
			b.inEvents = 0
			b.outEvents = 0
		}
	}
}

// AddOutbound adds a number of bytes to the outbound total reported by the logger
func (b *bytesSyncLogger) AddOutbound(amount int64) {
	b.outboundChan <- amount
}

// AddInbound adds a number of bytes to the inbound total reported by the logger
func (b *bytesSyncLogger) AddInbound(amount int64) {
	b.inboundChan <- amount
}

// GetStat returns the current inbound and outbound stats from the logger and then zeros the counts
func (b *bytesSyncLogger) GetStat() (in int64, out int64) {
	stats := <-b.statsChan
	return stats.inbound, stats.outbound
}

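// formatTraffic converts a byte count into kilobytes (1 KB = 1000 bytes)
// together with its unit label.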
func formatTraffic(amount int64) (value int64, unit string) { return amount / 1000, "KB" }
07070100000083000081A400000000000000000000000167D9BD4E00000EDE000000000000000000000000000000000000002900000000snowflake-2.11.0/proxy/lib/webrtcconn.gopackage snowflake_proxy

import (
	"context"
	"errors"
	"fmt"
	"io"
	"log"
	"net"
	"regexp"
	"sync"
	"time"

	"github.com/pion/ice/v4"
	"github.com/pion/sdp/v3"
	"github.com/pion/webrtc/v4"
)

const maxBufferedAmount uint64 = 512 * 1024 // 512 KB

var remoteIPPatterns = []*regexp.Regexp{
	/* IPv4 */
	regexp.MustCompile(`(?m)^c=IN IP4 ([\d.]+)(?:(?:\/\d+)?\/\d+)?(:? |\r?\n)`),
	/* IPv6 */
	regexp.MustCompile(`(?m)^c=IN IP6 ([0-9A-Fa-f:.]+)(?:\/\d+)?(:? |\r?\n)`),
}

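// webRTCConn wraps a WebRTC data channel and its peer connection in an
// io.ReadWriteCloser with net.Conn-like methods, and closes itself after a
// period of inactivity.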
type webRTCConn struct {
	dc *webrtc.DataChannel
	pc *webrtc.PeerConnection
	pr *io.PipeReader

	lock sync.Mutex // Synchronization for DataChannel destruction
	once sync.Once  // Synchronization for PeerConnection destruction

	inactivityTimeout time.Duration
	activity          chan struct{}
	sendMoreCh        chan struct{}
	cancelTimeoutLoop context.CancelFunc

	bytesLogger bytesLogger
}

func newWebRTCConn(pc *webrtc.PeerConnection, dc *webrtc.DataChannel, pr *io.PipeReader, bytesLogger bytesLogger) *webRTCConn {
	conn := &webRTCConn{pc: pc, dc: dc, pr: pr, bytesLogger: bytesLogger}
	conn.activity = make(chan struct{}, 100)
	conn.sendMoreCh = make(chan struct{}, 1)
	conn.inactivityTimeout = 30 * time.Second
	ctx, cancel := context.WithCancel(context.Background())
	conn.cancelTimeoutLoop = cancel
	go conn.timeoutLoop(ctx)
	return conn
}

func (c *webRTCConn) timeoutLoop(ctx context.Context) {
	timer := time.NewTimer(c.inactivityTimeout)
	for {
		select {
		case <-timer.C:
			_ = c.Close()
			log.Println("Closed connection due to inactivity")
			return
		case <-c.activity:
			if !timer.Stop() {
				<-timer.C
			}
			timer.Reset(c.inactivityTimeout)
			continue
		case <-ctx.Done():
			return
		}
	}
}

func (c *webRTCConn) Read(b []byte) (int, error) {
	return c.pr.Read(b)
}

func (c *webRTCConn) Write(b []byte) (int, error) {
	c.bytesLogger.AddInbound(int64(len(b)))
	select {
	case c.activity <- struct{}{}:
	default:
	}
	c.lock.Lock()
	defer c.lock.Unlock()
	if c.dc != nil {
		_ = c.dc.Send(b)
		if c.dc.BufferedAmount() >= maxBufferedAmount {
			<-c.sendMoreCh
		}
	}
	return len(b), nil
}

func (c *webRTCConn) Close() (err error) {
	c.once.Do(func() {
		c.cancelTimeoutLoop()
		err = errors.Join(c.pr.Close(), c.pc.Close())
	})
	return
}

func (c *webRTCConn) LocalAddr() net.Addr {
	return nil
}

func (c *webRTCConn) RemoteIP() net.IP {
	//Parse Remote SDP offer and extract client IP
	return remoteIPFromSDP(c.pc.RemoteDescription().SDP)
}

func (c *webRTCConn) SetDeadline(t time.Time) error {
	// nolint: golint
	return fmt.Errorf("SetDeadline not implemented")
}

func (c *webRTCConn) SetReadDeadline(t time.Time) error {
	// nolint: golint
	return fmt.Errorf("SetReadDeadline not implemented")
}

func (c *webRTCConn) SetWriteDeadline(t time.Time) error {
	// nolint: golint
	return fmt.Errorf("SetWriteDeadline not implemented")
}

func remoteIPFromSDP(str string) net.IP {
	// Look for remote IP in "a=candidate" attribute fields
	// https://tools.ietf.org/html/rfc5245#section-15.1
	var desc sdp.SessionDescription
	err := desc.Unmarshal([]byte(str))
	if err != nil {
		log.Println("Error parsing SDP: ", err.Error())
		return nil
	}
	for _, m := range desc.MediaDescriptions {
		for _, a := range m.Attributes {
			if a.IsICECandidate() {
				c, err := ice.UnmarshalCandidate(a.Value)
				if err == nil {
					ip := net.ParseIP(c.Address())
					if ip != nil && isRemoteAddress(ip) {
						return ip
					}
				}
			}
		}
	}
	// Finally look for remote IP in "c=" Connection Data field
	// https://tools.ietf.org/html/rfc4566#section-5.7
	for _, pattern := range remoteIPPatterns {
		m := pattern.FindStringSubmatch(str)
		if m != nil {
			// Ignore parsing errors, ParseIP returns nil.
			ip := net.ParseIP(m[1])
			if ip != nil && isRemoteAddress(ip) {
				return ip
			}

		}
	}

	return nil
}
07070100000084000081A400000000000000000000000167D9BD4E000021AA000000000000000000000000000000000000001F00000000snowflake-2.11.0/proxy/main.gopackage main

import (
	"flag"
	"fmt"
	"io"
	"log"
	"net"
	"os"
	"strconv"
	"strings"
	"time"

	"gitlab.torproject.org/tpo/anti-censorship/geoip"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/ptutil/safelog"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/event"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/version"
	sf "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/proxy/lib"
)

const minPollInterval = 2 * time.Second

func main() {
	pollInterval := flag.Duration("poll-interval", sf.DefaultPollInterval,
		fmt.Sprint("how often to ask the broker for a new client. Keep in mind that asking for a client will not always result in getting one. Minumum value is ", minPollInterval, ". Valid time units are \"ms\", \"s\", \"m\", \"h\"."))
	capacity := flag.Uint("capacity", 0, "maximum concurrent clients (default is to accept an unlimited number of clients)")
	stunURL := flag.String("stun", sf.DefaultSTUNURL, "Comma-separated STUN server `URL`s that this proxy will use to, among some other things, determine its public IP address")
	logFilename := flag.String("log", "", "log `filename`. If not specified, logs will be output to stderr (console).")
	rawBrokerURL := flag.String("broker", sf.DefaultBrokerURL, "The `URL` of the broker server that the proxy will be using to find clients")
	unsafeLogging := flag.Bool("unsafe-logging", false, "keep IP addresses and other sensitive info in the logs")
	logLocalTime := flag.Bool("log-local-time", false, "Use local time for logging (default: UTC)")
	keepLocalAddresses := flag.Bool("keep-local-addresses", false, "keep local LAN address ICE candidates.\nThis is usually pointless because Snowflake clients don't usually reside on the same local network as the proxy.")
	defaultRelayURL := flag.String("relay", sf.DefaultRelayURL, "The default `URL` of the server (relay) that this proxy will forward client connections to, in case the broker itself did not specify the said URL")
	probeURL := flag.String("nat-probe-server", sf.DefaultNATProbeURL, "The `URL` of the server that this proxy will use to check its network NAT type.\nDetermining NAT type helps to understand whether this proxy is compatible with certain clients' NAT")
	outboundAddress := flag.String("outbound-address", "", "prefer the given `address` as outbound address for client connections")
	allowedRelayHostNamePattern := flag.String("allowed-relay-hostname-pattern", "snowflake.torproject.net$", "this proxy will only be allowed to forward client connections to relays (servers) whose URL matches this pattern.\nNote that a pattern \"example.com$\" will match \"subdomain.example.com\" as well as \"other-domain-example.com\".\nIn order to only match \"example.com\", prefix the pattern with \"^\": \"^example.com$\"")
	allowProxyingToPrivateAddresses := flag.Bool("allow-proxying-to-private-addresses", false, "allow forwarding client connections to private IP addresses.\nUseful when a Snowflake server (relay) is hosted on the same private network as this proxy.")
	allowNonTLSRelay := flag.Bool("allow-non-tls-relay", false, "allow this proxy to pass client's data to the relay in an unencrypted form.\nThis is only useful if the relay doesn't support encryption, e.g. for testing / development purposes.")
	NATTypeMeasurementInterval := flag.Duration("nat-retest-interval", time.Hour*24,
		"the time interval between NAT type is retests (see \"nat-probe-server\"). 0s disables retest. Valid time units are \"s\", \"m\", \"h\".")
	summaryInterval := flag.Duration("summary-interval", time.Hour,
		"the time interval between summary log outputs, 0s disables summaries. Valid time units are \"s\", \"m\", \"h\".")
	disableStatsLogger := flag.Bool("disable-stats-logger", false, "disable the exposing mechanism for stats using logs")
	enableMetrics := flag.Bool("metrics", false, "enable the exposing mechanism for stats using metrics")
	metricsAddress := flag.String("metrics-address", "localhost", "set listen `address` for metrics service")
	metricsPort := flag.Int("metrics-port", 9999, "set port for the metrics service")
	verboseLogging := flag.Bool("verbose", false, "increase log verbosity")
	ephemeralPortsRangeFlag := flag.String("ephemeral-ports-range", "", "Set the `range` of ports used for client connections (format:\"<min>:<max>\").\nUseful in conjunction with port forwarding, in order to make the proxy NAT type \"unrestricted\".\nIf omitted, the ports will be chosen automatically from a wide range.\nWhen specifying the range, make sure it's at least 2x as wide as the number of clients that you are hoping to serve concurrently (see the \"capacity\" flag).")
	geoipDatabase := flag.String("geoipdb", "/usr/share/tor/geoip", "path to correctly formatted geoip database mapping IPv4 address ranges to country codes")
	geoip6Database := flag.String("geoip6db", "/usr/share/tor/geoip6", "path to correctly formatted geoip database mapping IPv6 address ranges to country codes")
	versionFlag := flag.Bool("version", false, "display version info to stderr and quit")

	var ephemeralPortsRange []uint16 = []uint16{0, 0}

	flag.Parse()

	if *versionFlag {
		fmt.Fprintf(os.Stderr, "snowflake-proxy %s", version.ConstructResult())
		os.Exit(0)
	}

	if *pollInterval < minPollInterval {
		log.Fatalf("poll-interval must be >= %v", minPollInterval)
	}

	if *outboundAddress != "" && *keepLocalAddresses {
		log.Fatal("Cannot keep local address candidates when outbound address is specified")
	}

	eventLogger := event.NewSnowflakeEventDispatcher()

	if *ephemeralPortsRangeFlag != "" {
		ephemeralPortsRangeParts := strings.Split(*ephemeralPortsRangeFlag, ":")
		if len(ephemeralPortsRangeParts) == 2 {
			ephemeralMinPort, err := strconv.ParseUint(ephemeralPortsRangeParts[0], 10, 16)
			if err != nil {
				log.Fatal(err)
			}

			ephemeralMaxPort, err := strconv.ParseUint(ephemeralPortsRangeParts[1], 10, 16)
			if err != nil {
				log.Fatal(err)
			}

			if ephemeralMinPort == 0 || ephemeralMaxPort == 0 {
				log.Fatal("Ephemeral port cannot be zero")
			}
			if ephemeralMinPort > ephemeralMaxPort {
				log.Fatal("Invalid port range: min > max")
			}

			ephemeralPortsRange = []uint16{uint16(ephemeralMinPort), uint16(ephemeralMaxPort)}
		} else {
			log.Fatalf("Bad range port format: %v", *ephemeralPortsRangeFlag)
		}
	}

	gip, err := geoip.New(*geoipDatabase, *geoip6Database)
	if *enableMetrics && err != nil {
		// The geoip DB is only used for metrics, let's only report the error if enabled
		log.Println("Error loading geoip db for country based metrics:", err)
	}

	proxy := sf.SnowflakeProxy{
		PollInterval:       *pollInterval,
		Capacity:           uint(*capacity),
		STUNURL:            *stunURL,
		BrokerURL:          *rawBrokerURL,
		KeepLocalAddresses: *keepLocalAddresses,
		RelayURL:           *defaultRelayURL,
		NATProbeURL:        *probeURL,
		OutboundAddress:    *outboundAddress,
		EphemeralMinPort:   ephemeralPortsRange[0],
		EphemeralMaxPort:   ephemeralPortsRange[1],

		NATTypeMeasurementInterval: *NATTypeMeasurementInterval,
		EventDispatcher:            eventLogger,

		RelayDomainNamePattern:          *allowedRelayHostNamePattern,
		AllowProxyingToPrivateAddresses: *allowProxyingToPrivateAddresses,
		AllowNonTLSRelay:                *allowNonTLSRelay,

		SummaryInterval: *summaryInterval,
		GeoIP:           gip,
	}

	var logOutput = io.Discard
	var eventlogOutput io.Writer = os.Stderr

	loggerFlags := log.LstdFlags

	if !*logLocalTime {
		loggerFlags |= log.LUTC
	}

	log.SetFlags(loggerFlags)

	if *verboseLogging {
		logOutput = os.Stderr
	}

	if *logFilename != "" {
		f, err := os.OpenFile(*logFilename, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()
		if *verboseLogging {
			logOutput = io.MultiWriter(logOutput, f)
		}
		eventlogOutput = io.MultiWriter(eventlogOutput, f)
	}

	if *unsafeLogging {
		log.SetOutput(logOutput)
	} else {
		log.SetOutput(&safelog.LogScrubber{Output: logOutput})
	}

	proxyEventLogger := sf.NewProxyEventLogger(eventlogOutput, *disableStatsLogger)
	eventLogger.AddSnowflakeEventListener(proxyEventLogger)

	if *enableMetrics {
		metrics := sf.NewMetrics()

		err := metrics.Start(net.JoinHostPort(*metricsAddress, strconv.Itoa(*metricsPort)))
		if err != nil {
			log.Fatalf("could not enable metrics: %v", err)
		}

		eventLogger.AddSnowflakeEventListener(sf.NewEventMetrics(metrics))
	}

	log.Printf("snowflake-proxy %s\n", version.GetVersion())

	err = proxy.Start()
	if err != nil {
		log.Fatal(err)
	}
}
07070100000085000081A400000000000000000000000167D9BD4E000000CF000000000000000000000000000000000000001F00000000snowflake-2.11.0/renovate.json{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
  "constraints": {
	  "go": "1.21"
  },
  "postUpdateOptions": ["gomodTidy", "gomodUpdateImportPaths"],
  "osvVulnerabilityAlerts": true
}
07070100000086000041ED00000000000000000000000267D9BD4E00000000000000000000000000000000000000000000001800000000snowflake-2.11.0/server07070100000087000081A400000000000000000000000167D9BD4E00000FC0000000000000000000000000000000000000002200000000snowflake-2.11.0/server/README.md<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents**

- [Setup](#setup)
- [TLS](#tls)
- [Multiple KCP state machines](#multiple-kcp-state-machines)
- [Controlling source addresses](#controlling-source-addresses)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->

This is the server transport plugin for Snowflake.
The actual transport protocol it uses is
[WebSocket](https://tools.ietf.org/html/rfc6455).
In Snowflake, the client connects to the proxy using WebRTC,
and the proxy connects to the server (this program) using WebSocket.


# Setup

The server needs to be able to listen on port 80
in order to generate its TLS certificates.
On Linux, use the `setcap` program to enable
the server to listen on port 80 without running as root:
```
setcap 'cap_net_bind_service=+ep' /usr/local/bin/snowflake-server
```

Here is a short example of configuring your torrc file
to run the Snowflake server under Tor:
```
SocksPort 0
ORPort 9001
ExtORPort auto
BridgeRelay 1

ServerTransportListenAddr snowflake 0.0.0.0:443
ServerTransportPlugin snowflake exec ./server --acme-hostnames snowflake.example --acme-email admin@snowflake.example --log /var/log/tor/snowflake-server.log
```
The domain names given to the `--acme-hostnames` option
should resolve to the IP address of the server.
You can give more than one, separated by commas.
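
For example, a torrc line covering two hostnames (both hostnames here are
placeholders) might look like:
```
ServerTransportPlugin snowflake exec ./server --acme-hostnames snowflake.example,snowflake2.example --acme-email admin@snowflake.example --log /var/log/tor/snowflake-server.log
```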


# TLS

The server uses TLS WebSockets by default: wss:// not ws://.
There is a `--disable-tls` option for testing purposes,
but you should use TLS in production.
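
For testing, a minimal torrc sketch without TLS
(the listen address and log path here are placeholders) might be:
```
ServerTransportListenAddr snowflake 127.0.0.1:8080
ServerTransportPlugin snowflake exec ./server --disable-tls --log /var/log/tor/snowflake-server.log
```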

The server automatically fetches certificates
from [Let's Encrypt](https://en.wikipedia.org/wiki/Let's_Encrypt) as needed.
Use the `--acme-hostnames` option to tell the server
what hostnames it may request certificates for.
You can optionally provide a contact email address,
using the `--acme-email` option,
so that Let's Encrypt can inform you of any problems.
The server will cache TLS certificate data in the directory
`pt_state/snowflake-certificate-cache` inside the tor state directory.

In order to fetch certificates automatically,
the server needs to listen on port 80,
in addition to whatever ports it is listening on
for WebSocket connections.
This is a requirement of the ACME protocol used by Let's Encrypt.
The program will exit if it can't bind to port 80.
On Linux, you can use the `setcap` program,
part of libcap2, to enable the server to bind to low-numbered ports
without having to run as root:
```
setcap 'cap_net_bind_service=+ep' /usr/local/bin/snowflake-server
```


# Multiple KCP state machines

The server internally uses a network protocol called KCP
to manage and persist client sessions.
Each KCP scheduler runs on a single thread.
When there are many simultaneous users (thousands),
a single KCP scheduler can be a bottleneck.
The `num-turbotunnel` pluggable transport option
lets you control the number of KCP instances,
which can help with CPU scaling:
https://bugs.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/40200

There is currently no way to set this option automatically.
You have to tune it manually.

```
ServerTransportOptions snowflake num-turbotunnel=2
```


# Controlling source addresses

Use the `orport-srcaddr` pluggable transport option to control what source addresses
are used when connecting to the upstream Tor ExtORPort or ORPort.
The value of the option may be a single IP address (e.g. "127.0.0.2")
or a CIDR range (e.g. "127.0.2.0/24"). If a range is given,
an IP address from the range is randomly chosen for each new connection.

Use `ServerTransportOptions` in torrc to set the option:
```
ServerTransportOptions snowflake orport-srcaddr=127.0.2.0/24
```

You can combine it with other options:
```
ServerTransportOptions snowflake num-turbotunnel=2 orport-srcaddr=127.0.2.0/24
```

Specifying a source address range other than the default 127.0.0.1
can help with conserving localhost ephemeral ports on servers
that receive a lot of connections:
https://bugs.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/40198
07070100000088000081A400000000000000000000000167D9BD4E0000013E000000000000000000000000000000000000002000000000snowflake-2.11.0/server/dial.go//go:build !linux
// +build !linux

package main

import "syscall"

// dialerControl does nothing.
//
// On Linux, this function would set the IP_BIND_ADDRESS_NO_PORT socket option
// in preparation for a future bind-before-connect.
func dialerControl(network, address string, c syscall.RawConn) error {
	return nil
}
07070100000089000081A400000000000000000000000167D9BD4E0000070A000000000000000000000000000000000000002600000000snowflake-2.11.0/server/dial_linux.go//go:build linux
// +build linux

package main

import (
	"syscall"

	"golang.org/x/sys/unix"
)

// dialerControl prepares a syscall.RawConn for a future bind-before-connect by
// setting the IP_BIND_ADDRESS_NO_PORT socket option.
//
// On Linux, setting the IP_BIND_ADDRESS_NO_PORT socket option helps conserve
// ephemeral ports when binding to a specific IP address before connecting
// (bind before connect), by not assigning the port number when bind is called,
// but waiting until connect. But problems arise if there are multiple processes
// doing bind-before-connect, and some of them use IP_BIND_ADDRESS_NO_PORT and
// some of them do not. When there is a mix, the ones that do will have their
// ephemeral ports reserved by the ones that do not, leading to EADDRNOTAVAIL
// errors.
//
// tor does bind-before-connect when the OutboundBindAddress option is set in
// torrc. Since version 0.4.7.13 (January 2023), tor sets
// IP_BIND_ADDRESS_NO_PORT unconditionally on platforms that support it, and
// therefore we must do the same, to avoid EADDRNOTAVAIL errors.
//
// # References
//
// https://bugs.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/40201#note_2839472
// https://forum.torproject.net/t/tor-relays-inet-csk-bind-conflict/5757/10
// https://blog.cloudflare.com/how-to-stop-running-out-of-ephemeral-ports-and-start-to-love-long-lived-connections/
// https://blog.cloudflare.com/the-quantum-state-of-a-tcp-port/
// https://forum.torproject.net/t/stable-release-0-4-5-16-and-0-4-7-13/6216
func dialerControl(network, address string, c syscall.RawConn) error {
	var sockErr error
	err := c.Control(func(fd uintptr) {
		sockErr = syscall.SetsockoptInt(int(fd), unix.SOL_IP, unix.IP_BIND_ADDRESS_NO_PORT, 1)
	})
	if err == nil {
		err = sockErr
	}
	return err
}
0707010000008A000041ED00000000000000000000000267D9BD4E00000000000000000000000000000000000000000000001C00000000snowflake-2.11.0/server/lib0707010000008B000081A400000000000000000000000167D9BD4E00002007000000000000000000000000000000000000002400000000snowflake-2.11.0/server/lib/http.gopackage snowflake_server

import (
	"bufio"
	"bytes"
	"crypto/hmac"
	"crypto/rand"
	"crypto/sha256"
	"encoding/binary"
	"fmt"
	"io"
	"log"
	"net"
	"net/http"
	"sync"
	"time"

	"github.com/gorilla/websocket"

	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/encapsulation"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/turbotunnel"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/websocketconn"
)

const requestTimeout = 10 * time.Second

// How long to remember outgoing packets for a client, when we don't currently
// have an active WebSocket connection corresponding to that client. Because a
// client session may span multiple WebSocket connections, we keep packets we
// aren't able to send immediately in memory, for a little while but not
// indefinitely.
const clientMapTimeout = 1 * time.Minute

// How big to make the map of ClientIDs to IP addresses. The map is used in
// turbotunnelMode to store a reasonable IP address for a client session that
// may outlive any single WebSocket connection.
const clientIDAddrMapCapacity = 98304

// How long to wait for ListenAndServe or ListenAndServeTLS to return an error
// before deciding that it's not going to return.
const listenAndServeErrorTimeout = 100 * time.Millisecond

var upgrader = websocket.Upgrader{
	CheckOrigin: func(r *http.Request) bool { return true },
}

// clientIDAddrMap stores short-term mappings from ClientIDs to IP addresses.
// When we call pt.DialOr, tor wants us to provide a USERADDR string that
// represents the remote IP address of the client (for metrics purposes, etc.).
// This data structure bridges the gap between ServeHTTP, which knows about IP
// addresses, and handleStream, which is what calls pt.DialOr. The common piece
// of information linking both ends of the chain is the ClientID, which is
// attached to the WebSocket connection and every session.
var clientIDAddrMap = newClientIDMap(clientIDAddrMapCapacity)

type httpHandler struct {
	// pconns is the adapter layer between stream-oriented WebSocket
	// connections and the packet-oriented KCP layer. There are multiple of
	// these, corresponding to the multiple kcp.ServeConn in
	// Transport.Listen. Clients are assigned to a particular instance by a
	// hash of their ClientID, in order to distribute KCP processing load
	// across CPU cores.
	pconns []*turbotunnel.QueuePacketConn

	// clientIDLookupKey is a secret key used to tweak the hash-based
	// assignment of ClientID to pconn, in order to avoid manipulation of
	// hash assignments.
	clientIDLookupKey []byte
}

// newHTTPHandler creates a new http.Handler that exchanges encapsulated packets
// over incoming WebSocket connections.
func newHTTPHandler(localAddr net.Addr, numInstances int, mtu int) *httpHandler {
	pconns := make([]*turbotunnel.QueuePacketConn, 0, numInstances)
	for i := 0; i < numInstances; i++ {
		pconns = append(pconns, turbotunnel.NewQueuePacketConn(localAddr, clientMapTimeout, mtu))
	}

	clientIDLookupKey := make([]byte, 16)
	_, err := rand.Read(clientIDLookupKey)
	if err != nil {
		panic(err)
	}

	return &httpHandler{
		pconns:            pconns,
		clientIDLookupKey: clientIDLookupKey,
	}
}

// lookupPacketConn returns the element of pconns that corresponds to client ID,
// according to the hash-based mapping.
func (handler *httpHandler) lookupPacketConn(clientID turbotunnel.ClientID) *turbotunnel.QueuePacketConn {
	s := hmac.New(sha256.New, handler.clientIDLookupKey).Sum(clientID[:])
	return handler.pconns[binary.LittleEndian.Uint64(s)%uint64(len(handler.pconns))]
}

func (handler *httpHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	ws, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Println(err)
		return
	}

	conn := websocketconn.New(ws)
	defer conn.Close()

	// Pass the address of client as the remote address of incoming connection
	clientIPParam := r.URL.Query().Get("client_ip")
	addr := clientAddr(clientIPParam)

	var token [len(turbotunnel.Token)]byte
	_, err = io.ReadFull(conn, token[:])
	if err != nil {
		// Don't bother logging EOF: that happens with an unused
		// connection, which clients make frequently as they maintain a
		// pool of proxies.
		if err != io.EOF {
			log.Printf("reading token: %v", err)
		}
		return
	}

	switch {
	case bytes.Equal(token[:], turbotunnel.Token[:]):
		err = handler.turbotunnelMode(conn, addr)
	default:
		// We didn't find a matching token, which means that we are
		// dealing with a client that doesn't know about such things.
		// Close the conn as we no longer support the old
		// one-session-per-WebSocket mode.
		log.Println("Received unsupported oneshot connection")
		return
	}
	if err != nil {
		log.Println(err)
		return
	}
}

// turbotunnelMode handles clients that sent turbotunnel.Token at the start of
// their stream. These clients expect to send and receive encapsulated packets,
// with a long-lived session identified by ClientID.
func (handler *httpHandler) turbotunnelMode(conn net.Conn, addr net.Addr) error {
	// Read the ClientID prefix. Every packet encapsulated in this WebSocket
	// connection pertains to the same ClientID.
	var clientID turbotunnel.ClientID
	_, err := io.ReadFull(conn, clientID[:])
	if err != nil {
		return fmt.Errorf("reading ClientID: %w", err)
	}

	// Store a short-term mapping from the ClientID to the client IP
	// address attached to this WebSocket connection. tor will want us to
	// provide a client IP address when we call pt.DialOr. But a KCP session
	// does not necessarily correspond to any single IP address--it's
	// composed of packets that are carried in possibly multiple WebSocket
	// streams. We apply the heuristic that the IP address of the most
	// recent WebSocket connection that has had to do with a session, at the
	// time the session is established, is the IP address that should be
	// credited for the entire KCP session.
	clientIDAddrMap.Set(clientID, addr)

	pconn := handler.lookupPacketConn(clientID)

	var wg sync.WaitGroup
	wg.Add(2)
	done := make(chan struct{})

	// The remainder of the WebSocket stream consists of encapsulated
	// packets. We read them one by one and feed them into the
	// QueuePacketConn on which kcp.ServeConn was set up, which eventually
	// leads to KCP-level sessions in the acceptSessions function.
	go func() {
		defer wg.Done()
		defer close(done) // Signal the write loop to finish
		var p [2048]byte
		for {
			n, err := encapsulation.ReadData(conn, p[:])
			if err == io.ErrShortBuffer {
				err = nil
			}
			if err != nil {
				return
			}
			pconn.QueueIncoming(p[:n], clientID)
		}
	}()

	// At the same time, grab packets addressed to this ClientID and
	// encapsulate them into the downstream.
	go func() {
		defer wg.Done()
		defer conn.Close() // Signal the read loop to finish

		// Buffer encapsulation.WriteData operations to keep length
		// prefixes in the same send as the data that follows.
		bw := bufio.NewWriter(conn)
		for {
			select {
			case <-done:
				return
			case p, ok := <-pconn.OutgoingQueue(clientID):
				if !ok {
					return
				}
				_, err := encapsulation.WriteData(bw, p)
				pconn.Restore(p)
				if err == nil {
					err = bw.Flush()
				}
				if err != nil {
					return
				}
			}
		}
	}()

	wg.Wait()

	return nil
}

// ClientMapAddr is a string that represents a connecting client.
type ClientMapAddr string

func (addr ClientMapAddr) Network() string {
	return "snowflake"
}

func (addr ClientMapAddr) String() string {
	return string(addr)
}

// clientAddr returns a net.Addr for the client, suitable for use as a
// USERADDR value, or an empty address if clientIPParam is missing or invalid.
func clientAddr(clientIPParam string) net.Addr {
	if clientIPParam == "" {
		return ClientMapAddr("")
	}
	// Check if client addr is a valid IP
	clientIP := net.ParseIP(clientIPParam)
	if clientIP == nil {
		return ClientMapAddr("")
	}
	// Check if client addr is 0.0.0.0 or [::]. Some proxies erroneously
	// report an address of 0.0.0.0: https://bugs.torproject.org/33157.
	if clientIP.IsUnspecified() {
		return ClientMapAddr("")
	}
	// Add a stub port number. USERADDR requires a port number.
	return ClientMapAddr((&net.TCPAddr{IP: clientIP, Port: 1, Zone: ""}).String())
}
0707010000008C000081A400000000000000000000000167D9BD4E000004C6000000000000000000000000000000000000002B00000000snowflake-2.11.0/server/lib/server_test.gopackage snowflake_server

import (
	"net"
	"strconv"
	"testing"

	. "github.com/smartystreets/goconvey/convey"
)

func TestClientAddr(t *testing.T) {
	Convey("Testing clientAddr", t, func() {
		// good tests
		for _, test := range []struct {
			input    string
			expected net.IP
		}{
			{"1.2.3.4", net.ParseIP("1.2.3.4")},
			{"1:2::3:4", net.ParseIP("1:2::3:4")},
		} {
			useraddr := clientAddr(test.input).String()
			host, port, err := net.SplitHostPort(useraddr)
			if err != nil {
				t.Errorf("clientAddr(%q) → SplitHostPort error %v", test.input, err)
				continue
			}
			if !test.expected.Equal(net.ParseIP(host)) {
				t.Errorf("clientAddr(%q) → host %q, not %v", test.input, host, test.expected)
			}
			portNo, err := strconv.Atoi(port)
			if err != nil {
				t.Errorf("clientAddr(%q) → port %q", test.input, port)
				continue
			}
			if portNo == 0 {
				t.Errorf("clientAddr(%q) → port %d", test.input, portNo)
			}
		}

		// bad tests
		for _, input := range []string{
			"",
			"abc",
			"1.2.3.4.5",
			"[12::34]",
			"0.0.0.0",
			"[::]",
		} {
			useraddr := clientAddr(input).String()
			if useraddr != "" {
				t.Errorf("clientAddr(%q) → %q, not %q", input, useraddr, "")
			}
		}
	})
}
0707010000008D000081A400000000000000000000000167D9BD4E00002962000000000000000000000000000000000000002900000000snowflake-2.11.0/server/lib/snowflake.go/*
Package snowflake_server implements the functionality necessary to accept Snowflake
connections from Snowflake clients.

Included in the package is a Transport type that implements the Pluggable Transports v2.1 Go API
specification. To start a TLS Snowflake server using the golang.org/x/crypto/acme/autocert
library, configure a certificate manager for the server's domain name and then create a new
Transport as follows:

	// The snowflake server runs a websocket server. To run this securely, you will
	// need a valid certificate.
	certManager := &autocert.Manager{
		Prompt:     autocert.AcceptTOS,
		HostPolicy: autocert.HostWhitelist("snowflake.yourdomain.com"),
		Email:      "you@yourdomain.com",
	}

	transport := snowflake_server.NewSnowflakeServer(certManager.GetCertificate)

The Listen function starts a new listener, and Accept will return incoming Snowflake connections:

	ln, err := transport.Listen(addr)
	if err != nil {
		// handle error
	}
	for {
		conn, err := ln.Accept()
		if err != nil {
			// handle error
		}
		// handle conn
	}
*/
package snowflake_server

import (
	"crypto/tls"
	"errors"
	"fmt"
	"io"
	"log"
	"net"
	"net/http"
	"sync"
	"time"

	"github.com/xtaci/kcp-go/v5"
	"github.com/xtaci/smux"
	"golang.org/x/net/http2"

	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/turbotunnel"
)

const (
	// WindowSize is the number of packets in the send and receive window of a KCP connection.
	WindowSize = 65535
	// StreamSize controls the maximum amount of in flight data between a client and server.
	StreamSize = 1048576 // 1MB
)

// Transport is a structure with methods that conform to the Go PT v2.1 API
// https://github.com/Pluggable-Transports/Pluggable-Transports-spec/blob/master/releases/PTSpecV2.1/Pluggable%20Transport%20Specification%20v2.1%20-%20Go%20Transport%20API.pdf
type Transport struct {
	getCertificate func(*tls.ClientHelloInfo) (*tls.Certificate, error)
}

// NewSnowflakeServer returns a new server-side Transport for Snowflake.
func NewSnowflakeServer(getCertificate func(*tls.ClientHelloInfo) (*tls.Certificate, error)) *Transport {
	return &Transport{getCertificate: getCertificate}
}

// Listen starts a listener on addr that will accept incoming Snowflake
// (turbotunnel) connections.
func (t *Transport) Listen(addr net.Addr, numKCPInstances int) (*SnowflakeListener, error) {
	listener := &SnowflakeListener{
		addr:   addr,
		queue:  make(chan net.Conn, 65534),
		closed: make(chan struct{}),
		ln:     make([]*kcp.Listener, 0, numKCPInstances),
	}

	// kcp-go doesn't provide an accessor for the current MTU setting (and
	// anyway we could not create a kcp.Listener without creating a
	// net.PacketConn for it first), so assume the default kcp.IKCP_MTU_DEF
	// (1400 bytes) and don't increase it elsewhere.
	handler := newHTTPHandler(addr, numKCPInstances, kcp.IKCP_MTU_DEF)
	server := &http.Server{
		Addr:        addr.String(),
		Handler:     handler,
		ReadTimeout: requestTimeout,
	}
	// We need to override server.TLSConfig.GetCertificate--but first
	// server.TLSConfig needs to be non-nil. If we just create our own new
	// &tls.Config, it will lack the default settings that the net/http
	// package sets up for things like HTTP/2. Therefore we first call
	// http2.ConfigureServer for its side effect of initializing
	// server.TLSConfig properly. An alternative would be to make a dummy
	// net.Listener, call Serve on it, and let it return.
	// https://github.com/golang/go/issues/16588#issuecomment-237386446
	err := http2.ConfigureServer(server, nil)
	if err != nil {
		return nil, err
	}
	server.TLSConfig.GetCertificate = t.getCertificate

	// Another unfortunate effect of the inseparable net/http ListenAndServe
	// is that we can't check for Listen errors like "permission denied" and
	// "address already in use" without potentially entering the infinite
	// loop of Serve. The hack we apply here is to wait a short time,
	// listenAndServeErrorTimeout, to see if an error is returned (because
	// it's better if the error message goes to the tor log through
	// SMETHOD-ERROR than if it only goes to the snowflake log).
	errChan := make(chan error)
	go func() {
		if t.getCertificate == nil {
			// TLS is disabled
			log.Printf("listening with plain HTTP on %s", addr)
			err := server.ListenAndServe()
			if err != nil {
				log.Printf("error in ListenAndServe: %s", err)
			}
			errChan <- err
		} else {
			log.Printf("listening with HTTPS on %s", addr)
			err := server.ListenAndServeTLS("", "")
			if err != nil {
				log.Printf("error in ListenAndServeTLS: %s", err)
			}
			errChan <- err
		}
	}()
	select {
	case err = <-errChan:
		break
	case <-time.After(listenAndServeErrorTimeout):
		break
	}
	if err != nil {
		return nil, err
	}

	listener.server = server

	// Start the KCP engines, set up to read and write their packets over the
	// WebSocket connections that arrive at the web server.
	// handler.ServeHTTP is responsible for encapsulation/decapsulation of
	// packets on behalf of KCP. KCP takes those packets and turns them into
	// sessions which appear in the acceptSessions function.
	for i, pconn := range handler.pconns {
		ln, err := kcp.ServeConn(nil, 0, 0, pconn)
		if err != nil {
			server.Close()
			return nil, err
		}
		go func() {
			defer ln.Close()
			err := listener.acceptSessions(ln)
			if err != nil {
				log.Printf("acceptSessions %d: %v", i, err)
			}
		}()
		listener.ln = append(listener.ln, ln)
	}

	return listener, nil
}

// SnowflakeListener is a net.Listener that accepts incoming Snowflake client
// connections, which arrive as smux streams queued by the KCP engines.
type SnowflakeListener struct {
	addr      net.Addr
	queue     chan net.Conn
	server    *http.Server
	ln        []*kcp.Listener
	closed    chan struct{}
	closeOnce sync.Once
}

// Accept allows the caller to accept incoming Snowflake connections.
// We accept connections from a queue that is fed by acceptStreams as
// incoming smux streams arrive.
func (l *SnowflakeListener) Accept() (net.Conn, error) {
	select {
	case <-l.closed:
		// channel has been closed, no longer accepting connections
		return nil, io.ErrClosedPipe
	case conn := <-l.queue:
		return conn, nil
	}
}

// Addr returns the address of the SnowflakeListener
func (l *SnowflakeListener) Addr() net.Addr {
	return l.addr
}

// Close closes the Snowflake listener.
func (l *SnowflakeListener) Close() error {
	// Close our HTTP server and our KCP listener
	l.closeOnce.Do(func() {
		close(l.closed)
		l.server.Close()
		for _, ln := range l.ln {
			ln.Close()
		}
	})
	return nil
}

// acceptStreams layers an smux.Session on the KCP connection and awaits streams
// on it. Passes each stream to our SnowflakeListener accept queue.
func (l *SnowflakeListener) acceptStreams(conn *kcp.UDPSession) error {
	// Look up the IP address associated with this KCP session, via the
	// ClientID that is returned by the session's RemoteAddr method.
	addr, ok := clientIDAddrMap.Get(conn.RemoteAddr().(turbotunnel.ClientID))
	if !ok {
		// This means that the map is tending to run over capacity, not
		// just that there was no client_ip on the incoming connection.
		// We store "" in the map in the absence of client_ip. This log
		// message means you should increase clientIDAddrMapCapacity.
		log.Printf("no address in clientID-to-IP map (capacity %d)", clientIDAddrMapCapacity)
	}

	smuxConfig := smux.DefaultConfig()
	smuxConfig.Version = 2
	smuxConfig.KeepAliveTimeout = 4 * time.Minute
	smuxConfig.MaxStreamBuffer = StreamSize
	sess, err := smux.Server(conn, smuxConfig)
	if err != nil {
		return err
	}

	for {
		stream, err := sess.AcceptStream()
		if err != nil {
			if err, ok := err.(net.Error); ok && err.Temporary() {
				continue
			}
			return err
		}
		l.queueConn(&SnowflakeClientConn{stream: stream, address: addr})
	}
}

// acceptSessions listens for incoming KCP connections and passes them to
// acceptStreams. It is handler.ServeHTTP that provides the network interface
// that drives this function.
func (l *SnowflakeListener) acceptSessions(ln *kcp.Listener) error {
	for {
		conn, err := ln.AcceptKCP()
		if err != nil {
			if err, ok := err.(net.Error); ok && err.Temporary() {
				continue
			}
			return err
		}
		// Permit coalescing the payloads of consecutive sends.
		conn.SetStreamMode(true)
		// Set the maximum send and receive window sizes to a high number
		// Removes KCP bottlenecks: https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/issues/40026
		conn.SetWindowSize(WindowSize, WindowSize)
		// Disable the dynamic congestion window (limit only by the
		// maximum of local and remote static windows).
		conn.SetNoDelay(
			0, // default nodelay
			0, // default interval
			0, // default resend
			1, // nc=1 => congestion window off
		)
		go func() {
			defer conn.Close()
			err := l.acceptStreams(conn)
			if err != nil && !errors.Is(err, io.ErrClosedPipe) {
				log.Printf("acceptStreams: %v", err)
			}
		}()
	}
}

func (l *SnowflakeListener) queueConn(conn net.Conn) error {
	select {
	case <-l.closed:
		return fmt.Errorf("accepted connection on closed listener")
	case l.queue <- conn:
		return nil
	}
}

// SnowflakeClientConn is a wrapper for the underlying turbotunnel conn
// (smux.Stream). It implements the net.Conn and io.WriterTo interfaces. The
// RemoteAddr method is overridden to refer to a real IP address, looked up from
// the client address map, rather than an abstract client ID.
type SnowflakeClientConn struct {
	stream  *smux.Stream
	address net.Addr
}

// Forward net.Conn methods, other than RemoteAddr, to the inner stream.
func (conn *SnowflakeClientConn) Read(b []byte) (int, error)    { return conn.stream.Read(b) }
func (conn *SnowflakeClientConn) Write(b []byte) (int, error)   { return conn.stream.Write(b) }
func (conn *SnowflakeClientConn) Close() error                  { return conn.stream.Close() }
func (conn *SnowflakeClientConn) LocalAddr() net.Addr           { return conn.stream.LocalAddr() }
func (conn *SnowflakeClientConn) SetDeadline(t time.Time) error { return conn.stream.SetDeadline(t) }
func (conn *SnowflakeClientConn) SetReadDeadline(t time.Time) error {
	return conn.stream.SetReadDeadline(t)
}

func (conn *SnowflakeClientConn) SetWriteDeadline(t time.Time) error {
	return conn.stream.SetWriteDeadline(t)
}

// RemoteAddr returns the mapped client address of the Snowflake connection.
func (conn *SnowflakeClientConn) RemoteAddr() net.Addr {
	return conn.address
}

// WriteTo implements the io.WriterTo interface by passing the call to the
// underlying smux.Stream.
func (conn *SnowflakeClientConn) WriteTo(w io.Writer) (int64, error) {
	return conn.stream.WriteTo(w)
}
0707010000008E000081A400000000000000000000000167D9BD4E00000C02000000000000000000000000000000000000002B00000000snowflake-2.11.0/server/lib/turbotunnel.gopackage snowflake_server

import (
	"net"
	"sync"

	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/turbotunnel"
)

// clientIDMap is a fixed-capacity mapping from ClientIDs to a net.Addr.
// Adding a new entry using the Set method causes the oldest existing entry to
// be forgotten.
//
// This data type is meant to be used to remember the IP address associated with
// a ClientID, during the short period of time between when a WebSocket
// connection with that ClientID began, and when a KCP session is established.
//
// The design requirements of this type are that it needs to remember a mapping
// for only a short time, and old entries should expire so as not to consume
// unbounded memory. It is not a critical error if an entry is forgotten before
// it is needed; better to forget entries than to use too much memory.
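//
// A sketch of typical use (the variable names here are illustrative only):
//
//	m := newClientIDMap(1024)
//	m.Set(clientID, addr)    // when a WebSocket connection arrives
//	a, ok := m.Get(clientID) // when the corresponding KCP session is accepted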
type clientIDMap struct {
	lock sync.Mutex
	// entries is a circular buffer of (ClientID, addr) pairs.
	entries []struct {
		clientID turbotunnel.ClientID
		addr     net.Addr
	}
	// oldest is the index of the oldest member of the entries buffer, the
	// one that will be overwritten at the next call to Set.
	oldest int
	// current points to the index of the most recent entry corresponding to
	// each ClientID.
	current map[turbotunnel.ClientID]int
}

// newClientIDMap makes a new clientIDMap with the given capacity.
func newClientIDMap(capacity int) *clientIDMap {
	return &clientIDMap{
		entries: make([]struct {
			clientID turbotunnel.ClientID
			addr     net.Addr
		}, capacity),
		oldest:  0,
		current: make(map[turbotunnel.ClientID]int),
	}
}

// Set adds a mapping from clientID to addr, replacing any previous mapping for
// clientID. It may also cause the clientIDMap to forget at most one other
// mapping, the oldest one.
func (m *clientIDMap) Set(clientID turbotunnel.ClientID, addr net.Addr) {
	m.lock.Lock()
	defer m.lock.Unlock()
	if len(m.entries) == 0 {
		// The invariant m.oldest < len(m.entries) does not hold in this
		// special case.
		return
	}
	// m.oldest is the index of the entry we're about to overwrite. If it's
	// the current entry for any ClientID, we need to delete that clientID
	// from the current map (that ClientID is now forgotten).
	if i, ok := m.current[m.entries[m.oldest].clientID]; ok && i == m.oldest {
		delete(m.current, m.entries[m.oldest].clientID)
	}
	// Overwrite the oldest entry.
	m.entries[m.oldest].clientID = clientID
	m.entries[m.oldest].addr = addr
	// Add the new entry to the quick-lookup map.
	m.current[clientID] = m.oldest
	// What was the oldest entry is now the newest.
	m.oldest = (m.oldest + 1) % len(m.entries)
}

// Get returns a previously stored mapping. The second return value indicates
// whether clientID was actually present in the map. If it is false, then the
// returned address will be nil.
func (m *clientIDMap) Get(clientID turbotunnel.ClientID) (net.Addr, bool) {
	m.lock.Lock()
	defer m.lock.Unlock()
	if i, ok := m.current[clientID]; ok {
		return m.entries[i].addr, true
	} else {
		return nil, false
	}
}
0707010000008F000081A400000000000000000000000167D9BD4E00000F5C000000000000000000000000000000000000003000000000snowflake-2.11.0/server/lib/turbotunnel_test.gopackage snowflake_server

import (
	"encoding/binary"
	"net"
	"testing"

	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/turbotunnel"
)

func TestClientIDMap(t *testing.T) {
	// Convert a uint64 into a ClientID.
	id := func(n uint64) turbotunnel.ClientID {
		var clientID turbotunnel.ClientID
		binary.PutUvarint(clientID[:], n)
		return clientID
	}

	// Does m.Get(key) and checks that the output matches what is expected.
	expectGet := func(m *clientIDMap, clientID turbotunnel.ClientID, expectedAddr string, expectedOK bool) {
		t.Helper()
		addr, ok := m.Get(clientID)
		if (ok && addr.String() != expectedAddr) || ok != expectedOK {
			t.Errorf("expected (%+q, %v), got (%+q, %v)", expectedAddr, expectedOK, addr, ok)
		}
	}

	// Checks that the len of m.current is as expected.
	expectSize := func(m *clientIDMap, expectedLen int) {
		t.Helper()
		if len(m.current) != expectedLen {
			t.Errorf("expected map len %d, got %d %+v", expectedLen, len(m.current), m.current)
		}
	}

	// Convert a string to a net.Addr
	ip := func(addr string) net.Addr {
		ret, err := net.ResolveIPAddr("ip", addr)
		if err != nil {
			t.Errorf("received error: %s", err.Error())
		}
		return ret
	}

	// Zero-capacity map can't remember anything.
	{
		m := newClientIDMap(0)
		expectSize(m, 0)
		expectGet(m, id(0), "", false)
		expectGet(m, id(1234), "", false)

		m.Set(id(0), ip("1.1.1.1"))
		expectSize(m, 0)
		expectGet(m, id(0), "", false)
		expectGet(m, id(1234), "", false)

		m.Set(id(1234), ip("1.1.1.1"))
		expectSize(m, 0)
		expectGet(m, id(0), "", false)
		expectGet(m, id(1234), "", false)
	}

	{
		m := newClientIDMap(1)
		expectSize(m, 0)
		expectGet(m, id(0), "", false)
		expectGet(m, id(1), "", false)

		m.Set(id(0), ip("1.1.1.1"))
		expectSize(m, 1)
		expectGet(m, id(0), "1.1.1.1", true)
		expectGet(m, id(1), "", false)

		m.Set(id(1), ip("1.1.1.2")) // forgets the (0, "1.1.1.1") entry
		expectSize(m, 1)
		expectGet(m, id(0), "", false)
		expectGet(m, id(1), "1.1.1.2", true)

		m.Set(id(1), ip("1.1.1.3")) // forgets the (1, "1.1.1.2") entry
		expectSize(m, 1)
		expectGet(m, id(0), "", false)
		expectGet(m, id(1), "1.1.1.3", true)
	}

	{
		m := newClientIDMap(5)
		m.Set(id(0), ip("1.1.1.1"))
		m.Set(id(1), ip("1.1.1.2"))
		m.Set(id(2), ip("1.1.1.3"))
		m.Set(id(0), ip("1.1.1.4")) // shadows the (0, "1.1.1.1") entry
		m.Set(id(3), ip("1.1.1.5"))
		expectSize(m, 4)
		expectGet(m, id(0), "1.1.1.4", true)
		expectGet(m, id(1), "1.1.1.2", true)
		expectGet(m, id(2), "1.1.1.3", true)
		expectGet(m, id(3), "1.1.1.5", true)
		expectGet(m, id(4), "", false)

		m.Set(id(4), ip("1.1.1.6")) // forgets the (0, "1.1.1.1") entry but should preserve (0, "1.1.1.4")
		expectSize(m, 5)
		expectGet(m, id(0), "1.1.1.4", true)
		expectGet(m, id(1), "1.1.1.2", true)
		expectGet(m, id(2), "1.1.1.3", true)
		expectGet(m, id(3), "1.1.1.5", true)
		expectGet(m, id(4), "1.1.1.6", true)

		m.Set(id(5), ip("1.1.1.7")) // forgets the (1, "1.1.1.2") entry
		m.Set(id(0), ip("1.1.1.8")) // forgets the (2, "1.1.1.3") entry and shadows (0, "1.1.1.4")
		expectSize(m, 4)
		expectGet(m, id(0), "1.1.1.8", true)
		expectGet(m, id(1), "", false)
		expectGet(m, id(2), "", false)
		expectGet(m, id(3), "1.1.1.5", true)
		expectGet(m, id(4), "1.1.1.6", true)
		expectGet(m, id(5), "1.1.1.7", true)

		m.Set(id(0), ip("1.1.1.9"))  // forgets the (0, "1.1.1.4") entry and shadows (0, "1.1.1.8")
		m.Set(id(0), ip("1.1.1.10")) // forgets the (3, "1.1.1.5") entry and shadows (0, "1.1.1.9")
		m.Set(id(0), ip("1.1.1.11")) // forgets the (4, "1.1.1.6") entry and shadows (0, "1.1.1.10")
		m.Set(id(0), ip("1.1.1.12")) // forgets the (5, "1.1.1.7") entry and shadows (0, "1.1.1.11")
		expectSize(m, 1)
		expectGet(m, id(0), "1.1.1.12", true)
		expectGet(m, id(1), "", false)
		expectGet(m, id(2), "", false)
		expectGet(m, id(3), "", false)
		expectGet(m, id(4), "", false)
		expectGet(m, id(5), "", false)
	}
}
07070100000090000081A400000000000000000000000167D9BD4E00000442000000000000000000000000000000000000002400000000snowflake-2.11.0/server/randaddr.gopackage main

import (
	"crypto/rand"
	"fmt"
	"net"
)

// randIPAddr generates a random IP address within the network represented by
// ipnet.
func randIPAddr(ipnet *net.IPNet) (net.IP, error) {
	if len(ipnet.IP) != len(ipnet.Mask) {
		return nil, fmt.Errorf("IP and mask have unequal lengths (%v and %v)", len(ipnet.IP), len(ipnet.Mask))
	}
	ip := make(net.IP, len(ipnet.IP))
	_, err := rand.Read(ip)
	if err != nil {
		return nil, err
	}
	for i := 0; i < len(ipnet.IP); i++ {
		ip[i] = (ipnet.IP[i] & ipnet.Mask[i]) | (ip[i] & ^ipnet.Mask[i])
	}
	return ip, nil
}

// parseIPCIDR parses a CIDR-notation IP address and prefix length; or if that
// fails, as a plain IP address (with the prefix length equal to the address
// length).
func parseIPCIDR(s string) (*net.IPNet, error) {
	_, ipnet, err := net.ParseCIDR(s)
	if err == nil {
		return ipnet, nil
	}
	// IP/mask failed; try just IP now, but remember err, to return it in
	// case that fails too.
	ip := net.ParseIP(s)
	if ip != nil {
		return &net.IPNet{IP: ip, Mask: net.CIDRMask(len(ip)*8, len(ip)*8)}, nil
	}
	return nil, err
}
07070100000091000081A400000000000000000000000167D9BD4E00001011000000000000000000000000000000000000002900000000snowflake-2.11.0/server/randaddr_test.gopackage main

import (
	"bytes"
	"net"
	"testing"
)

func mustParseCIDR(s string) *net.IPNet {
	_, ipnet, err := net.ParseCIDR(s)
	if err != nil {
		panic(err)
	}
	return ipnet
}

func TestRandAddr(t *testing.T) {
outer:
	for _, ipnet := range []*net.IPNet{
		mustParseCIDR("127.0.0.1/0"),
		mustParseCIDR("127.0.0.1/24"),
		mustParseCIDR("127.0.0.55/32"),
		mustParseCIDR("2001:db8::1234/0"),
		mustParseCIDR("2001:db8::1234/32"),
		mustParseCIDR("2001:db8::1234/128"),
		// Non-canonical masks (that don't consist of 1s followed by 0s)
		// work too, why not.
		&net.IPNet{
			IP:   net.IP{1, 2, 3, 4},
			Mask: net.IPMask{0x00, 0x07, 0xff, 0xff},
		},
	} {
		for i := 0; i < 100; i++ {
			ip, err := randIPAddr(ipnet)
			if err != nil {
				t.Errorf("%v returned error %v", ipnet, err)
				continue outer
			}
			if !ipnet.Contains(ip) {
				t.Errorf("%v does not contain %v", ipnet, ip)
				continue outer
			}
		}
	}
}

func TestRandAddrUnequalLengths(t *testing.T) {
	for _, ipnet := range []*net.IPNet{
		&net.IPNet{
			IP:   net.IP{1, 2, 3, 4},
			Mask: net.CIDRMask(32, 128),
		},
		&net.IPNet{
			IP:   net.IP{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
			Mask: net.CIDRMask(24, 32),
		},
		&net.IPNet{
			IP:   net.IP{1, 2, 3, 4},
			Mask: net.IPMask{},
		},
		&net.IPNet{
			IP:   net.IP{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
			Mask: net.IPMask{},
		},
	} {
		_, err := randIPAddr(ipnet)
		if err == nil {
			t.Errorf("%v did not result in error, but should have", ipnet)
		}
	}
}

func BenchmarkRandAddr(b *testing.B) {
	for _, test := range []struct {
		label string
		ipnet net.IPNet
	}{
		{"IPv4/32", net.IPNet{IP: net.IP{127, 0, 0, 1}, Mask: net.CIDRMask(32, 32)}},
		{"IPv4/24", net.IPNet{IP: net.IP{127, 0, 0, 1}, Mask: net.CIDRMask(32, 32)}},
		{"IPv6/64", net.IPNet{
			IP:   net.IP{0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x12, 0x34},
			Mask: net.CIDRMask(64, 128),
		}},
		{"IPv6/128", net.IPNet{
			IP:   net.IP{0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x12, 0x34},
			Mask: net.CIDRMask(128, 128),
		}},
	} {
		b.Run(test.label, func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				_, err := randIPAddr(&test.ipnet)
				if err != nil {
					b.Fatal(err)
				}
			}
		})
	}
}

func ipNetEqual(a, b *net.IPNet) bool {
	if !a.IP.Equal(b.IP) {
		return false
	}
	// Comparing masks for equality is a little tricky because they may be
	// different lengths. For masks in canonical form (those for which
	// Size() returns other than (0, 0)), we consider two masks equal if the
	// numbers of bits *not* covered by the prefix are equal; e.g.
	// (120, 128) is equal to (24, 32), because they both have 8 bits not in
	// the prefix. If either mask is not in canonical form, we require them
	// to be equal as byte arrays (which includes length).
	aOnes, aBits := a.Mask.Size()
	bOnes, bBits := b.Mask.Size()
	if aBits == 0 || bBits == 0 {
		return bytes.Equal(a.Mask, b.Mask)
	} else {
		return aBits-aOnes == bBits-bOnes
	}
}

func TestParseIPCIDR(t *testing.T) {
	// Well-formed inputs.
	for _, test := range []struct {
		input    string
		expected *net.IPNet
	}{
		{"127.0.0.123", mustParseCIDR("127.0.0.123/32")},
		{"127.0.0.123/0", mustParseCIDR("127.0.0.123/0")},
		{"127.0.0.123/24", mustParseCIDR("127.0.0.123/24")},
		{"127.0.0.123/32", mustParseCIDR("127.0.0.123/32")},
		{"2001:db8::1234", mustParseCIDR("2001:db8::1234/128")},
		{"2001:db8::1234/0", mustParseCIDR("2001:db8::1234/0")},
		{"2001:db8::1234/32", mustParseCIDR("2001:db8::1234/32")},
		{"2001:db8::1234/128", mustParseCIDR("2001:db8::1234/128")},
	} {
		ipnet, err := parseIPCIDR(test.input)
		if err != nil {
			t.Errorf("%q returned error %v", test.input, err)
			continue
		}
		if !ipNetEqual(ipnet, test.expected) {
			t.Errorf("%q → %v, expected %v", test.input, ipnet, test.expected)
		}
	}

	// Bad inputs.
	for _, input := range []string{
		"",
		"1.2.3",
		"1.2.3/16",
		"2001:db8:1234",
		"2001:db8:1234/64",
		"localhost",
	} {
		_, err := parseIPCIDR(input)
		if err == nil {
			t.Errorf("%q did not result in error, but should have", input)
		}
	}
}
07070100000092000081A400000000000000000000000167D9BD4E00002641000000000000000000000000000000000000002200000000snowflake-2.11.0/server/server.go// Snowflake-specific websocket server plugin. It reports the transport name as
// "snowflake".
package main

import (
	"errors"
	"flag"
	"fmt"
	"io"
	"log"
	"net"
	"net/http"
	"os"
	"os/signal"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"syscall"

	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/ptutil/safelog"
	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/version"
	"golang.org/x/crypto/acme/autocert"

	pt "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/goptlib"
	sf "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/server/lib"
)

const ptMethodName = "snowflake"

var ptInfo pt.ServerInfo

func usage() {
	fmt.Fprintf(os.Stderr, `Usage: %s [OPTIONS]

WebSocket server pluggable transport for Snowflake. Works only as a managed
proxy. Uses TLS with ACME (Let's Encrypt) by default. Set the certificate
hostnames with the --acme-hostnames option. Use ServerTransportListenAddr in
torrc to choose the listening port. When using TLS, this program will open an
additional HTTP listener on port 80 to work with ACME.

`, os.Args[0])
	flag.PrintDefaults()
}

// proxy copies data bidirectionally from one connection to another.
func proxy(local *net.TCPConn, conn net.Conn) {
	var wg sync.WaitGroup
	wg.Add(2)

	go func() {
		if _, err := io.Copy(conn, local); err != nil && !errors.Is(err, io.ErrClosedPipe) {
			log.Printf("error copying ORPort to WebSocket %v", err)
		}
		local.CloseRead()
		conn.Close()
		wg.Done()
	}()
	go func() {
		if _, err := io.Copy(local, conn); err != nil && !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrClosedPipe) {
			log.Printf("error copying WebSocket to ORPort %v", err)
		}
		local.CloseWrite()
		conn.Close()
		wg.Done()
	}()

	wg.Wait()
}

// handleConn bidirectionally connects a client snowflake connection with the
// ORPort. If orPortSrcAddr is not nil, addresses from the given range are used
// when dialing the ORPort.
func handleConn(conn net.Conn, orPortSrcAddr *net.IPNet) error {
	addr := conn.RemoteAddr().String()
	statsChannel <- addr != ""

	dialer := net.Dialer{
		Control: dialerControl,
	}
	if orPortSrcAddr != nil {
		// Use a random source IP address in the given range.
		ip, err := randIPAddr(orPortSrcAddr)
		if err != nil {
			return err
		}
		dialer.LocalAddr = &net.TCPAddr{IP: ip}
	}
	or, err := pt.DialOrWithDialer(&dialer, &ptInfo, addr, ptMethodName)
	if err != nil {
		return fmt.Errorf("failed to connect to ORPort: %s", err)
	}
	defer or.Close()

	proxy(or.(*net.TCPConn), conn)
	return nil
}

// acceptLoop accepts incoming client snowflake connections and passes them to
// handleConn. If orPortSrcAddr is not nil, addresses from the given range are
// used when dialing the ORPort.
func acceptLoop(ln net.Listener, orPortSrcAddr *net.IPNet) {
	for {
		conn, err := ln.Accept()
		if err != nil {
			if err, ok := err.(net.Error); ok && err.Temporary() {
				continue
			}
			log.Printf("Snowflake accept error: %s", err)
			break
		}
		go func() {
			defer conn.Close()
			err := handleConn(conn, orPortSrcAddr)
			if err != nil {
				log.Printf("handleConn: %v", err)
			}
		}()
	}
}

func getCertificateCacheDir() (string, error) {
	stateDir, err := pt.MakeStateDir()
	if err != nil {
		return "", err
	}
	return filepath.Join(stateDir, "snowflake-certificate-cache"), nil
}

func main() {
	var acmeEmail string
	var acmeHostnamesCommas string
	var disableTLS bool
	var logFilename string
	var unsafeLogging bool
	var versionFlag bool

	flag.Usage = usage
	flag.StringVar(&acmeEmail, "acme-email", "", "optional contact email for Let's Encrypt notifications")
	flag.StringVar(&acmeHostnamesCommas, "acme-hostnames", "", "comma-separated hostnames for TLS certificate")
	flag.BoolVar(&disableTLS, "disable-tls", false, "don't use HTTPS")
	flag.StringVar(&logFilename, "log", "", "log file to write to")
	flag.BoolVar(&unsafeLogging, "unsafe-logging", false, "prevent logs from being scrubbed")
	flag.BoolVar(&versionFlag, "version", false, "display version info to stderr and quit")
	flag.Parse()

	if versionFlag {
		fmt.Fprintf(os.Stderr, "snowflake-server %s", version.ConstructResult())
		os.Exit(0)
	}

	log.SetFlags(log.LstdFlags | log.LUTC)

	var logOutput io.Writer = os.Stderr
	if logFilename != "" {
		f, err := os.OpenFile(logFilename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
		if err != nil {
			log.Fatalf("can't open log file: %s", err)
		}
		defer f.Close()
		logOutput = f
	}
	if unsafeLogging {
		log.SetOutput(logOutput)
	} else {
		// We want to send the log output through our scrubber first
		log.SetOutput(&safelog.LogScrubber{Output: logOutput})
	}

	log.Printf("snowflake-server %s\n", version.GetVersion())

	if !disableTLS && acmeHostnamesCommas == "" {
		log.Fatal("the --acme-hostnames option is required")
	}
	acmeHostnames := strings.Split(acmeHostnamesCommas, ",")

	log.Printf("starting")
	var err error
	ptInfo, err = pt.ServerSetup(nil)
	if err != nil {
		log.Fatalf("error in setup: %s", err)
	}
	pt.ReportVersion("snowflake-server", version.GetVersion())

	go statsThread()

	var certManager *autocert.Manager
	if !disableTLS {
		log.Printf("ACME hostnames: %q", acmeHostnames)

		var cache autocert.Cache
		cacheDir, err := getCertificateCacheDir()
		if err == nil {
			log.Printf("caching ACME certificates in directory %q", cacheDir)
			cache = autocert.DirCache(cacheDir)
		} else {
			log.Printf("disabling ACME certificate cache: %s", err)
		}

		certManager = &autocert.Manager{
			Prompt:     autocert.AcceptTOS,
			HostPolicy: autocert.HostWhitelist(acmeHostnames...),
			Email:      acmeEmail,
			Cache:      cache,
		}
	}

	// The ACME HTTP-01 responder only works when it is running on port 80.
	// We actually open the port in the loop below, so that any errors can
	// be reported in the SMETHOD-ERROR of some bindaddr.
	// https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#http-challenge
	needHTTP01Listener := !disableTLS

	listeners := make([]net.Listener, 0)
	for _, bindaddr := range ptInfo.Bindaddrs {
		if bindaddr.MethodName != ptMethodName {
			pt.SmethodError(bindaddr.MethodName, "no such method")
			continue
		}

		if needHTTP01Listener {
			addr := *bindaddr.Addr
			addr.Port = 80
			log.Printf("Starting HTTP-01 ACME listener")
			var lnHTTP01 *net.TCPListener
			lnHTTP01, err := net.ListenTCP("tcp", &addr)
			if err != nil {
				log.Printf("error opening HTTP-01 ACME listener: %s", err)
				pt.SmethodError(bindaddr.MethodName, "HTTP-01 ACME listener: "+err.Error())
				continue
			}
			server := &http.Server{
				Addr:    addr.String(),
				Handler: certManager.HTTPHandler(nil),
			}
			go func() {
				log.Fatal(server.Serve(lnHTTP01))
			}()
			listeners = append(listeners, lnHTTP01)
			needHTTP01Listener = false
		}

		// We're not capable of listening on port 0 (i.e., an ephemeral port
		// unknown in advance). The reason is that while the net/http package
		// exposes ListenAndServe and ListenAndServeTLS, those functions never
		// return, so there's no opportunity to find out what the port number
		// is, in between the Listen and Serve steps.
		// https://groups.google.com/d/msg/Golang-nuts/3F1VRCCENp8/3hcayZiwYM8J
		if bindaddr.Addr.Port == 0 {
			err := fmt.Errorf(
				"cannot listen on port %d; configure a port using ServerTransportListenAddr",
				bindaddr.Addr.Port)
			log.Printf("error opening listener: %s", err)
			pt.SmethodError(bindaddr.MethodName, err.Error())
			continue
		}

		var transport *sf.Transport
		args := pt.Args{}
		if disableTLS {
			args.Add("tls", "no")
			transport = sf.NewSnowflakeServer(nil)
		} else {
			args.Add("tls", "yes")
			for _, hostname := range acmeHostnames {
				args.Add("hostname", hostname)
			}
			transport = sf.NewSnowflakeServer(certManager.GetCertificate)
		}

		// Are we requested to use source addresses from a particular
		// range when dialing the ORPort for this transport?
		var orPortSrcAddr *net.IPNet
		if orPortSrcAddrCIDR, ok := bindaddr.Options.Get("orport-srcaddr"); ok {
			ipnet, err := parseIPCIDR(orPortSrcAddrCIDR)
			if err != nil {
				err = fmt.Errorf("parsing srcaddr: %w", err)
				log.Println(err)
				pt.SmethodError(bindaddr.MethodName, err.Error())
				continue
			}
			orPortSrcAddr = ipnet
		}

		numKCPInstances := 1
		// Are we requested to run a certain number of KCP state
		// machines?
		if value, ok := bindaddr.Options.Get("num-turbotunnel"); ok {
			n, err := strconv.Atoi(value)
			if err == nil && n < 1 {
				err = fmt.Errorf("cannot be less than 1")
			}
			if err != nil {
				err = fmt.Errorf("parsing num-turbotunnel: %w", err)
				log.Println(err)
				pt.SmethodError(bindaddr.MethodName, err.Error())
				continue
			}
			numKCPInstances = n
		}

		ln, err := transport.Listen(bindaddr.Addr, numKCPInstances)
		if err != nil {
			log.Printf("error opening listener: %s", err)
			pt.SmethodError(bindaddr.MethodName, err.Error())
			continue
		}
		defer ln.Close()
		go acceptLoop(ln, orPortSrcAddr)
		pt.SmethodArgs(bindaddr.MethodName, bindaddr.Addr, args)
		listeners = append(listeners, ln)
	}
	pt.SmethodsDone()

	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGTERM)

	if os.Getenv("TOR_PT_EXIT_ON_STDIN_CLOSE") == "1" {
		// This environment variable means we should treat EOF on stdin
		// just like SIGTERM: https://bugs.torproject.org/15435.
		go func() {
			if _, err := io.Copy(io.Discard, os.Stdin); err != nil {
				log.Printf("error copying os.Stdin to io.Discard: %v", err)
			}
			log.Printf("synthesizing SIGTERM because of stdin close")
			sigChan <- syscall.SIGTERM
		}()
	}

	// Wait for a signal.
	sig := <-sigChan

	// Signal received, shut down.
	log.Printf("caught signal %q, exiting", sig)
	for _, ln := range listeners {
		ln.Close()
	}
}
07070100000093000081A400000000000000000000000167D9BD4E00000368000000000000000000000000000000000000002100000000snowflake-2.11.0/server/stats.gopackage main

// This code handles periodic statistics logging.
//
// The only thing it keeps track of is how many connections had the client_ip
// parameter. Write true to statsChannel to record a connection with client_ip;
// write false for a connection without it.
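//
// A minimal sketch of a call site (clientIP here is illustrative; the real
// call site is in handleConn in server.go):
//
//	statsChannel <- (clientIP != "")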

import (
	"log"
	"time"
)

const (
	statsInterval = 24 * time.Hour
)

var (
	statsChannel = make(chan bool)
)

func statsThread() {
	var numClientIP, numConnections uint64
	prevTime := time.Now()
	deadline := time.After(statsInterval)
	for {
		select {
		case v := <-statsChannel:
			if v {
				numClientIP++
			}
			numConnections++
		case <-deadline:
			now := time.Now()
			log.Printf("in the past %.f s, %d/%d connections had client_ip",
				(now.Sub(prevTime)).Seconds(),
				numClientIP, numConnections)
			numClientIP = 0
			numConnections = 0
			prevTime = now
			deadline = time.After(statsInterval)
		}
	}
}
07070100000094000081A400000000000000000000000167D9BD4E00000104000000000000000000000000000000000000001E00000000snowflake-2.11.0/server/torrcSocksPort 0
ORPort 9001
ExtORPort auto
BridgeRelay 1

ServerTransportListenAddr snowflake 0.0.0.0:443
ServerTransportPlugin snowflake exec ./server --acme-hostnames snowflake.example --acme-email admin@snowflake.example --log /var/log/tor/snowflake-server.log
07070100000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000B00000000TRAILER!!!