File backhand-0.23.0.obscpio of Package backhand
==> backhand-0.23.0/.cargo/config.toml <==
[env]
# Always remove debug asserts in C dependencies
CFLAGS = "-D NDEBUG=1"
[target."x86_64-unknown-linux-musl"]
rustflags = ["-C", "target-feature=+crt-static"]
==> backhand-0.23.0/.github/FUNDING.yml <==
github: [wcampbell0x2a]
==> backhand-0.23.0/.github/codecov.yml <==
coverage:
  status:
    project:
      default:
        threshold: 1%
    patch: false
comment: false
==> backhand-0.23.0/.github/labeler.yml <==
A-backhand-lib:
  - changed-files:
      - any-glob-to-any-file: backhand/**
A-fuzz:
  - changed-files:
      - any-glob-to-any-file: fuzz/**
A-backhand-cli:
  - changed-files:
      - any-glob-to-any-file: backhand-cli/**
A-tests:
  - changed-files:
      - any-glob-to-any-file: backhand-test/**
A-CI:
  - changed-files:
      - any-glob-to-any-file: .github/**
A-unsquashfs:
  - changed-files:
      - any-glob-to-any-file: backhand-cli/src/bin/unsquashfs.rs
A-add:
  - changed-files:
      - any-glob-to-any-file: backhand-cli/src/bin/add.rs
A-replace:
  - changed-files:
      - any-glob-to-any-file: backhand-cli/src/bin/replace.rs
==> backhand-0.23.0/.github/workflows/benchmark.yml <==
on: [pull_request]
name: CI Pull Request
jobs:
  benchmark:
    name: Benchmark
    runs-on: ubuntu-24.04
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - uses: dtolnay/rust-toolchain@master
        with:
          toolchain: stable
      - uses: wcampbell0x2a/criterion-compare-action@20e6511506d7c141bcb0e336db78928cc5504870
        with:
          branchName: ${{ github.base_ref }}
          before: "cargo build --bins --release --locked --workspace"
          token: ${{ secrets.GITHUB_TOKEN }}
==> backhand-0.23.0/.github/workflows/binaries.yml <==
on:
  push:
    branches: [ master ]
    tags: [ 'v*' ]
  pull_request:
    branches: [ master ]
env:
  BINS: "add-backhand unsquashfs-backhand replace-backhand"
name: binaries
jobs:
  # release binaries
  release-bins:
    runs-on: ${{ matrix.job.os }}
    env:
      RUSTFLAGS: "-C target-feature=+crt-static"
      BUILD_CMD: cargo
    strategy:
      fail-fast: false
      matrix:
        job:
          - { target: x86_64-unknown-linux-musl, os: ubuntu-24.04, use-cross: true }
          - { target: aarch64-unknown-linux-musl, os: ubuntu-24.04, use-cross: true }
          - { target: arm-unknown-linux-musleabi, os: ubuntu-24.04, use-cross: true }
          - { target: armv7-unknown-linux-musleabi, os: ubuntu-24.04, use-cross: true }
          - { target: x86_64-apple-darwin, os: macos-14 }
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - name: Populate cache
        uses: ./.github/workflows/cache
      - name: Overwrite build command env variable
        if: matrix.job.use-cross
        shell: bash
        run: |
          echo "BUILD_CMD=cross" >> $GITHUB_ENV
          RUSTFLAGS="-C target-feature=-crt-static" cargo install cross --git https://github.com/cross-rs/cross
      - uses: dtolnay/rust-toolchain@master
        with:
          toolchain: stable
          target: ${{ matrix.job.target }}
      - run: $BUILD_CMD build -p backhand-cli --bin add-backhand --bin replace-backhand --features xz-static --locked --target ${{ matrix.job.target }} --profile=dist
      - run: $BUILD_CMD build -p backhand-cli --bin unsquashfs-backhand --locked --target ${{ matrix.job.target }} --profile=dist --no-default-features --features zstd,xz-static,gzip,backhand-parallel
      - name: archive
        run: |
          tar -czvf backhand-${{ matrix.job.target }}.tar.gz \
            -C target/${{ matrix.job.target }}/dist/ $BINS
      - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
        with:
          name: backhand-${{ matrix.job.target }}.tar.gz
          path: backhand-${{ matrix.job.target }}.tar.gz
      # check semver before release!
      - name: Check semver
        env:
          # disable static build for this job
          RUSTFLAGS: ""
        if: ${{ startsWith(github.ref, 'refs/tags/v') }}
        uses: obi1kenobi/cargo-semver-checks-action@7272cc2caa468d3e009a2b0a9cc366839348237b # v2.6
        with:
          package: backhand
          feature-group: default-features
      - name: Upload binary to release
        if: ${{ startsWith(github.ref, 'refs/tags/v') }}
        uses: svenstaro/upload-release-action@04733e069f2d7f7f0b4aebc4fbdbce8613b03ccd # v2
        with:
          repo_token: ${{ secrets.GITHUB_TOKEN }}
          file: backhand-${{ matrix.job.target }}.tar.gz
          asset_name: backhand-${{ github.ref_name }}-${{ matrix.job.target }}.tar.gz
          tag: ${{ github.ref }}
          prerelease: true
          overwrite: true
==> backhand-0.23.0/.github/workflows/cache/action.yml <==
name: Cache Cargo Dependencies
runs:
  using: "composite"
  steps:
    # Cache the global cargo directory, but NOT the local `target` directory which
    # we cannot reuse anyway when the nightly changes (and it grows quite large
    # over time).
    - name: Add cache for cargo
      id: cache
      uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
      with:
        path: |
          # Taken from <https://doc.rust-lang.org/nightly/cargo/guide/cargo-home.html#caching-the-cargo-home-in-ci>.
          ~/.cargo/bin
          ~/.cargo/registry/index
          ~/.cargo/registry/cache
          ~/.cargo/git/db
          # contains package information of crates installed via `cargo install`.
          ~/.cargo/.crates.toml
          ~/.cargo/.crates2.json
        key: ${{ runner.os }}-rsadsb-${{ hashFiles('**/Cargo.lock') }}
        restore-keys: ${{ runner.os }}-rsadsb
==> backhand-0.23.0/.github/workflows/coverage.yml <==
name: Coverage
on: [pull_request, push]
permissions:
  contents: read
jobs:
  coverage:
    runs-on: ubuntu-24.04
    env:
      CARGO_TERM_COLOR: always
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - name: Populate cache
        uses: ./.github/workflows/cache
      - run: sudo apt-get install -y squashfs-tools
      # Nightly Rust is required for cargo llvm-cov --doc.
      - uses: dtolnay/rust-toolchain@nightly
        with:
          components: llvm-tools-preview
      - uses: taiki-e/install-action@cargo-llvm-cov
      - uses: taiki-e/install-action@nextest
      # generate release builds of the testable binaries
      # this is meant to actually run the binary, so this will fail but the binary will be built
      - run: cargo llvm-cov run --bin replace-backhand --no-clean --release || true
      - run: cargo llvm-cov run --bin add-backhand --no-clean --release || true
      - run: cargo llvm-cov run --bin unsquashfs-backhand --no-clean --release || true
      - run: cargo llvm-cov --workspace --codecov --output-path codecov.json --features __test_unsquashfs --release --no-clean -- --skip slow
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@ad3126e916f78f00edff4ed0317cf185271ccc2d # v5.4.2
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: codecov.json
          fail_ci_if_error: true
==> backhand-0.23.0/.github/workflows/labeler.yml <==
name: "Pull Request Labeler"
on:
  - pull_request_target
jobs:
  triage:
    permissions:
      contents: read
      pull-requests: write
    runs-on: ubuntu-24.04
    steps:
      - uses: actions/labeler@v5
==> backhand-0.23.0/.github/workflows/main.yml <==
on:
  push:
    branches:
      - master
  pull_request:
    branches:
      - master
  schedule: [cron: "40 1 * * *"]
name: ci
jobs:
  # build on backhand only supported target
  cross-build:
    runs-on: ${{ matrix.job.os }}
    env:
      BUILD_CMD: cargo
    strategy:
      fail-fast: false
      matrix:
        job:
          - { target: x86_64-pc-windows-gnu, os: ubuntu-24.04, use-cross: true }
        toolchain:
          - stable
          # msrv of backhand-cli
          - 1.84
        features:
          - --no-default-features --features xz
          - --no-default-features --features gzip
          - --no-default-features --features gzip,xz
          - --no-default-features --features gzip,xz,parallel
          - --no-default-features --features xz-static
          - --no-default-features --features lz4
          - --no-default-features --features parallel
          # default features
          -
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - name: Populate cache
        uses: ./.github/workflows/cache
      - name: Overwrite build command env variable
        if: matrix.job.use-cross
        shell: bash
        run: |
          echo "BUILD_CMD=cross" >> $GITHUB_ENV
          RUSTFLAGS="-C target-feature=-crt-static" cargo install cross --git https://github.com/cross-rs/cross
      - uses: dtolnay/rust-toolchain@master
        with:
          toolchain: ${{ matrix.toolchain }}
      # build lib with cross
      - run: $BUILD_CMD +${{ matrix.toolchain }} build ${{ matrix.features }} --target ${{ matrix.job.target }} --release --locked --workspace --lib
  # build/test all supported targets for library and bins (skipping slow and squashfs-tools tests)
  cross-test:
    runs-on: ${{ matrix.job.os }}
    env:
      RUSTFLAGS: "-C target-feature=+crt-static"
      BUILD_CMD: cargo
    strategy:
      fail-fast: false
      matrix:
        job:
          - { target: x86_64-unknown-linux-musl, os: ubuntu-24.04, use-cross: true }
          - { target: aarch64-unknown-linux-musl, os: ubuntu-24.04, use-cross: true }
          - { target: arm-unknown-linux-musleabi, os: ubuntu-24.04, use-cross: true }
          - { target: armv7-unknown-linux-musleabi, os: ubuntu-24.04, use-cross: true }
          - { target: x86_64-apple-darwin, os: macos-14 }
        toolchain:
          - stable
          # msrv of backhand-cli
          - 1.84
        features:
          # default features
          -
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - name: Populate cache
        uses: ./.github/workflows/cache
      - name: Overwrite build command env variable
        if: matrix.job.use-cross
        shell: bash
        run: |
          echo "BUILD_CMD=cross" >> $GITHUB_ENV
          RUSTFLAGS="-C target-feature=-crt-static" cargo install cross --git https://github.com/cross-rs/cross
      - uses: dtolnay/rust-toolchain@master
        with:
          toolchain: ${{ matrix.toolchain }}
          # TODO: really only needed for the matrix variables without use-cross
          targets: ${{ matrix.job.target }}
      # build lib and bins with cross or cargo
      - run: $BUILD_CMD +${{ matrix.toolchain }} build ${{ matrix.features }} --target ${{ matrix.job.target }} --release --locked --workspace --features xz-static
      # test with cross or cargo, skipping slow tests and tests that need more than qemu's default memory
      - run: CROSS_CONTAINER_OPTS="--network host" RUST_LOG=info $BUILD_CMD +${{ matrix.toolchain }} test --workspace --release ${{ matrix.features }} --target ${{ matrix.job.target }} --features xz-static --locked -- --skip slow --skip no_qemu
  # build/test all supported on native x86_64 arch for library and bins (all tests)
  build-test-native:
    runs-on: ubuntu-24.04
    strategy:
      fail-fast: false
      matrix:
        toolchain:
          - stable
          # msrv of backhand-cli
          - 1.84
        features:
          - --no-default-features --features xz
          - --no-default-features --features gzip
          - --no-default-features --features gzip,xz
          - --no-default-features --features xz-static
          - --no-default-features --features lz4
          # default features
          -
    steps:
      - run: sudo apt-get install -y squashfs-tools
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203 # master
        with:
          toolchain: ${{ matrix.toolchain }}
      # build bins
      - run: cargo +${{ matrix.toolchain }} build ${{ matrix.features }} --release --locked --workspace
      # run tests with native unsquashfs on x86_64-unknown-linux-musl (using Cross.toml)
      - run: RUST_LOG=info cargo +${{ matrix.toolchain }} test --workspace --release ${{ matrix.features }} --locked --features __test_unsquashfs -- --skip slow
  # fmt and clippy on stable
  fmt-clippy-stable:
    runs-on: ubuntu-24.04
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203 # master
        with:
          toolchain: stable
          components: rustfmt, clippy
      # fmt
      - run: cargo fmt --all -- --check
      # clippy
      - run: cargo clippy -- -D warnings
==> backhand-0.23.0/.github/workflows/msrv.yml <==
on:
  push:
    branches:
      - master
  pull_request:
    branches:
      - master
  schedule: [cron: "40 1 * * *"]
name: Check MSRV of backhand
jobs:
  build-test-backhand:
    runs-on: ubuntu-24.04
    strategy:
      fail-fast: false
      matrix:
        toolchain:
          # msrv of backhand
          - 1.84
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - name: Populate cache
        uses: ./.github/workflows/cache
      - uses: dtolnay/rust-toolchain@master
        with:
          toolchain: ${{ matrix.toolchain }}
      - run: cargo +${{ matrix.toolchain }} check --locked -p backhand
  build-test-backhand-cli:
    runs-on: ubuntu-24.04
    strategy:
      fail-fast: false
      matrix:
        toolchain:
          # msrv of backhand-cli
          - 1.84
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - name: Populate cache
        uses: ./.github/workflows/cache
      - uses: dtolnay/rust-toolchain@master
        with:
          toolchain: ${{ matrix.toolchain }}
      - run: cargo +${{ matrix.toolchain }} check --locked -p backhand-cli
==> backhand-0.23.0/.gitignore <==
**/target
**/test-assets
**/result
==> backhand-0.23.0/.rustfmt.toml <==
use_small_heuristics = "Max"
==> backhand-0.23.0/BENCHMARK.md <==
# library benchmarks
```
$ cargo bench
```
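These are Criterion benchmarks (the CI compare job in `.github/workflows/benchmark.yml` runs them through `criterion-compare-action`). A minimal sketch of a benchmark of this shape; the function and benchmark names are hypothetical, and the asset path is borrowed from the heap-usage tables below:
```rust
use std::io::Cursor;

use backhand::FilesystemReader;
use criterion::{criterion_group, criterion_main, Criterion};

fn bench_read(c: &mut Criterion) {
    let image =
        std::fs::read("backhand-test/test-assets/test_re815_xev160/870D97.squashfs").unwrap();
    c.bench_function("read_filesystem", |b| {
        // Parse the superblock, inodes, and tables from an in-memory image.
        b.iter(|| FilesystemReader::from_reader(Cursor::new(&image)).unwrap())
    });
}

criterion_group!(benches, bench_read);
criterion_main!(benches);
```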
# compare benchmarks
These benchmarks are created from `bench.bash`, on the following CPU running Arch Linux:

> [!WARNING]
> This is not meant to be a perfect benchmark against squashfs-tools. Certain features such
> as LTO are used for backhand and its compression libraries, and are not enabled when using
> squashfs-tools from a package manager.
<details><summary>lscpu</summary>
```
$ lscpu
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Address sizes: 48 bits physical, 48 bits virtual
Byte Order: Little Endian
CPU(s): 16
On-line CPU(s) list: 0-15
Vendor ID: AuthenticAMD
Model name: AMD Ryzen 7 9800X3D 8-Core Processor
CPU family: 26
Model: 68
Thread(s) per core: 2
Core(s) per socket: 8
Socket(s): 1
Stepping: 0
Frequency boost: enabled
CPU(s) scaling MHz: 72%
CPU max MHz: 5271.6221
CPU min MHz: 603.3790
BogoMIPS: 9399.97
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good amd_lbr_v2 nopl xtopology nonstop_tsc cpuid extd_apicid aperfmperf rapl pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_llc mwaitx cpb cat_l3 cdp_l3 hw_pstate ssbd mba perfmon_v2 ibrs ibpb stibp ibrs_enhanced vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid cqm rdt_a avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local user_shstk avx_vnni avx512_bf16 clzero irperf xsaveerptr rdpru wbnoinvd cppc arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif x2avic v_spec_ctrl vnmi avx512vbmi umip pku ospke avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq rdpid bus_lock_detect movdiri movdir64b overflow_recov succor smca fsrm avx512_vp2intersect flush_l1d amd_lbr_pmc_freeze
Virtualization features:
Virtualization: AMD-V
Caches (sum of all):
L1d: 384 KiB (8 instances)
L1i: 256 KiB (8 instances)
L2: 8 MiB (8 instances)
L3: 96 MiB (1 instance)
NUMA:
NUMA node(s): 1
NUMA node0 CPU(s): 0-15
Vulnerabilities:
Gather data sampling: Not affected
Ghostwrite: Not affected
Indirect target selection: Not affected
Itlb multihit: Not affected
L1tf: Not affected
Mds: Not affected
Meltdown: Not affected
Mmio stale data: Not affected
Reg file data sampling: Not affected
Retbleed: Not affected
Spec rstack overflow: Mitigation; IBPB on VMEXIT only
Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl
Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; STIBP always-on; PBRSB-eIBRS Not affected; BHI Not affected
Srbds: Not affected
Tsx async abort: Not affected
```
</details>
```
$ ./bench.bash
```
## Wall time: `backhand/unsquashfs` vs `squashfs-tools/unsquashfs-4.6.1`
### `openwrt-22.03.2-ath79-generic-tplink_archer-a7-v5-squashfs-factory.bin`
| Command | Mean [ms] | Min [ms] | Max [ms] | Relative |
|:---|---:|---:|---:|---:|
| `backhand-dist-v0.22.0-musl` | 41.6 ± 2.7 | 36.6 | 48.1 | 1.78 ± 0.15 |
| `backhand-dist-musl` | 28.5 ± 1.7 | 25.1 | 34.1 | 1.22 ± 0.10 |
| `backhand-dist-musl-native` | 28.5 ± 1.7 | 25.1 | 32.0 | 1.22 ± 0.10 |
| `backhand-dist-gnu` | 23.4 ± 1.4 | 20.7 | 27.1 | 1.00 ± 0.08 |
| `backhand-dist-gnu-native` | 23.4 ± 1.2 | 20.3 | 26.0 | 1.00 |
| `squashfs-tools` | 71.9 ± 8.4 | 50.5 | 86.5 | 3.07 ± 0.39 |
### `openwrt-22.03.2-ipq40xx-generic-netgear_ex6100v2-squashfs-factory.img`
| Command | Mean [ms] | Min [ms] | Max [ms] | Relative |
|:---|---:|---:|---:|---:|
| `backhand-dist-v0.22.0-musl` | 43.1 ± 2.9 | 36.4 | 48.9 | 1.82 ± 0.15 |
| `backhand-dist-musl` | 29.1 ± 1.5 | 26.5 | 32.5 | 1.23 ± 0.09 |
| `backhand-dist-musl-native` | 28.7 ± 1.5 | 25.8 | 34.7 | 1.21 ± 0.09 |
| `backhand-dist-gnu` | 23.8 ± 1.4 | 21.2 | 27.6 | 1.01 ± 0.07 |
| `backhand-dist-gnu-native` | 23.7 ± 1.1 | 20.8 | 25.6 | 1.00 |
| `squashfs-tools` | 64.3 ± 10.6 | 34.5 | 83.3 | 2.72 ± 0.46 |
### `870D97.squashfs`
| Command | Mean [ms] | Min [ms] | Max [ms] | Relative |
|:---|---:|---:|---:|---:|
| `backhand-dist-v0.22.0-musl` | 229.2 ± 5.4 | 219.7 | 236.3 | 3.38 ± 0.11 |
| `backhand-dist-musl` | 84.6 ± 2.1 | 80.4 | 89.8 | 1.25 ± 0.04 |
| `backhand-dist-musl-native` | 83.4 ± 1.8 | 79.3 | 87.8 | 1.23 ± 0.04 |
| `backhand-dist-gnu` | 67.9 ± 1.6 | 65.1 | 71.5 | 1.00 ± 0.03 |
| `backhand-dist-gnu-native` | 67.9 ± 1.7 | 64.9 | 71.2 | 1.00 |
| `squashfs-tools` | 88.5 ± 12.0 | 67.8 | 111.3 | 1.30 ± 0.18 |
### `img-1571203182_vol-ubi_rootfs.ubifs`
| Command | Mean [ms] | Min [ms] | Max [ms] | Relative |
|:---|---:|---:|---:|---:|
| `backhand-dist-v0.22.0-musl` | 144.9 ± 5.4 | 137.0 | 157.9 | 1.77 ± 0.12 |
| `backhand-dist-musl` | 98.4 ± 2.6 | 92.6 | 102.4 | 1.20 ± 0.08 |
| `backhand-dist-musl-native` | 97.0 ± 3.7 | 91.1 | 105.1 | 1.19 ± 0.08 |
| `backhand-dist-gnu` | 81.7 ± 4.7 | 76.6 | 95.0 | 1.00 |
| `backhand-dist-gnu-native` | 81.7 ± 3.9 | 77.0 | 91.6 | 1.00 ± 0.08 |
| `squashfs-tools` | 113.5 ± 6.6 | 95.6 | 126.6 | 1.39 ± 0.11 |
### `2611E3.squashfs`
| Command | Mean [ms] | Min [ms] | Max [ms] | Relative |
|:---|---:|---:|---:|---:|
| `backhand-dist-v0.22.0-musl` | 76.7 ± 5.0 | 68.9 | 89.2 | 1.83 ± 0.16 |
| `backhand-dist-musl` | 52.0 ± 2.5 | 47.3 | 60.0 | 1.24 ± 0.09 |
| `backhand-dist-musl-native` | 51.6 ± 3.4 | 47.4 | 60.0 | 1.23 ± 0.11 |
| `backhand-dist-gnu` | 42.0 ± 2.5 | 38.1 | 47.7 | 1.00 |
| `backhand-dist-gnu-native` | 42.7 ± 2.8 | 37.8 | 49.7 | 1.02 ± 0.09 |
| `squashfs-tools` | 109.6 ± 9.8 | 88.1 | 123.5 | 2.61 ± 0.28 |
### `Plexamp-4.6.1.AppImage`
| Command | Mean [ms] | Min [ms] | Max [ms] | Relative |
|:---|---:|---:|---:|---:|
| `backhand-dist-v0.22.0-musl` | 288.4 ± 1.6 | 286.2 | 291.7 | 3.74 ± 0.37 |
| `backhand-dist-musl` | 123.3 ± 1.7 | 120.9 | 127.4 | 1.60 ± 0.16 |
| `backhand-dist-musl-native` | 122.7 ± 1.3 | 120.5 | 125.1 | 1.59 ± 0.16 |
| `backhand-dist-gnu` | 113.0 ± 2.7 | 109.3 | 117.7 | 1.47 ± 0.15 |
| `backhand-dist-gnu-native` | 109.9 ± 2.3 | 106.6 | 115.8 | 1.43 ± 0.15 |
| `squashfs-tools` | 77.1 ± 7.7 | 66.0 | 88.9 | 1.00 |
### `crates-io.squashfs`
| Command | Mean [ms] | Min [ms] | Max [ms] | Relative |
|:---|---:|---:|---:|---:|
| `backhand-dist-v0.22.0-musl` | 351.7 ± 1.6 | 349.3 | 354.4 | 1.00 ± 0.01 |
| `backhand-dist-musl` | 350.6 ± 2.1 | 347.9 | 354.6 | 1.00 |
| `backhand-dist-musl-native` | 355.3 ± 4.0 | 350.8 | 364.4 | 1.01 ± 0.01 |
| `backhand-dist-gnu` | 403.7 ± 4.0 | 398.0 | 408.7 | 1.15 ± 0.01 |
| `backhand-dist-gnu-native` | 412.5 ± 4.1 | 404.6 | 418.9 | 1.18 ± 0.01 |
| `squashfs-tools` | 754.3 ± 12.6 | 734.8 | 771.6 | 2.15 ± 0.04 |
### `airootfs.sfs`
| Command | Mean [ms] | Min [ms] | Max [ms] | Relative |
|:---|---:|---:|---:|---:|
| `backhand-dist-v0.22.0-musl` | 2.8 ± 0.2 | 2.0 | 3.6 | 1.02 ± 0.13 |
| `backhand-dist-musl` | 3.2 ± 0.3 | 2.0 | 4.1 | 1.17 ± 0.17 |
| `backhand-dist-musl-native` | 3.1 ± 0.2 | 1.9 | 3.5 | 1.15 ± 0.14 |
| `backhand-dist-gnu` | 2.7 ± 0.3 | 1.7 | 3.6 | 1.00 |
| `backhand-dist-gnu-native` | 3.2 ± 0.3 | 1.8 | 3.8 | 1.20 ± 0.17 |
| `squashfs-tools` | 3.4 ± 0.2 | 2.0 | 3.9 | 1.25 ± 0.16 |
## Heap Usage: `backhand/unsquashfs` vs `squashfs-tools/unsquashfs-4.6.1`
```
$ cargo +stable build -p backhand-cli --bins --locked --profile=dist
```
| Command | Peak Heap Memory Consumption |
| :------ | ---------------------------: |
| `heaptrack ./target/dist/unsquashfs-backhand --quiet -f -d $(mktemp -d) backhand-test/test-assets/test_re815_xev160/870D97.squashfs` | 34.4MB |
| `heaptrack unsquashfs -quiet -no-progress -d $(mktemp -d) backhand-test/test-assets/test_re815_xev160/870D97.squashfs` | 76.8MB |
| Command | Peak Heap Memory Consumption |
| :------ | ---------------------------: |
| `heaptrack ./target/dist/unsquashfs-backhand --quiet -f -d $(mktemp -d) backhand-test/test-assets/test_tplink_ax1800/img-1571203182_vol-ubi_rootfs.ubifs` | 52.3MB |
| `heaptrack unsquashfs -d $(mktemp -d) backhand-test/test-assets/test_tplink_ax1800/img-1571203182_vol-ubi_rootfs.ubifs` | 103.4MB |
==> backhand-0.23.0/CHANGELOG.md <==
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
## [v0.23.0] - 2025-06-19
### `backhand`
- Add feature `parallel`, which enables internal parallelization when decompressing data. When `parallel` is not used, the old behavior of reading without parallelization is used. ([#716](https://github.com/wcampbell0x2a/backhand/pull/716))
  - This substantially increases the speed of backhand-unsquashfs, removing about half of the wall time! See the new benchmarks for details.
- Fix misaligned pointer loads when using Deku. thanks @bdash! ([#713](https://github.com/wcampbell0x2a/backhand/pull/713))
- Fix incorrect assertion about file size ([#730](https://github.com/wcampbell0x2a/backhand/pull/730))
- Use rust library `liblzma` instead of `xz2`. This bumps the version of XZ used to 5.8.1. ([#712](https://github.com/wcampbell0x2a/backhand/pull/712))
  - This also removes the need for `HAVE_DECODER` defines/CFLAGS when building xz, as `liblzma` enables them by default when building.
### `backhand-cli`
- unsquashfs: Properly flush the file writer
- Use the `backhand` feature `parallel` by default (and in release builds), exposed via the `backhand-parallel` feature.
## [v0.22.0] - 2025-05-02
### `backhand`
- Update deku to v0.19.0 ([#710](https://github.com/wcampbell0x2a/backhand/pull/710))
- Update MSRV to 1.84
### `backhand-cli`
- Update MSRV to 1.84
## [v0.21.0] - 2025-03-08
### `backhand`
- Use `zlib-rs` as the default implementation for `flate2`, added feature `gzip-zlib-ng` to access previous behavior ([#697](https://github.com/wcampbell0x2a/backhand/pull/697))
- Remove `gzip-zune-inflate`, as this had minimal usage ([#697](https://github.com/wcampbell0x2a/backhand/pull/697))
- Bump MSRV to `1.81` ([#693](https://github.com/wcampbell0x2a/backhand/pull/693))
- Add lz4 compression and decompression, thanks @jeromegn! ([#701](https://github.com/wcampbell0x2a/backhand/pull/701))
## [v0.20.0] - 2025-01-17
### `backhand`
- Remove incorrect check for `ExtendedDirectory` index count ([#691](https://github.com/wcampbell0x2a/backhand/pull/691))
- Correctly support `ExtendedFile` ([#691](https://github.com/wcampbell0x2a/backhand/pull/691))
## [v0.19.0] - 2024-11-12
### `backhand`
- Use feature `zlib-ng` for `flate2`, which is enabled when compression option `Gzip` is used. This enables the backend to use [zlib-ng](https://github.com/zlib-ng/zlib-ng), which is faster by default! ([#562](https://github.com/wcampbell0x2a/backhand/pull/562))
- Remove duplicated data when adding new files to a `FilesystemWriter` (see the sketch after this list). This also applies this behavior to the `add` and `replace` binaries. This is controllable with `FilesystemWriter::set_no_duplicate_files`. ([#603](https://github.com/wcampbell0x2a/backhand/pull/603)), ([#594](https://github.com/wcampbell0x2a/backhand/pull/594))
- Increase speed of internal `HashMap`s, by switching to `xxhash` and just using the `inode` as the key in other places.
- Changed `SuperBlock::Flags` to be public.
- Add non-standard CompressionOptions support ([#584](https://github.com/wcampbell0x2a/backhand/pull/584))
  - Add `CompressionAction::compression_options` to override the default compression options emitted during writing.
  - Add `FilesystemWriter::set_emit_compression_options`
- Support sparse file extraction ([#624](https://github.com/wcampbell0x2a/backhand/pull/624))
- Add `x86_64-pc-windows-gnu` support ([@Wolfyxon](https://github.com/Wolfyxon)) ([#634](https://github.com/wcampbell0x2a/backhand/pull/634))
- Add [zlib-rs](https://github.com/trifectatechfoundation/zlib-rs) support through the `gzip-zlib-rs` feature.
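Taken together, the writer changes above look roughly like this. A minimal sketch: the setter names come from the API listing below, while the paths, file contents, and `NodeHeader::default()` metadata are illustrative stand-ins:
```rust
use std::fs::File;
use std::io::Cursor;

use backhand::{FilesystemWriter, NodeHeader};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut fs = FilesystemWriter::default();
    // Deduplication of identical file data is now the default behavior;
    // the setter exists to toggle it explicitly.
    fs.set_no_duplicate_files(true);
    // Do not emit compression options into the written image.
    fs.set_emit_compression_options(false);
    let header = NodeHeader::default();
    fs.push_file(Cursor::new(b"same bytes".to_vec()), "a.bin", header)?;
    // Identical contents as a.bin: stored once thanks to deduplication.
    fs.push_file(Cursor::new(b"same bytes".to_vec()), "b.bin", header)?;
    fs.write(&mut File::create("out.squashfs")?)?;
    Ok(())
}
```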
#### Security
- Prevent self-referential directories, which could cause a stack overflow ([#495](https://github.com/wcampbell0x2a/backhand/pull/495))
- Avoid high allocations for high inode count ([#495](https://github.com/wcampbell0x2a/backhand/pull/495))
### `backhand-cli`
- Add `--no-compression-options` to `add` and `replace` to remove compression options from image after modification.
- Add `--pad-len` to `replace` and `add` to control the length of end-of-image padding ([#604](https://github.com/wcampbell0x2a/backhand/pull/604))
- Bump MSRV to `1.77`
### Dependencies
- Bump `thiserror` from 1.0.59 to 2.0.1 ([#564](https://github.com/wcampbell0x2a/backhand/pull/564), [#578](https://github.com/wcampbell0x2a/backhand/pull/578), [#615](https://github.com/wcampbell0x2a/backhand/pull/615), [#633](https://github.com/wcampbell0x2a/backhand/pull/633))
- Bump `libc` from 0.2.154 to 0.2.162 ([#557](https://github.com/wcampbell0x2a/backhand/pull/557), [#592](https://github.com/wcampbell0x2a/backhand/pull/592), [#616](https://github.com/wcampbell0x2a/backhand/pull/616), [#630](https://github.com/wcampbell0x2a/backhand/pull/630))
- Bump `clap` from 4.5.4 to 4.5.13 ([#569](https://github.com/wcampbell0x2a/backhand/pull/569), [#574](https://github.com/wcampbell0x2a/backhand/pull/574), [#582](https://github.com/wcampbell0x2a/backhand/pull/582))
- Bump `rustc-hash` from 1.1.0 to 2.0.0 ([#570](https://github.com/wcampbell0x2a/backhand/pull/570))
- Bump `clap_complete` from 4.5.2 to 4.5.13 ([#575](https://github.com/wcampbell0x2a/backhand/pull/575), [#595](https://github.com/wcampbell0x2a/backhand/pull/595))
- Bump `document-features` from 0.2.8 to 0.2.10 ([#576](https://github.com/wcampbell0x2a/backhand/pull/576))
- Bump `zstd-safe` from 7.1.0 to 7.2.1 ([#585](https://github.com/wcampbell0x2a/backhand/pull/585))
- Bump `flate2` from 1.0.30 to 1.0.35 ([#593](https://github.com/wcampbell0x2a/backhand/pull/593), [#596](https://github.com/wcampbell0x2a/backhand/pull/596), [#617](https://github.com/wcampbell0x2a/backhand/pull/617), [#641](https://github.com/wcampbell0x2a/backhand/pull/641))
- Bump `zstd` from 0.13.1 to 0.13.2 ([#601](https://github.com/wcampbell0x2a/backhand/pull/601))
- Bump `env_logger` from 0.11.3 to 0.11.5 ([#602](https://github.com/wcampbell0x2a/backhand/pull/602))
- Bump `libdeflater` from 1.21.0 to 1.22.0 ([#619](https://github.com/wcampbell0x2a/backhand/pull/619))
- Bump `tempfile` from 3.12.0 to 3.13.0 ([#618](https://github.com/wcampbell0x2a/backhand/pull/618))
- Bump `nix` from 0.28.0 to 0.29.0 ([#566](https://github.com/wcampbell0x2a/backhand/pull/566))
### Complete API Updates
<details>
<summary>Click to expand</summary>
```diff
Removed items from the public API
=================================
-pub fn backhand::SuperBlock::data_has_been_duplicated(&self) -> bool
Changed items in the public API
===============================
-pub fn backhand::compression::CompressionOptions::from_reader_with_ctx<R: std::io::Read>(__deku_reader: &mut deku::reader::Reader<'_, R>, (endian, compressor): (deku::ctx::Endian, backhand::compression::Compressor)) -> core::result::Result<Self, deku::error::DekuError>
+pub fn backhand::compression::CompressionOptions::from_reader_with_ctx<R: std::io::Read + std::io::Seek>(__deku_reader: &mut deku::reader::Reader<'_, R>, (endian, compressor): (deku::ctx::Endian, backhand::compression::Compressor)) -> core::result::Result<Self, deku::error::DekuError>
-pub fn backhand::compression::CompressionOptions::to_writer<W: std::io::Write>(&self, __deku_writer: &mut deku::writer::Writer<W>, (endian, compressor): (deku::ctx::Endian, backhand::compression::Compressor)) -> core::result::Result<(), deku::error::DekuError>
+pub fn backhand::compression::CompressionOptions::to_writer<W: std::io::Write + std::io::Seek>(&self, __deku_writer: &mut deku::writer::Writer<W>, (endian, compressor): (deku::ctx::Endian, backhand::compression::Compressor)) -> core::result::Result<(), deku::error::DekuError>
-pub fn backhand::compression::Compressor::from_reader_with_ctx<R: std::io::Read>(__deku_reader: &mut deku::reader::Reader<'_, R>, endian: deku::ctx::Endian) -> core::result::Result<Self, deku::error::DekuError>
+pub fn backhand::compression::Compressor::from_reader_with_ctx<R: std::io::Read + std::io::Seek>(__deku_reader: &mut deku::reader::Reader<'_, R>, endian: deku::ctx::Endian) -> core::result::Result<Self, deku::error::DekuError>
-pub fn backhand::compression::Compressor::to_writer<W: std::io::Write>(&self, __deku_writer: &mut deku::writer::Writer<W>, endian: deku::ctx::Endian) -> core::result::Result<(), deku::error::DekuError>
+pub fn backhand::compression::Compressor::to_writer<W: std::io::Write + std::io::Seek>(&self, __deku_writer: &mut deku::writer::Writer<W>, endian: deku::ctx::Endian) -> core::result::Result<(), deku::error::DekuError>
-pub fn backhand::compression::Gzip::from_reader_with_ctx<R: std::io::Read>(__deku_reader: &mut deku::reader::Reader<'_, R>, endian: deku::ctx::Endian) -> core::result::Result<Self, deku::error::DekuError>
+pub fn backhand::compression::Gzip::from_reader_with_ctx<R: std::io::Read + std::io::Seek>(__deku_reader: &mut deku::reader::Reader<'_, R>, endian: deku::ctx::Endian) -> core::result::Result<Self, deku::error::DekuError>
-pub fn backhand::compression::Gzip::to_writer<W: std::io::Write>(&self, __deku_writer: &mut deku::writer::Writer<W>, endian: deku::ctx::Endian) -> core::result::Result<(), deku::error::DekuError>
+pub fn backhand::compression::Gzip::to_writer<W: std::io::Write + std::io::Seek>(&self, __deku_writer: &mut deku::writer::Writer<W>, endian: deku::ctx::Endian) -> core::result::Result<(), deku::error::DekuError>
-pub fn backhand::compression::Lz4::from_reader_with_ctx<R: std::io::Read>(__deku_reader: &mut deku::reader::Reader<'_, R>, endian: deku::ctx::Endian) -> core::result::Result<Self, deku::error::DekuError>
+pub fn backhand::compression::Lz4::from_reader_with_ctx<R: std::io::Read + std::io::Seek>(__deku_reader: &mut deku::reader::Reader<'_, R>, endian: deku::ctx::Endian) -> core::result::Result<Self, deku::error::DekuError>
-pub fn backhand::compression::Lz4::to_writer<W: std::io::Write>(&self, __deku_writer: &mut deku::writer::Writer<W>, endian: deku::ctx::Endian) -> core::result::Result<(), deku::error::DekuError>
+pub fn backhand::compression::Lz4::to_writer<W: std::io::Write + std::io::Seek>(&self, __deku_writer: &mut deku::writer::Writer<W>, endian: deku::ctx::Endian) -> core::result::Result<(), deku::error::DekuError>
-pub fn backhand::compression::Lzo::from_reader_with_ctx<R: std::io::Read>(__deku_reader: &mut deku::reader::Reader<'_, R>, endian: deku::ctx::Endian) -> core::result::Result<Self, deku::error::DekuError>
+pub fn backhand::compression::Lzo::from_reader_with_ctx<R: std::io::Read + std::io::Seek>(__deku_reader: &mut deku::reader::Reader<'_, R>, endian: deku::ctx::Endian) -> core::result::Result<Self, deku::error::DekuError>
-pub fn backhand::compression::Lzo::to_writer<W: std::io::Write>(&self, __deku_writer: &mut deku::writer::Writer<W>, endian: deku::ctx::Endian) -> core::result::Result<(), deku::error::DekuError>
+pub fn backhand::compression::Lzo::to_writer<W: std::io::Write + std::io::Seek>(&self, __deku_writer: &mut deku::writer::Writer<W>, endian: deku::ctx::Endian) -> core::result::Result<(), deku::error::DekuError>
-pub fn backhand::compression::Xz::from_reader_with_ctx<R: std::io::Read>(__deku_reader: &mut deku::reader::Reader<'_, R>, endian: deku::ctx::Endian) -> core::result::Result<Self, deku::error::DekuError>
+pub fn backhand::compression::Xz::from_reader_with_ctx<R: std::io::Read + std::io::Seek>(__deku_reader: &mut deku::reader::Reader<'_, R>, endian: deku::ctx::Endian) -> core::result::Result<Self, deku::error::DekuError>
-pub fn backhand::compression::Xz::to_writer<W: std::io::Write>(&self, __deku_writer: &mut deku::writer::Writer<W>, endian: deku::ctx::Endian) -> core::result::Result<(), deku::error::DekuError>
+pub fn backhand::compression::Xz::to_writer<W: std::io::Write + std::io::Seek>(&self, __deku_writer: &mut deku::writer::Writer<W>, endian: deku::ctx::Endian) -> core::result::Result<(), deku::error::DekuError>
-pub fn backhand::compression::Zstd::from_reader_with_ctx<R: std::io::Read>(__deku_reader: &mut deku::reader::Reader<'_, R>, endian: deku::ctx::Endian) -> core::result::Result<Self, deku::error::DekuError>
+pub fn backhand::compression::Zstd::from_reader_with_ctx<R: std::io::Read + std::io::Seek>(__deku_reader: &mut deku::reader::Reader<'_, R>, endian: deku::ctx::Endian) -> core::result::Result<Self, deku::error::DekuError>
-pub fn backhand::compression::Zstd::to_writer<W: std::io::Write>(&self, __deku_writer: &mut deku::writer::Writer<W>, endian: deku::ctx::Endian) -> core::result::Result<(), deku::error::DekuError>
+pub fn backhand::compression::Zstd::to_writer<W: std::io::Write + std::io::Seek>(&self, __deku_writer: &mut deku::writer::Writer<W>, endian: deku::ctx::Endian) -> core::result::Result<(), deku::error::DekuError>
-pub fn backhand::BasicFile::from_reader_with_ctx<R: std::io::Read>(__deku_reader: &mut deku::reader::Reader<'_, R>, (endian, block_size, block_log): (deku::ctx::Endian, u32, u16)) -> core::result::Result<Self, deku::error::DekuError>
+pub fn backhand::BasicFile::from_reader_with_ctx<R: std::io::Read + std::io::Seek>(__deku_reader: &mut deku::reader::Reader<'_, R>, (endian, block_size, block_log): (deku::ctx::Endian, u32, u16)) -> core::result::Result<Self, deku::error::DekuError>
-pub fn backhand::BasicFile::to_writer<W: std::io::Write>(&self, __deku_writer: &mut deku::writer::Writer<W>, (endian, block_size, block_log): (deku::ctx::Endian, u32, u16)) -> core::result::Result<(), deku::error::DekuError>
+pub fn backhand::BasicFile::to_writer<W: std::io::Write + std::io::Seek>(&self, __deku_writer: &mut deku::writer::Writer<W>, (endian, block_size, block_log): (deku::ctx::Endian, u32, u16)) -> core::result::Result<(), deku::error::DekuError>
-pub fn backhand::DataSize::from_reader_with_ctx<R: std::io::Read>(__deku_reader: &mut deku::reader::Reader<'_, R>, endian: deku::ctx::Endian) -> core::result::Result<Self, deku::error::DekuError>
+pub fn backhand::DataSize::from_reader_with_ctx<R: std::io::Read + std::io::Seek>(__deku_reader: &mut deku::reader::Reader<'_, R>, endian: deku::ctx::Endian) -> core::result::Result<Self, deku::error::DekuError>
-pub fn backhand::DataSize::to_writer<W: std::io::Write>(&self, __deku_writer: &mut deku::writer::Writer<W>, endian: deku::ctx::Endian) -> core::result::Result<(), deku::error::DekuError>
+pub fn backhand::DataSize::to_writer<W: std::io::Write + std::io::Seek>(&self, __deku_writer: &mut deku::writer::Writer<W>, endian: deku::ctx::Endian) -> core::result::Result<(), deku::error::DekuError>
-pub fn backhand::Export::from_reader_with_ctx<R: std::io::Read>(__deku_reader: &mut deku::reader::Reader<'_, R>, type_endian: deku::ctx::Endian) -> core::result::Result<Self, deku::error::DekuError>
+pub fn backhand::Export::from_reader_with_ctx<R: std::io::Read + std::io::Seek>(__deku_reader: &mut deku::reader::Reader<'_, R>, type_endian: deku::ctx::Endian) -> core::result::Result<Self, deku::error::DekuError>
-pub fn backhand::Export::to_writer<W: std::io::Write>(&self, __deku_writer: &mut deku::writer::Writer<W>, type_endian: deku::ctx::Endian) -> core::result::Result<(), deku::error::DekuError>
+pub fn backhand::Export::to_writer<W: std::io::Write + std::io::Seek>(&self, __deku_writer: &mut deku::writer::Writer<W>, type_endian: deku::ctx::Endian) -> core::result::Result<(), deku::error::DekuError>
-pub fn backhand::Fragment::from_reader_with_ctx<R: std::io::Read>(__deku_reader: &mut deku::reader::Reader<'_, R>, type_endian: deku::ctx::Endian) -> core::result::Result<Self, deku::error::DekuError>
+pub fn backhand::Fragment::from_reader_with_ctx<R: std::io::Read + std::io::Seek>(__deku_reader: &mut deku::reader::Reader<'_, R>, type_endian: deku::ctx::Endian) -> core::result::Result<Self, deku::error::DekuError>
-pub fn backhand::Fragment::to_writer<W: std::io::Write>(&self, __deku_writer: &mut deku::writer::Writer<W>, type_endian: deku::ctx::Endian) -> core::result::Result<(), deku::error::DekuError>
+pub fn backhand::Fragment::to_writer<W: std::io::Write + std::io::Seek>(&self, __deku_writer: &mut deku::writer::Writer<W>, type_endian: deku::ctx::Endian) -> core::result::Result<(), deku::error::DekuError>
-pub fn backhand::Id::from_reader_with_ctx<R: std::io::Read>(__deku_reader: &mut deku::reader::Reader<'_, R>, type_endian: deku::ctx::Endian) -> core::result::Result<Self, deku::error::DekuError>
+pub fn backhand::Id::from_reader_with_ctx<R: std::io::Read + std::io::Seek>(__deku_reader: &mut deku::reader::Reader<'_, R>, type_endian: deku::ctx::Endian) -> core::result::Result<Self, deku::error::DekuError>
-pub fn backhand::Id::to_writer<W: std::io::Write>(&self, __deku_writer: &mut deku::writer::Writer<W>, type_endian: deku::ctx::Endian) -> core::result::Result<(), deku::error::DekuError>
+pub fn backhand::Id::to_writer<W: std::io::Write + std::io::Seek>(&self, __deku_writer: &mut deku::writer::Writer<W>, type_endian: deku::ctx::Endian) -> core::result::Result<(), deku::error::DekuError>
-pub fn backhand::Inode::from_reader_with_ctx<R: std::io::Read>(__deku_reader: &mut deku::reader::Reader<'_, R>, (bytes_used, block_size, block_log, type_endian): (u64, u32, u16, deku::ctx::Endian)) -> core::result::Result<Self, deku::error::DekuError>
+pub fn backhand::Inode::from_reader_with_ctx<R: std::io::Read + std::io::Seek>(__deku_reader: &mut deku::reader::Reader<'_, R>, (bytes_used, block_size, block_log, type_endian): (u64, u32, u16, deku::ctx::Endian)) -> core::result::Result<Self, deku::error::DekuError>
-pub fn backhand::Inode::to_writer<W: std::io::Write>(&self, __deku_writer: &mut deku::writer::Writer<W>, (bytes_used, block_size, block_log, type_endian): (u64, u32, u16, deku::ctx::Endian)) -> core::result::Result<(), deku::error::DekuError>
+pub fn backhand::Inode::to_writer<W: std::io::Write + std::io::Seek>(&self, __deku_writer: &mut deku::writer::Writer<W>, (bytes_used, block_size, block_log, type_endian): (u64, u32, u16, deku::ctx::Endian)) -> core::result::Result<(), deku::error::DekuError>
-pub backhand::Squashfs::dir_blocks: (rustc_hash::FxHashMap<u64, u64>, alloc::vec::Vec<u8>)
+pub backhand::Squashfs::dir_blocks: (solana_nohash_hasher::IntMap<u64, u64>, alloc::vec::Vec<u8>)
-pub backhand::Squashfs::inodes: rustc_hash::FxHashMap<u32, backhand::Inode>
+pub backhand::Squashfs::inodes: solana_nohash_hasher::IntMap<u32, backhand::Inode>
-pub fn backhand::SuperBlock::from_reader_with_ctx<R: std::io::Read>(__deku_reader: &mut deku::reader::Reader<'_, R>, (ctx_magic, ctx_version_major, ctx_version_minor, ctx_type_endian): ([u8; 4], u16, u16, deku::ctx::Endian)) -> core::result::Result<Self, deku::error::DekuError>
+pub fn backhand::SuperBlock::from_reader_with_ctx<R: std::io::Read + std::io::Seek>(__deku_reader: &mut deku::reader::Reader<'_, R>, (ctx_magic, ctx_version_major, ctx_version_minor, ctx_type_endian): ([u8; 4], u16, u16, deku::ctx::Endian)) -> core::result::Result<Self, deku::error::DekuError>
-pub fn backhand::SuperBlock::to_writer<W: std::io::Write>(&self, __deku_writer: &mut deku::writer::Writer<W>, (ctx_magic, ctx_version_major, ctx_version_minor, ctx_type_endian): ([u8; 4], u16, u16, deku::ctx::Endian)) -> core::result::Result<(), deku::error::DekuError>
+pub fn backhand::SuperBlock::to_writer<W: std::io::Write + std::io::Seek>(&self, __deku_writer: &mut deku::writer::Writer<W>, (ctx_magic, ctx_version_major, ctx_version_minor, ctx_type_endian): ([u8; 4], u16, u16, deku::ctx::Endian)) -> core::result::Result<(), deku::error::DekuError>
Added items to the public API
=============================
+pub fn backhand::compression::DefaultCompressor::compression_options(&self, superblock: &mut backhand::SuperBlock, kind: &backhand::kind::Kind, fs_compressor: backhand::FilesystemCompressor) -> core::result::Result<alloc::vec::Vec<u8>, backhand::BackhandError>
+pub fn backhand::compression::DefaultCompressor::compression_options(&self, superblock: &mut backhand::SuperBlock, kind: &backhand::kind::Kind, fs_compressor: backhand::FilesystemCompressor) -> core::result::Result<alloc::vec::Vec<u8>, backhand::BackhandError>
+pub fn backhand::compression::CompressionAction::compression_options(&self, superblock: &mut backhand::SuperBlock, kind: &backhand::kind::Kind, fs_compressor: backhand::FilesystemCompressor) -> core::result::Result<alloc::vec::Vec<u8>, backhand::BackhandError>
+pub enum backhand::Flags
+pub backhand::Flags::CompressorOptionsArePresent = 1024
+pub backhand::Flags::DataBlockStoredUncompressed = 2
+pub backhand::Flags::DataHasBeenDeduplicated = 64
+pub backhand::Flags::FragmentsAreAlwaysGenerated = 32
+pub backhand::Flags::FragmentsAreNotUsed = 16
+pub backhand::Flags::FragmentsStoredUncompressed = 8
+pub backhand::Flags::InodesStoredUncompressed = 1
+pub backhand::Flags::NFSExportTableExists = 128
+pub backhand::Flags::NoXattrsInArchive = 512
+pub backhand::Flags::Unused = 4
+pub backhand::Flags::XattrsAreStoredUncompressed = 256
+impl core::clone::Clone for backhand::Flags
+pub fn backhand::Flags::clone(&self) -> backhand::Flags
+impl core::fmt::Debug for backhand::Flags
+pub fn backhand::Flags::fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result
+impl core::marker::Copy for backhand::Flags
+pub fn backhand::FilesystemWriter<'a, 'b, 'c>::set_emit_compression_options(&mut self, value: bool)
+pub fn backhand::FilesystemWriter<'a, 'b, 'c>::set_no_duplicate_files(&mut self, value: bool)
+pub fn backhand::SuperBlock::data_has_been_deduplicated(&self) -> bool
```
</details>
## [v0.18.0] - 2024-05-24
### `backhand`
- Update MSRV to 1.72.1 ([#524](https://github.com/wcampbell0x2a/backhand/pull/524))
### Dependencies
- Bump `deku` from 0.16.0 to 0.17.0 ([#524](https://github.com/wcampbell0x2a/backhand/pull/524))
- Bump `env_logger` from 0.10.2 to 0.11.3 ([#559](https://github.com/wcampbell0x2a/backhand/pull/559))
- Bump `libc` from 0.2.153 to 0.2.154 ([#546](https://github.com/wcampbell0x2a/backhand/pull/546))
## [v0.17.0] - 2024-05-06
- Remove unused `BufSeekRewind` and `SeekRewind` traits ([#550](https://github.com/wcampbell0x2a/backhand/pull/550))
- Fix docs.rs build ([#550](https://github.com/wcampbell0x2a/backhand/pull/550))
### Dependencies
- Bump `flate2` from 1.0.28 to 1.0.30 ([#547](https://github.com/wcampbell0x2a/backhand/pull/547))
## [v0.16.0] - 2024-04-25
### `backhand`
- Simplify API by removing `FilesystemReader::alloc_read_buffers()`. This is now handled internally by `FilesystemReader::reader()`; see the sketch after the API listing below ([#530](https://github.com/wcampbell0x2a/backhand/pull/530))
### `backhand-cli`
- Add `x86_64-apple-darwin` support and release binary ([#511](https://github.com/wcampbell0x2a/backhand/pull/511))
- Fix `--help` and correctly show `gzip` support when using `gzip-zune-inflate` ([#538](https://github.com/wcampbell0x2a/backhand/pull/538))
- Fix `--help` and correctly show `xz` support when using `xz-static` ([#541](https://github.com/wcampbell0x2a/backhand/pull/541))
### Dependencies
- Bump `zstd` from 0.13.0 to 0.13.1 ([#518](https://github.com/wcampbell0x2a/backhand/pull/518))
- Bump `rayon` from 1.9.0 to 1.10.0 ([#512](https://github.com/wcampbell0x2a/backhand/pull/512))
- Bump `codecov/codecov-action` from 4.1.0 to 4.3.0 ([#514](https://github.com/wcampbell0x2a/backhand/pull/514), [#526](https://github.com/wcampbell0x2a/backhand/pull/526))
- Bump `obi1kenobi/cargo-semver-checks-action` from 2.3 to 2.4 ([#507](https://github.com/wcampbell0x2a/backhand/pull/507))
- Bump `clap` from 4.5.2 to 4.5.4 ([#513](https://github.com/wcampbell0x2a/backhand/pull/513))
- Bump `libdeflater` from 1.19.3 to 1.20.0 ([#523](https://github.com/wcampbell0x2a/backhand/pull/523))
- Bump `dangoslen/dependabot-changelog-helper` from 3.8.1 to 3.9.0 ([#516](https://github.com/wcampbell0x2a/backhand/pull/516))
- Bump `thiserror` from 1.0.58 to 1.0.59 ([#534](https://github.com/wcampbell0x2a/backhand/pull/534))
- Bump `color-print` from 0.3.5 to 0.3.6 ([#537](https://github.com/wcampbell0x2a/backhand/pull/537))
- Bump `clap_complete` from 4.5.1 to 4.5.2 ([#525](https://github.com/wcampbell0x2a/backhand/pull/525))
#### Complete API Updates
<details>
<summary>Click to expand</summary>
```diff
Removed items from the public API
=================================
-pub fn backhand::FilesystemReader<'b>::alloc_read_buffers(&self) -> (alloc::vec::Vec<u8>, alloc::vec::Vec<u8>)
Changed items in the public API
===============================
-pub fn backhand::FilesystemReaderFile<'a, 'b>::reader(&self, buf_read: &'a mut alloc::vec::Vec<u8>, buf_decompress: &'a mut alloc::vec::Vec<u8>) -> backhand::SquashfsReadFile<'a, 'b>
+pub fn backhand::FilesystemReaderFile<'a, 'b>::reader(&self) -> backhand::SquashfsReadFile<'a, 'b>
Added items to the public API
=============================
(none)
```
</details>
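For contrast, a sketch of the call-site change (the `file` argument stands in for a `FilesystemReaderFile` obtained by iterating a `FilesystemReader`, elided here):
```rust
use std::io::Read;

use backhand::FilesystemReaderFile;

fn read_one(file: FilesystemReaderFile) -> Vec<u8> {
    // Before v0.16.0 the caller allocated the scratch buffers:
    //   let (mut buf_read, mut buf_decompress) = filesystem.alloc_read_buffers();
    //   let mut reader = file.reader(&mut buf_read, &mut buf_decompress);
    // From v0.16.0, reader() handles the buffers internally:
    let mut reader = file.reader();
    let mut contents = Vec::new();
    reader.read_to_end(&mut contents).unwrap();
    contents
}
```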
**Full Diff**: https://github.com/wcampbell0x2a/backhand/compare/v0.15.0...v0.16.0
## [v0.15.0] - 2024-03-24
### `backhand`
- Add support for `Socket` and `NamedFIFO` Inodes in library and extraction binaries (see the sketch after this list). Thanks ([@tnias](https://github.com/tnias)) ([#472](https://github.com/wcampbell0x2a/backhand/pull/472), [#470](https://github.com/wcampbell0x2a/backhand/pull/470))
  - Add `FilesystemWriter::push_fifo` and `FilesystemWriter::push_socket`
- Fix panic found with fuzz testing in `NodeHeader::from_inode` ([#494](https://github.com/wcampbell0x2a/backhand/pull/494))
- Add tests for zstd compression support. Fix bug with zstd writer and added `zstd-safe` dependency ([#504](https://github.com/wcampbell0x2a/backhand/pull/504))
- Added `inline`s to small public functions ([#504](https://github.com/wcampbell0x2a/backhand/pull/504))
- Changed `FilesystemReader.cache` to `RwLock` to reduce blocking time during fragment reading ([#504](https://github.com/wcampbell0x2a/backhand/pull/504))
- Increase performance of reading images heavy in uncompressed fragments (small files) by removing unnecessary clones of data ([#504](https://github.com/wcampbell0x2a/backhand/pull/504)). Found by ([@bryangarza](https://github.com/bryangarza)) in ([!503](https://github.com/wcampbell0x2a/backhand/issues/503)).
- Increased performance of reading inodes ([#453](https://github.com/wcampbell0x2a/backhand/pull/453))
- Reduce allocations within `Squashfs.dir_blocks` ([#447](https://github.com/wcampbell0x2a/backhand/pull/447))
- Add pre-allocate before reading `inodes` ([#437](https://github.com/wcampbell0x2a/backhand/pull/437))
- Prevent several bounds check failures found by fuzz testing ([#499](https://github.com/wcampbell0x2a/backhand/pull/499/files))
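A short sketch of the new special-file API, assuming `push_fifo`/`push_socket` take a path and a `NodeHeader` like the rest of the `push_*` family (the paths and default header are illustrative):
```rust
use backhand::{FilesystemWriter, NodeHeader};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut fs = FilesystemWriter::default();
    // New in this release: named pipes and sockets can be pushed directly.
    fs.push_fifo("queue.pipe", NodeHeader::default())?;
    fs.push_socket("daemon.sock", NodeHeader::default())?;
    Ok(())
}
```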
### `backhand-cli`
- Bump MSRV to `1.74` for `clap-4.5.1` update ([#483](https://github.com/wcampbell0x2a/backhand/pull/483))
### `backhand-test`
- Reduced maximum allocation during testing by free'ing right after full usage ([#504](https://github.com/wcampbell0x2a/backhand/pull/504))
#### unsquashfs
- Performance: Remove progress bar Mutex lock when `--quiet` ([#430](https://github.com/wcampbell0x2a/backhand/pull/430))
### Dependencies
- Bump `actions/upload-artifact` from 4.1.0 to 4.3.1 ([#435](https://github.com/wcampbell0x2a/backhand/pull/435), [#446](https://github.com/wcampbell0x2a/backhand/pull/446), [#465](https://github.com/wcampbell0x2a/backhand/pull/465))
- Bump `env_logger` from 0.10.1 to 0.10.2 ([#432](https://github.com/wcampbell0x2a/backhand/pull/432))
- Bump `rayon` from 1.8.0 to 1.9.0 ([#431](https://github.com/wcampbell0x2a/backhand/pull/431), [#496](https://github.com/wcampbell0x2a/backhand/pull/496))
- Bump `clap` from 4.4.17 to 4.5.2 ([#428](https://github.com/wcampbell0x2a/backhand/pull/428), [#500](https://github.com/wcampbell0x2a/backhand/pull/500))
- Bump `clap_complete` from 4.4.7 to 4.5.1 ([#444](https://github.com/wcampbell0x2a/backhand/pull/444), [#445](https://github.com/wcampbell0x2a/backhand/pull/445), [#482](https://github.com/wcampbell0x2a/backhand/pull/482))
- Bump `codecov/codecov-action` from 3.1.4 to 4.1.0 ([#448](https://github.com/wcampbell0x2a/backhand/pull/448), [#457](https://github.com/wcampbell0x2a/backhand/pull/457), [#458](https://github.com/wcampbell0x2a/backhand/pull/458), [#462](https://github.com/wcampbell0x2a/backhand/pull/462), [#488](https://github.com/wcampbell0x2a/backhand/pull/488), [#493](https://github.com/wcampbell0x2a/backhand/pull/493))
- Bump `obi1kenobi/cargo-semver-checks-action` from 2.2 to 2.3 ([#449](https://github.com/wcampbell0x2a/backhand/pull/449))
- Bump `libc` from 0.2.152 to 0.2.153 ([#459](https://github.com/wcampbell0x2a/backhand/pull/459))
- Bump `dangoslen/dependabot-changelog-helper` from 3.7.0 to 3.8.0 ([#461](https://github.com/wcampbell0x2a/backhand/pull/461))
- Bump `thiserror` from 1.0.56 to 1.0.58 ([#476](https://github.com/wcampbell0x2a/backhand/pull/476), [#502](https://github.com/wcampbell0x2a/backhand/pull/502))
- Bump `indicatif` from 0.17.7 to 0.17.8 ([#477](https://github.com/wcampbell0x2a/backhand/pull/477))
- Bump `libdeflater` from 1.19.0 to 1.19.3 ([#486](https://github.com/wcampbell0x2a/backhand/pull/486), [#498](https://github.com/wcampbell0x2a/backhand/pull/498))
- Bump `assert_cmd` from 2.0.13 to 2.0.14 ([#484](https://github.com/wcampbell0x2a/backhand/pull/484))
- Bump `nix` from 0.27.1 to 0.28.0 ([#489](https://github.com/wcampbell0x2a/backhand/pull/489))
- Bump `test-log` from 0.2.14 to 0.2.15 ([#492](https://github.com/wcampbell0x2a/backhand/pull/492))
- Bump `tempfile` from 3.9.0 to 3.10.1 ([#491](https://github.com/wcampbell0x2a/backhand/pull/491))
- Bump `actions/checkout` from 4.1.1 to 4.1.2 ([#501](https://github.com/wcampbell0x2a/backhand/pull/501))
**Full Diff**: https://github.com/wcampbell0x2a/backhand/compare/v0.14.2...v0.15.0
## [v0.14.2] - 2024-01-16
### `backhand`
- Enable overflow-checks ([#421](https://github.com/wcampbell0x2a/backhand/pull/421))
- Add feature `gzip-zune-inflate` to add a decompress only option with speed improvements ([#419](https://github.com/wcampbell0x2a/backhand/pull/419))
- Remove allocation for `impl From<BackhandError> for io::Error {` ([#425](https://github.com/wcampbell0x2a/backhand/pull/425))
### `backhand-cli`
- Enable overflow-checks for dist builds ([#421](https://github.com/wcampbell0x2a/backhand/pull/421))
#### unsquashfs
- Use feature `gzip-zune-inflate` for dist build and speed improvements ([#419](https://github.com/wcampbell0x2a/backhand/pull/419))
- Updated benchmarks to show improvement ([#419](https://github.com/wcampbell0x2a/backhand/pull/419))
### Dependencies
- Bump `clap` from 4.4.12 to 4.5.1 ([#417](https://github.com/wcampbell0x2a/backhand/pull/417), [#424](https://github.com/wcampbell0x2a/backhand/pull/424), [#483](https://github.com/wcampbell0x2a/backhand/pull/483))
- Bump `thiserror` from 1.0.53 to 1.0.56 ([#404](https://github.com/wcampbell0x2a/backhand/pull/404))
- Bump `actions/upload-artifact` from 4.0.0 to 4.1.0 ([#423](https://github.com/wcampbell0x2a/backhand/pull/423))
- Bump `libc` from 0.2.151 to 0.2.152 ([#408](https://github.com/wcampbell0x2a/backhand/pull/408))
- Bump `clap_complete` from 4.4.5 to 4.4.7 ([#426](https://github.com/wcampbell0x2a/backhand/pull/426))
- Bump `assert_cmd` from 2.0.12 to 2.0.13 ([#422](https://github.com/wcampbell0x2a/backhand/pull/422))
- Bump `console` from 0.15.7 to 0.15.8 ([#413](https://github.com/wcampbell0x2a/backhand/pull/413))
**Full Diff**: https://github.com/wcampbell0x2a/backhand/compare/v0.14.1...v0.14.2
## [v0.14.1] - 2024-01-13
### `backhand`
#### Changes
- Fix path to project `README.md` for `crates.io` ([#420](https://github.com/wcampbell0x2a/backhand/pull/420))
**Full Diff**: https://github.com/wcampbell0x2a/backhand/compare/v0.14.0...v0.14.1
## [v0.14.0] - 2024-01-13
Major changes were made to the organization of this repo, with the library `backhand` now being separated from
the `backhand-cli` package, which is used to install `unsquashfs`, `replace`, and `add`.
### `backhand`
#### Changes
- Following changes were done to allow multi-threaded applications (see the sketch after this list) ([#278](https://github.com/wcampbell0x2a/backhand/pull/278))
  - Change `RefCell<Box<T>>` into `Arc<Mutex<T>>`
  - Change `RefCell<T>` into `Mutex<T>`
  - Change `Rc<T>` into `Arc<T>`
  - Change `dyn CompressionAction` to `dyn CompressionAction + Send + Sync` for `Kind` uses
  - Change `BufReadSeek: BufRead + Seek {}` to `BufReadSeek: BufRead + Seek + Send {}`
- Allow user provided read/write files to not be static ([@rbran](https://github.com/rbran)) ([#285](https://github.com/wcampbell0x2a/backhand/pull/285))
- Bump MSRV to `1.67.1`
- Allow creating and reading uncompressed files ([@rbran](https://github.com/rbran)) ([#365](https://github.com/wcampbell0x2a/backhand/pull/365))
- Allow calling `FilesystemWriter::write` with Owned and RefMut writer ([@rbran](https://github.com/rbran)) ([#361](https://github.com/wcampbell0x2a/backhand/pull/361))
- Push dir, file, etc, with lifetimes unrelated to reader from `from_fs_reader` ([@rbran](https://github.com/rbran)) ([#361](https://github.com/wcampbell0x2a/backhand/pull/361))
For example, the following is now allowed:
```diff
- let mut output = File::create(&args.out).unwrap();
- if let Err(e) = filesystem.write(&mut output) {
+ let output = File::create(&args.out).unwrap();
+ if let Err(e) = filesystem.write(output) {
```
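The `Send + Sync` changes above enable patterns like the following. A sketch only (the thread count and extraction body are illustrative):
```rust
use std::sync::Arc;
use std::thread;

use backhand::FilesystemReader;

// Share a single FilesystemReader across extraction threads, which the
// new Send + Sync bounds permit.
fn extract_parallel(filesystem: FilesystemReader<'static>) {
    let filesystem = Arc::new(filesystem);
    let handles: Vec<_> = (0..4)
        .map(|_| {
            let fs = Arc::clone(&filesystem);
            thread::spawn(move || {
                for _node in fs.files() {
                    // decompress and extract this node's data here
                }
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
}
```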
#### Bug Fix
- When creating an empty image using `FilesystemWriter::default()`, correctly create the ID table for UID and GID entries. Reported: ([@hwittenborn](https://github.com/hwittenborn)) ([!275](https://github.com/wcampbell0x2a/backhand/issues/275)), Fixed: ([#275](https://github.com/wcampbell0x2a/backhand/pull/275))
- Remove manual `Clone` impl for `FilesystemReaderFile` ([#277](https://github.com/wcampbell0x2a/backhand/pull/277))
- Increase `DirectoryIndex::name_size` length from 100 to 255. ([@eatradish](https://github.com/eatradish)) ([!282](https://github.com/wcampbell0x2a/backhand/issues/282)), Fixed: ([#283](https://github.com/wcampbell0x2a/backhand/pull/283))
- Prevent `push_file` "file within file", will now return `InvalidFilePath` ([@rbran](https://github.com/rbran)) ([#364](https://github.com/wcampbell0x2a/backhand/pull/364))
- Fix `gid` and `uid` for `push_dir_all(..)` ([#360](https://github.com/wcampbell0x2a/backhand/pull/360))
#### Security
- Only allow root and simple filenames into `DirEntry` ([@rbran](https://github.com/rbran)) ([#271](https://github.com/wcampbell0x2a/backhand/pull/271))
### `backhand-cli`
#### Changes to All
- `strip` and `LTO` are enabled for release binaries
- Fix macOS builds ([#260](https://github.com/wcampbell0x2a/backhand/pull/260))
- Bump MSRV to `1.73.0` to use the now-stabilized `std::os::unix::fs::lchown`
- Add color styling to help output ([#387](https://github.com/wcampbell0x2a/backhand/pull/387))
#### unsquashfs
- Changed name to `unsquashfs-backhand` ([#356](https://github.com/wcampbell0x2a/backhand/pull/356))
- Add progress bar for a cleaner output when extracting files ([#272](https://github.com/wcampbell0x2a/backhand/pull/272))
- Add `--quiet` for not displaying progress bar and RUST_LOG output ([#272](https://github.com/wcampbell0x2a/backhand/pull/272))
- Add multiple threads for extracting files, giving us the same performance in most cases as `squashfs-tools/unsquashfs`! ([#278](https://github.com/wcampbell0x2a/backhand/pull/278))
#### add
- Changed name to `add-backhand` ([#356](https://github.com/wcampbell0x2a/backhand/pull/356))
#### replace
- Changed name to `replace-backhand` ([#356](https://github.com/wcampbell0x2a/backhand/pull/356))
### ci
- Add testing and release binaries for the following platforms: ([#259](https://github.com/wcampbell0x2a/backhand/pull/259))
- `aarch64-unknown-linux-musl`
- `arm-unknown-linux-musleabi`
- `x86_64-unknown-linux-musl` (release binaries were already supported)
- Testing and release binaries were not added for macOS, but support was tested on that platform.
### testing
- Replace curl in test dependency `test-assets` with ureq ([#264](https://github.com/wcampbell0x2a/backhand/pull/264))
- Replace `zune-inflate` with `libdeflater` for custom decompression testing for reliability ([#325](https://github.com/wcampbell0x2a/backhand/pull/325))
### Dependencies
- Bump `flate2` from 1.0.26 to 1.0.28 ([#307](https://github.com/wcampbell0x2a/backhand/pull/307))
- Bump `jemallocator` from 0.5.0 to 0.5.4 ([#305](https://github.com/wcampbell0x2a/backhand/pull/305))
- Bump `env_logger` from 0.10.0 to 0.10.1 ([#341](https://github.com/wcampbell0x2a/backhand/pull/341))
- Bump `clap` from 4.4.7 to 4.4.12 ([#340](https://github.com/wcampbell0x2a/backhand/pull/340), [#371](https://github.com/wcampbell0x2a/backhand/pull/371), [#376](https://github.com/wcampbell0x2a/backhand/pull/376), [#399](https://github.com/wcampbell0x2a/backhand/pull/399))
- Bump `dangoslen/dependabot-changelog-helper` from 3.5.0 to 3.7.0 ([#342](https://github.com/wcampbell0x2a/backhand/pull/342), [#369](https://github.com/wcampbell0x2a/backhand/pull/369))
- Bump `tracing-subscriber` from 0.3.17 to 0.3.18 ([#347](https://github.com/wcampbell0x2a/backhand/pull/347))
- Bump `byte-unit` from 4.0.19 to 5.0.3 ([#367](https://github.com/wcampbell0x2a/backhand/pull/367))
- Bump `actions/labeler` from 4 to 5 ([#377](https://github.com/wcampbell0x2a/backhand/pull/377))
- Bump `test-log` from 0.2.13 to 0.2.14 ([#378](https://github.com/wcampbell0x2a/backhand/pull/378))
- Bump `clap_complete` from 4.4.4 to 4.4.5 ([#393](https://github.com/wcampbell0x2a/backhand/pull/393))
- Bump `thiserror` from 1.0.51 to 1.0.53 ([#391](https://github.com/wcampbell0x2a/backhand/pull/391), [#401](https://github.com/wcampbell0x2a/backhand/pull/401))
- Bump `actions/upload-artifact` from 3.1.3 to 4.0.0 ([#380](https://github.com/wcampbell0x2a/backhand/pull/380))
- Bump `tempfile` from 3.8.1 to 3.9.0 ([#398](https://github.com/wcampbell0x2a/backhand/pull/398))
- Bump `document-features` from 0.2.7 to 0.2.8 ([#400](https://github.com/wcampbell0x2a/backhand/pull/400))
**Full Diff**: https://github.com/wcampbell0x2a/backhand/compare/v0.13.0...v0.14.0
## [v0.13.0] - 2023-06-18
### backhand
#### Changes
- Decrease memory usage for the file reader and writer ([#255](https://github.com/wcampbell0x2a/backhand/pull/255))
- Remove unnecessary deconstruction/reconstruction of Vec when reading inodes ([@rbran](https://github.com/rbran)) ([#251](https://github.com/wcampbell0x2a/backhand/pull/251))
- Only store file data compressed if it results in smaller size ([@rbran](https://github.com/rbran)) ([#250](https://github.com/wcampbell0x2a/backhand/pull/250))
- Remove `lzo` being a default feature because of GPL license ([#240](https://github.com/wcampbell0x2a/backhand/pull/240))
- Add support for OpenWRT compression options ([#239](https://github.com/wcampbell0x2a/backhand/pull/239))
- Bump MSRV to `1.65.0` for latest `clap` requirements ([#253](https://github.com/wcampbell0x2a/backhand/pull/253))
#### Bug Fix
- Fix bug in generating UIDs and GIDs with `FilesystemWriter`; all internal representations of GID and UID were changed to `u32` (see the sketch after the API diff below) ([#254](https://github.com/wcampbell0x2a/backhand/pull/254))
- Remove case where an invalid filesystem `root_inode_offset` would cause an invalid bounds read panic. Found by the fuzzer ([#245](https://github.com/wcampbell0x2a/backhand/pull/245))
#### Complete API Updates
```
$ cargo public-api -ss diff v0.12.0..HEAD
```
<details>
<summary>Click to expand</summary>
```diff
Removed items from the public API
=================================
(none)
Changed items in the public API
===============================
-pub fn backhand::FilesystemWriter<'a>::set_root_gid(&mut self, gid: u16)
+pub fn backhand::FilesystemWriter<'a>::set_root_gid(&mut self, gid: u32)
-pub fn backhand::FilesystemWriter<'a>::set_root_uid(&mut self, uid: u16)
+pub fn backhand::FilesystemWriter<'a>::set_root_uid(&mut self, uid: u32)
-pub backhand::NodeHeader::gid: u16
+pub backhand::NodeHeader::gid: u32
-pub backhand::NodeHeader::uid: u16
+pub backhand::NodeHeader::uid: u32
-pub fn backhand::NodeHeader::new(permissions: u16, uid: u16, gid: u16, mtime: u32) -> Self
+pub fn backhand::NodeHeader::new(permissions: u16, uid: u32, gid: u32, mtime: u32) -> Self
Added items to the public API
=============================
+pub backhand::compression::Xz::bit_opts: core::option::Option<u16>
+pub backhand::compression::Xz::fb: core::option::Option<u16>
+pub fn backhand::kind::Kind::magic(&self) -> [u8; 4]
+impl backhand::NodeHeader
+pub fn backhand::NodeHeader::from_inode(inode_header: InodeHeader, id_table: &[backhand::Id]) -> Self
```
</details>
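A small sketch of the widened `NodeHeader` API from the diff above; with `uid` and `gid` now `u32`, IDs above `u16::MAX` can be expressed directly (the values here are illustrative):
```rust
use backhand::NodeHeader;

fn main() {
    // new(permissions: u16, uid: u32, gid: u32, mtime: u32)
    let header = NodeHeader::new(0o755, 100_000, 100_000, 0);
    let _ = header;
}
```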
### All binaries
#### Changes
- jemalloc is now used for `-musl` release targets for performance reasons ([#254](https://github.com/wcampbell0x2a/backhand/pull/254))
- `HAVE_DECODER_ARM`, `HAVE_DECODER_ARM64`, and `HAVE_DECODER_ARMTHUMB` filter flags are now defined for xz2. This only affects static builds created in our CI. ([#248](https://github.com/wcampbell0x2a/backhand/pull/248))
- Add `RUST_LOG` and available Decompressors to `--help` of all binaries ([#242](https://github.com/wcampbell0x2a/backhand/pull/242))
### add
#### Changes
- Add `--dir` to create an empty directory ([#242](https://github.com/wcampbell0x2a/backhand/pull/242))
#### Bug Fix
- Correctly read new file metadata from `--file`, and force the other arguments when using `--dir` ([#254](https://github.com/wcampbell0x2a/backhand/pull/254))
### unsquashfs
#### Changes
- Add `--auto-offset` for automatic finding of initial SquashFS offset in image ([#241](https://github.com/wcampbell0x2a/backhand/pull/241))
- Add possible `kind` values to `--help` output ([#236](https://github.com/wcampbell0x2a/backhand/pull/236))
- Add `--path-filter` to limit file extraction to a path ([#237](https://github.com/wcampbell0x2a/backhand/pull/237))
**Full Diff**: https://github.com/wcampbell0x2a/backhand/compare/v0.12.0...v0.13.0
## [v0.12.0] - 2023-05-07
Thanks [@rbran](https://github.com/rbran/) for the contributions!
### backhand
- `Kind` has been extended to take a `CompressionAction`, allowing a custom compression and decompression
algorithm. This defaults to the `DefaultCompressor` in most situations to match the Linux kernel
squashfs code, and should allow an even greater array of custom vendor SquashFS images to be supported
(see the sketch at the end of this list). Many API changes were made to support this; most of the following
changes focus on the public API that we expect the typical developer to use.
- Added method to allow creating image without padding: `FilesystemWriter::set_no_padding()`
- Added method to allow modification to Compression options: `FilesystemCompressor::options(&mut self, options: CompressionOptions)`
- Added `FilesystemWriter::push_dir_all`, following the behavior of `std::fs::create_dir_all` to create required parent directories
- Added `FilesystemReader::files()` and `file()` as the new methods of reading files from an image.
This change also reduced the allocations in use when reading.
```diff
- for node in &filesystem.nodes {
+ for node in filesystem.files() {
```
- Compression options are now written to the image during `FilesystemWriter::write(..)`
- Removed an unused allocation in `SquashFsReader`. No change in public API.
- Changed `SquashfsReadFile::reader(..)` to reduce the number of allocations when extracting a file.
This required adding `alloc_read_buffers` to initialize the re-used buffers.
```diff
+// alloc required space for file data readers
+let (mut buf_read, mut buf_decompress) = filesystem.alloc_read_buffers();
-let mut reader = filesystem.file(&file.basic).reader();
+let mut reader = filesystem
+ .file(&file.basic)
+ .reader(&mut buf_read, &mut buf_decompress);
```
- Removed `FilesystemReader::read_file`
- Changed `FilesystemWriter::push_file<P: Into<PathBuf>>(..)` into `push_file<P: AsRef<Path>>(..)`.
NOTE: The function will no longer create parent directories! Instead use the new `FilesystemWriter::push_dir_all`
- Removed `SquashfsFileSource`
- Changed `FilesystemWriter::push_*()` functions to now return `BackhandError` to avoid duplicate files and invalid file paths.
The following `BackhandError`s were added to support this: `DuplicatedFileName`, `UndefineFileName`, and `InvalidFilePath`.
- Changed `FilesystemWriter::push_block_device<P: Into<PathBuf>>()` into `P: AsRef<Path>`
- Changed `FilesystemWriter::push_char_device<P: Into<PathBuf>>()` into `P: AsRef<Path>`
- Changed `FilesystemWriter::write_with_offset()` to now take `&mut self`
- Changed `FilesystemWriter::write()` to now take `&mut self`
- Removed trait bound from `FilesystemReader`, `FilesystemReaderFile`, and `FilesystemWriter`:
```diff
-pub struct backhand::FilesystemReader<R: backhand::ReadSeek>
+pub struct backhand::FilesystemReader
-pub struct backhand::FilesystemReaderFile<'a, R: backhand::ReadSeek>
+pub struct backhand::FilesystemReaderFile<'a>
-pub struct backhand::FilesystemWriter<'a, R: backhand::ReadSeek>
+pub struct backhand::FilesystemWriter<'a>
```
- Changed public fields in `FilesystemReader`:
```diff
-pub root_inode: SquashfsDir,
-pub nodes: Vec<Node<SquashfsFileReader>>,
+pub root: Nodes<SquashfsFileReader>,
```
- `FilesystemReader::from_reader_*()` functions now take `BufReadSeek` for an increase in performance during reading for some images.
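A minimal sketch of the custom-compression hook described at the top of this list, using only the trait and constructor signatures shown in the diff below. `VendorCompressor` is a hypothetical type that simply delegates to `DefaultCompressor`; a real vendor implementation would differ only in the bodies of these two methods:
```rust
use backhand::compression::{CompressionAction, Compressor, DefaultCompressor};
use backhand::kind::Kind;
use backhand::{BackhandError, FilesystemCompressor};

#[derive(Copy, Clone)]
struct VendorCompressor;

impl CompressionAction for VendorCompressor {
    fn compress(
        &self,
        bytes: &[u8],
        fc: FilesystemCompressor,
        block_size: u32,
    ) -> Result<Vec<u8>, BackhandError> {
        // a vendor scheme would transform `bytes` here
        DefaultCompressor.compress(bytes, fc, block_size)
    }

    fn decompress(
        &self,
        bytes: &[u8],
        out: &mut Vec<u8>,
        compressor: Compressor,
    ) -> Result<(), BackhandError> {
        DefaultCompressor.decompress(bytes, out, compressor)
    }
}

// Kind::new takes a &'static compressor (see the API diff below)
static VENDOR: VendorCompressor = VendorCompressor;

fn main() {
    let kind = Kind::new(&VENDOR);
    let _ = kind;
}
```
The resulting `Kind` can then be handed to `FilesystemReader::from_reader_with_offset_and_kind(..)` to parse images produced with the vendor scheme.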
#### Detailed Changed/Added/Removed
```
$ cargo public-api -ss diff v0.11.0..HEAD
```
<details>
<summary>Click to expand</summary>
```diff
Removed items from the public API
=================================
-pub fn backhand::kind::Kind::new() -> Self
-impl core::default::Default for backhand::kind::Kind
-pub fn backhand::kind::Kind::default() -> Self
-impl deku::DekuRead<'_, backhand::kind::Kind> for backhand::Export
-impl deku::DekuRead<'_, backhand::kind::Kind> for backhand::Export
-pub fn backhand::kind::Kind::read(__deku_input_bits: &bitvec::slice::BitSlice<u8, bitvec::order::Msb0>, kind: backhand::kind::Kind) -> core::result::Result<(&bitvec::slice::BitSlice<u8, bitvec::order::Msb0>, Self), deku::error::DekuError>
-pub fn backhand::kind::Kind::read(__deku_input_bits: &bitvec::slice::BitSlice<u8, bitvec::order::Msb0>, kind: backhand::kind::Kind) -> core::result::Result<(&bitvec::slice::BitSlice<u8, bitvec::order::Msb0>, Self), deku::error::DekuError>
-pub fn backhand::kind::Kind::read(__deku_input_bits: &bitvec::slice::BitSlice<u8, bitvec::order::Msb0>, kind: backhand::kind::Kind) -> core::result::Result<(&bitvec::slice::BitSlice<u8, bitvec::order::Msb0>, Self), deku::error::DekuError>
-pub fn backhand::kind::Kind::read(__deku_input_bits: &bitvec::slice::BitSlice<u8, bitvec::order::Msb0>, kind: backhand::kind::Kind) -> core::result::Result<(&bitvec::slice::BitSlice<u8, bitvec::order::Msb0>, Self), deku::error::DekuError>
-impl deku::DekuRead<'_, backhand::kind::Kind> for backhand::Fragment
-impl deku::DekuRead<'_, backhand::kind::Kind> for backhand::Fragment
-impl deku::DekuRead<'_, backhand::kind::Kind> for backhand::Id
-impl deku::DekuRead<'_, backhand::kind::Kind> for backhand::Id
-impl deku::DekuRead<'_, backhand::kind::Kind> for backhand::SuperBlock
-impl deku::DekuRead<'_, backhand::kind::Kind> for backhand::SuperBlock
-impl deku::DekuWrite<backhand::kind::Kind> for backhand::Export
-impl deku::DekuWrite<backhand::kind::Kind> for backhand::Export
-pub fn backhand::kind::Kind::write(&self, __deku_output: &mut bitvec::vec::BitVec<u8, bitvec::order::Msb0>, kind: backhand::kind::Kind) -> core::result::Result<(), deku::error::DekuError>
-pub fn backhand::kind::Kind::write(&self, __deku_output: &mut bitvec::vec::BitVec<u8, bitvec::order::Msb0>, kind: backhand::kind::Kind) -> core::result::Result<(), deku::error::DekuError>
-pub fn backhand::kind::Kind::write(&self, __deku_output: &mut bitvec::vec::BitVec<u8, bitvec::order::Msb0>, kind: backhand::kind::Kind) -> core::result::Result<(), deku::error::DekuError>
-pub fn backhand::kind::Kind::write(&self, __deku_output: &mut bitvec::vec::BitVec<u8, bitvec::order::Msb0>, kind: backhand::kind::Kind) -> core::result::Result<(), deku::error::DekuError>
-impl deku::DekuWrite<backhand::kind::Kind> for backhand::Fragment
-impl deku::DekuWrite<backhand::kind::Kind> for backhand::Fragment
-impl deku::DekuWrite<backhand::kind::Kind> for backhand::Id
-impl deku::DekuWrite<backhand::kind::Kind> for backhand::Id
-impl deku::DekuWrite<backhand::kind::Kind> for backhand::SuperBlock
-impl deku::DekuWrite<backhand::kind::Kind> for backhand::SuperBlock
-impl core::clone::Clone for backhand::kind::Kind
-pub fn backhand::kind::Kind::clone(&self) -> backhand::kind::Kind
-impl core::cmp::Eq for backhand::kind::Kind
-impl core::cmp::PartialEq<backhand::kind::Kind> for backhand::kind::Kind
-pub fn backhand::kind::Kind::eq(&self, other: &backhand::kind::Kind) -> bool
-impl core::marker::Copy for backhand::kind::Kind
-impl core::marker::StructuralEq for backhand::kind::Kind
-impl core::marker::StructuralPartialEq for backhand::kind::Kind
-pub enum backhand::SquashfsFileSource<'a, R: backhand::ReadSeek>
-pub backhand::SquashfsFileSource::SquashfsFile(backhand::FilesystemReaderFile<'a, R>)
-pub backhand::SquashfsFileSource::UserDefined(core::cell::RefCell<alloc::boxed::Box<(dyn std::io::Read + 'a)>>)
-pub fn backhand::Export::read(__deku_input_bits: &bitvec::slice::BitSlice<u8, bitvec::order::Msb0>, kind: backhand::kind::Kind) -> core::result::Result<(&bitvec::slice::BitSlice<u8, bitvec::order::Msb0>, Self), deku::error::DekuError>
-pub fn backhand::Export::write(&self, __deku_output: &mut bitvec::vec::BitVec<u8, bitvec::order::Msb0>, kind: backhand::kind::Kind) -> core::result::Result<(), deku::error::DekuError>
-pub backhand::FilesystemReader::nodes: alloc::vec::Vec<backhand::Node<backhand::SquashfsFileReader>>
-pub backhand::FilesystemReader::root_inode: backhand::SquashfsDir
-impl<R: backhand::ReadSeek> backhand::FilesystemReader<R>
-impl<R: backhand::ReadSeek> backhand::FilesystemReader<R>
-pub fn backhand::FilesystemReader::file<'a>(&'a self, basic_file: &'a backhand::BasicFile) -> backhand::FilesystemReaderFile<'a, R>
-pub fn backhand::FilesystemReader::read_file(&self, basic_file: &backhand::BasicFile) -> core::result::Result<alloc::vec::Vec<u8>, backhand::BackhandError>
-pub fn backhand::FilesystemReader::from_reader(reader: R) -> core::result::Result<Self, backhand::BackhandError>
-impl<R: backhand::ReadSeek> backhand::FilesystemReader<SquashfsReaderWithOffset<R>>
-pub fn backhand::FilesystemReader::from_reader_with_offset(reader: R, offset: u64) -> core::result::Result<Self, backhand::BackhandError>
-pub fn backhand::FilesystemReader::from_reader_with_offset_and_kind(reader: R, offset: u64, kind: backhand::kind::Kind) -> core::result::Result<Self, backhand::BackhandError>
-impl<R: core::fmt::Debug + backhand::ReadSeek> core::fmt::Debug for backhand::FilesystemReader<R>
-pub fn backhand::FilesystemReader::fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result
-impl<'a, R: backhand::ReadSeek> backhand::FilesystemReaderFile<'a, R>
-pub fn backhand::FilesystemReaderFile::new(system: &'a backhand::FilesystemReader<R>, basic: &'a backhand::BasicFile) -> Self
-pub fn backhand::FilesystemReaderFile::reader(&self) -> SquashfsReadFile<'a, R>
-impl<'a, R: backhand::ReadSeek> core::clone::Clone for backhand::FilesystemReaderFile<'a, R>
-impl<'a, R: backhand::ReadSeek> core::iter::traits::collect::IntoIterator for backhand::FilesystemReaderFile<'a, R>
-impl<'a, R: core::marker::Copy + backhand::ReadSeek> core::marker::Copy for backhand::FilesystemReaderFile<'a, R>
-impl<'a, R: backhand::ReadSeek> backhand::FilesystemWriter<'a, R>
-pub fn backhand::FilesystemWriter::from_fs_reader(reader: &'a backhand::FilesystemReader<R>) -> core::result::Result<Self, backhand::BackhandError>
-pub fn backhand::FilesystemWriter::mut_file<S: core::convert::Into<std::path::PathBuf>>(&mut self, find_path: S) -> core::option::Option<&mut backhand::SquashfsFileWriter<'a, R>>
-pub fn backhand::FilesystemWriter::push_block_device<P: core::convert::Into<std::path::PathBuf>>(&mut self, device_number: u32, path: P, header: backhand::NodeHeader)
-pub fn backhand::FilesystemWriter::push_char_device<P: core::convert::Into<std::path::PathBuf>>(&mut self, device_number: u32, path: P, header: backhand::NodeHeader)
-pub fn backhand::FilesystemWriter::push_dir<P: core::convert::Into<std::path::PathBuf>>(&mut self, path: P, header: backhand::NodeHeader)
-pub fn backhand::FilesystemWriter::push_file<P: core::convert::Into<std::path::PathBuf>>(&mut self, reader: impl std::io::Read + 'a, path: P, header: backhand::NodeHeader)
-pub fn backhand::FilesystemWriter::push_symlink<P: core::convert::Into<std::path::PathBuf>, S: core::convert::Into<std::path::PathBuf>>(&mut self, link: S, path: P, header: backhand::NodeHeader)
-pub fn backhand::FilesystemWriter::replace_file<S: core::convert::Into<std::path::PathBuf>>(&mut self, find_path: S, reader: impl std::io::Read + 'a) -> core::result::Result<(), backhand::BackhandError>
-pub fn backhand::FilesystemWriter::write<W: std::io::Write + std::io::Seek>(&self, w: &mut W) -> core::result::Result<(backhand::SuperBlock, u64), backhand::BackhandError>
-pub fn backhand::FilesystemWriter::write_with_offset<W: std::io::Write + std::io::Seek>(&self, w: &mut W, offset: u64) -> core::result::Result<(backhand::SuperBlock, u64), backhand::BackhandError>
-impl core::default::Default for backhand::FilesystemWriter<'_>
-impl<'a, R: core::fmt::Debug + backhand::ReadSeek> core::fmt::Debug for backhand::FilesystemWriter<'a, R>
-pub fn backhand::Fragment::read(__deku_input_bits: &bitvec::slice::BitSlice<u8, bitvec::order::Msb0>, kind: backhand::kind::Kind) -> core::result::Result<(&bitvec::slice::BitSlice<u8, bitvec::order::Msb0>, Self), deku::error::DekuError>
-pub fn backhand::Fragment::write(&self, __deku_output: &mut bitvec::vec::BitVec<u8, bitvec::order::Msb0>, kind: backhand::kind::Kind) -> core::result::Result<(), deku::error::DekuError>
-pub fn backhand::Id::read(__deku_input_bits: &bitvec::slice::BitSlice<u8, bitvec::order::Msb0>, kind: backhand::kind::Kind) -> core::result::Result<(&bitvec::slice::BitSlice<u8, bitvec::order::Msb0>, Self), deku::error::DekuError>
-pub fn backhand::Id::write(&self, __deku_output: &mut bitvec::vec::BitVec<u8, bitvec::order::Msb0>, kind: backhand::kind::Kind) -> core::result::Result<(), deku::error::DekuError>
-impl deku::DekuRead<'_, (u64, u32, u16, backhand::kind::Kind)> for backhand::Inode
-pub fn backhand::Inode::read(__deku_input_bits: &bitvec::slice::BitSlice<u8, bitvec::order::Msb0>, (bytes_used, block_size, block_log, kind): (u64, u32, u16, backhand::kind::Kind)) -> core::result::Result<(&bitvec::slice::BitSlice<u8, bitvec::order::Msb0>, Self), deku::error::DekuError>
-impl deku::DekuWrite<(u64, u32, u16, backhand::kind::Kind)> for backhand::Inode
-pub fn backhand::Inode::write(&self, __deku_output: &mut bitvec::vec::BitVec<u8, bitvec::order::Msb0>, (bytes_used, block_size, block_log, kind): (u64, u32, u16, backhand::kind::Kind)) -> core::result::Result<(), deku::error::DekuError>
-pub backhand::Node::path: std::path::PathBuf
-pub fn backhand::Node::new(path: std::path::PathBuf, inner: backhand::InnerNode<T>) -> Self
-impl<T: core::cmp::Eq> core::cmp::Eq for backhand::Node<T>
-impl<T: core::cmp::PartialEq> core::cmp::PartialEq<backhand::Node<T>> for backhand::Node<T>
-pub fn backhand::Node::eq(&self, other: &backhand::Node<T>) -> bool
-impl<T> core::marker::StructuralEq for backhand::Node<T>
-impl<T> core::marker::StructuralPartialEq for backhand::Node<T>
-pub backhand::Squashfs::data_and_fragments: alloc::vec::Vec<u8>
-impl<R: backhand::ReadSeek> backhand::Squashfs<R>
-impl<R: backhand::ReadSeek> backhand::Squashfs<R>
-pub fn backhand::Squashfs::from_reader(reader: R) -> core::result::Result<backhand::Squashfs<R>, backhand::BackhandError>
-pub fn backhand::Squashfs::into_filesystem_reader(self) -> core::result::Result<backhand::FilesystemReader<R>, backhand::BackhandError>
-impl<R: backhand::ReadSeek> backhand::Squashfs<SquashfsReaderWithOffset<R>>
-pub fn backhand::Squashfs::from_reader_with_offset(reader: R, offset: u64) -> core::result::Result<backhand::Squashfs<SquashfsReaderWithOffset<R>>, backhand::BackhandError>
-pub fn backhand::Squashfs::from_reader_with_offset_and_kind(reader: R, offset: u64, kind: backhand::kind::Kind) -> core::result::Result<backhand::Squashfs<SquashfsReaderWithOffset<R>>, backhand::BackhandError>
-pub backhand::SquashfsBlockDevice::header: backhand::NodeHeader
-pub backhand::SquashfsCharacterDevice::header: backhand::NodeHeader
-pub backhand::SquashfsDir::header: backhand::NodeHeader
-pub backhand::SquashfsFileReader::header: backhand::NodeHeader
-pub struct backhand::SquashfsFileWriter<'a, R: backhand::ReadSeek>
-pub backhand::SquashfsFileWriter::header: backhand::NodeHeader
-pub backhand::SquashfsFileWriter::reader: backhand::SquashfsFileSource<'a, R>
-impl<'a, R: backhand::ReadSeek> core::fmt::Debug for backhand::SquashfsFileWriter<'a, R>
-pub backhand::SquashfsSymlink::header: backhand::NodeHeader
-pub const backhand::SuperBlock::NOT_SET: u64
-pub fn backhand::SuperBlock::read(__deku_input_bits: &bitvec::slice::BitSlice<u8, bitvec::order::Msb0>, kind: backhand::kind::Kind) -> core::result::Result<(&bitvec::slice::BitSlice<u8, bitvec::order::Msb0>, Self), deku::error::DekuError>
-pub fn backhand::SuperBlock::write(&self, __deku_output: &mut bitvec::vec::BitVec<u8, bitvec::order::Msb0>, kind: backhand::kind::Kind) -> core::result::Result<(), deku::error::DekuError>
-pub trait backhand::ReadSeek: std::io::Read + std::io::Seek
-impl<T: std::io::Read + std::io::Seek> backhand::ReadSeek for T
Changed items in the public API
===============================
-pub struct backhand::Export(pub u64)
+pub struct backhand::Export
-pub struct backhand::FilesystemReader<R: backhand::ReadSeek>
+pub struct backhand::FilesystemReader
-pub struct backhand::FilesystemReaderFile<'a, R: backhand::ReadSeek>
+pub struct backhand::FilesystemReaderFile<'a>
-pub struct backhand::FilesystemWriter<'a, R: backhand::ReadSeek>
+pub struct backhand::FilesystemWriter<'a>
-pub struct backhand::Id(pub u32)
+pub struct backhand::Id
-pub struct backhand::Squashfs<R: backhand::ReadSeek>
+pub struct backhand::Squashfs
Added items to the public API
=============================
+pub struct backhand::compression::DefaultCompressor
+impl backhand::compression::CompressionAction for backhand::compression::DefaultCompressor
+impl backhand::compression::CompressionAction for backhand::compression::DefaultCompressor
+pub fn backhand::compression::DefaultCompressor::compress(&self, bytes: &[u8], fc: backhand::FilesystemCompressor, block_size: u32) -> core::result::Result<alloc::vec::Vec<u8>, backhand::BackhandError>
+pub fn backhand::compression::DefaultCompressor::decompress(&self, bytes: &[u8], out: &mut alloc::vec::Vec<u8>, compressor: backhand::compression::Compressor) -> core::result::Result<(), backhand::BackhandError>
+impl core::clone::Clone for backhand::compression::DefaultCompressor
+pub fn backhand::compression::DefaultCompressor::clone(&self) -> backhand::compression::DefaultCompressor
+impl core::marker::Copy for backhand::compression::DefaultCompressor
+pub trait backhand::compression::CompressionAction
+pub fn backhand::compression::CompressionAction::compress(&self, bytes: &[u8], fc: backhand::FilesystemCompressor, block_size: u32) -> core::result::Result<alloc::vec::Vec<u8>, backhand::BackhandError>
+pub fn backhand::compression::CompressionAction::compress(&self, bytes: &[u8], fc: backhand::FilesystemCompressor, block_size: u32) -> core::result::Result<alloc::vec::Vec<u8>, backhand::BackhandError>
+pub fn backhand::compression::CompressionAction::decompress(&self, bytes: &[u8], out: &mut alloc::vec::Vec<u8>, compressor: backhand::compression::Compressor) -> core::result::Result<(), backhand::BackhandError>
+pub fn backhand::compression::CompressionAction::decompress(&self, bytes: &[u8], out: &mut alloc::vec::Vec<u8>, compressor: backhand::compression::Compressor) -> core::result::Result<(), backhand::BackhandError>
+pub fn backhand::kind::Kind::from_const(inner: InnerKind<dyn backhand::compression::CompressionAction>) -> core::result::Result<backhand::kind::Kind, alloc::string::String>
+pub fn backhand::kind::Kind::from_kind(kind: &backhand::kind::Kind) -> backhand::kind::Kind
+pub fn backhand::kind::Kind::from_target(s: &str) -> core::result::Result<backhand::kind::Kind, alloc::string::String>
+pub fn backhand::kind::Kind::new<C: backhand::compression::CompressionAction>(compressor: &'static C) -> Self
+pub fn backhand::kind::Kind::new_with_const<C: backhand::compression::CompressionAction>(compressor: &'static C, c: InnerKind<dyn backhand::compression::CompressionAction>) -> Self
+pub backhand::BackhandError::DuplicatedFileName
+pub backhand::BackhandError::InvalidFilePath
+pub backhand::BackhandError::UndefineFileName
+pub enum backhand::SquashfsFileWriter<'a>
+pub backhand::SquashfsFileWriter::Consumed(usize, Added)
+pub backhand::SquashfsFileWriter::SquashfsFile(backhand::FilesystemReaderFile<'a>)
+pub backhand::SquashfsFileWriter::UserDefined(core::cell::RefCell<alloc::boxed::Box<(dyn std::io::Read + 'a)>>)
+impl<'a> core::fmt::Debug for backhand::SquashfsFileWriter<'a>
+pub backhand::Export::num: u64
+impl deku::DekuRead<'_, deku::ctx::Endian> for backhand::Export
+pub fn backhand::Export::read(__deku_input_bits: &bitvec::slice::BitSlice<u8, bitvec::order::Msb0>, type_endian: deku::ctx::Endian) -> core::result::Result<(&bitvec::slice::BitSlice<u8, bitvec::order::Msb0>, Self), deku::error::DekuError>
+impl deku::DekuWrite<deku::ctx::Endian> for backhand::Export
+pub fn backhand::Export::write(&self, __deku_output: &mut bitvec::vec::BitVec<u8, bitvec::order::Msb0>, type_endian: deku::ctx::Endian) -> core::result::Result<(), deku::error::DekuError>
+pub fn backhand::FilesystemCompressor::options(&mut self, options: backhand::compression::CompressionOptions) -> core::result::Result<(), backhand::BackhandError>
+pub backhand::FilesystemReader::root: Nodes<backhand::SquashfsFileReader>
+impl backhand::FilesystemReader
+pub fn backhand::FilesystemReader::alloc_read_buffers(&self) -> (alloc::vec::Vec<u8>, alloc::vec::Vec<u8>)
+pub fn backhand::FilesystemReader::file<'a>(&'a self, basic_file: &'a backhand::BasicFile) -> backhand::FilesystemReaderFile<'_>
+pub fn backhand::FilesystemReader::files(&self) -> impl core::iter::traits::iterator::Iterator<Item = &backhand::Node<backhand::SquashfsFileReader>>
+pub fn backhand::FilesystemReader::from_reader<R: backhand::BufReadSeek + 'static>(reader: R) -> core::result::Result<Self, backhand::BackhandError>
+pub fn backhand::FilesystemReader::from_reader_with_offset<R: backhand::BufReadSeek + 'static>(reader: R, offset: u64) -> core::result::Result<Self, backhand::BackhandError>
+pub fn backhand::FilesystemReader::from_reader_with_offset_and_kind<R: backhand::BufReadSeek + 'static>(reader: R, offset: u64, kind: backhand::kind::Kind) -> core::result::Result<Self, backhand::BackhandError>
+impl<'a> backhand::FilesystemReaderFile<'a>
+pub fn backhand::FilesystemReaderFile::new(system: &'a backhand::FilesystemReader, basic: &'a backhand::BasicFile) -> Self
+pub fn backhand::FilesystemReaderFile::reader(&self, buf_read: &'a mut alloc::vec::Vec<u8>, buf_decompress: &'a mut alloc::vec::Vec<u8>) -> backhand::SquashfsReadFile<'_>
+impl<'a> core::clone::Clone for backhand::FilesystemReaderFile<'a>
+impl<'a> core::iter::traits::collect::IntoIterator for backhand::FilesystemReaderFile<'a>
+impl<'a> core::marker::Copy for backhand::FilesystemReaderFile<'a>
+impl<'a> backhand::FilesystemWriter<'a>
+pub fn backhand::FilesystemWriter::from_fs_reader(reader: &'a backhand::FilesystemReader) -> core::result::Result<Self, backhand::BackhandError>
+pub fn backhand::FilesystemWriter::mut_file<S: core::convert::AsRef<std::path::Path>>(&mut self, find_path: S) -> core::option::Option<&mut backhand::SquashfsFileWriter<'a>>
+pub fn backhand::FilesystemWriter::push_block_device<P: core::convert::AsRef<std::path::Path>>(&mut self, device_number: u32, path: P, header: backhand::NodeHeader) -> core::result::Result<(), backhand::BackhandError>
+pub fn backhand::FilesystemWriter::push_char_device<P: core::convert::AsRef<std::path::Path>>(&mut self, device_number: u32, path: P, header: backhand::NodeHeader) -> core::result::Result<(), backhand::BackhandError>
+pub fn backhand::FilesystemWriter::push_dir<P: core::convert::AsRef<std::path::Path>>(&mut self, path: P, header: backhand::NodeHeader) -> core::result::Result<(), backhand::BackhandError>
+pub fn backhand::FilesystemWriter::push_dir_all<P: core::convert::AsRef<std::path::Path>>(&mut self, path: P, header: backhand::NodeHeader) -> core::result::Result<(), backhand::BackhandError>
+pub fn backhand::FilesystemWriter::push_file<P: core::convert::AsRef<std::path::Path>>(&mut self, reader: impl std::io::Read + 'a, path: P, header: backhand::NodeHeader) -> core::result::Result<(), backhand::BackhandError>
+pub fn backhand::FilesystemWriter::push_symlink<P: core::convert::AsRef<std::path::Path>, S: core::convert::Into<std::path::PathBuf>>(&mut self, link: S, path: P, header: backhand::NodeHeader) -> core::result::Result<(), backhand::BackhandError>
+pub fn backhand::FilesystemWriter::replace_file<S: core::convert::AsRef<std::path::Path>>(&mut self, find_path: S, reader: impl std::io::Read + 'a) -> core::result::Result<(), backhand::BackhandError>
+pub fn backhand::FilesystemWriter::set_no_padding(&mut self)
+pub fn backhand::FilesystemWriter::write<W: std::io::Write + std::io::Seek>(&mut self, w: &mut W) -> core::result::Result<(backhand::SuperBlock, u64), backhand::BackhandError>
+pub fn backhand::FilesystemWriter::write_with_offset<W: std::io::Write + std::io::Seek>(&mut self, w: &mut W, offset: u64) -> core::result::Result<(backhand::SuperBlock, u64), backhand::BackhandError>
+impl<'a> core::default::Default for backhand::FilesystemWriter<'a>
+impl<'a> core::fmt::Debug for backhand::FilesystemWriter<'a>
+impl backhand::Fragment
+pub fn backhand::Fragment::new(start: u64, size: backhand::DataSize, unused: u32) -> Self
+impl deku::DekuRead<'_, deku::ctx::Endian> for backhand::Fragment
+pub fn backhand::Fragment::read(__deku_input_bits: &bitvec::slice::BitSlice<u8, bitvec::order::Msb0>, type_endian: deku::ctx::Endian) -> core::result::Result<(&bitvec::slice::BitSlice<u8, bitvec::order::Msb0>, Self), deku::error::DekuError>
+impl deku::DekuWrite<deku::ctx::Endian> for backhand::Fragment
+pub fn backhand::Fragment::write(&self, __deku_output: &mut bitvec::vec::BitVec<u8, bitvec::order::Msb0>, type_endian: deku::ctx::Endian) -> core::result::Result<(), deku::error::DekuError>
+pub backhand::Id::num: u32
+pub const backhand::Id::SIZE: usize
+pub fn backhand::Id::new(num: u32) -> backhand::Id
+impl deku::DekuRead<'_, deku::ctx::Endian> for backhand::Id
+pub fn backhand::Id::read(__deku_input_bits: &bitvec::slice::BitSlice<u8, bitvec::order::Msb0>, type_endian: deku::ctx::Endian) -> core::result::Result<(&bitvec::slice::BitSlice<u8, bitvec::order::Msb0>, Self), deku::error::DekuError>
+impl deku::DekuWrite<deku::ctx::Endian> for backhand::Id
+pub fn backhand::Id::write(&self, __deku_output: &mut bitvec::vec::BitVec<u8, bitvec::order::Msb0>, type_endian: deku::ctx::Endian) -> core::result::Result<(), deku::error::DekuError>
+impl backhand::Inode
+pub fn backhand::Inode::new(id: InodeId, header: InodeHeader, inner: InodeInner) -> Self
+impl deku::DekuRead<'_, (u64, u32, u16, deku::ctx::Endian)> for backhand::Inode
+pub fn backhand::Inode::read(__deku_input_bits: &bitvec::slice::BitSlice<u8, bitvec::order::Msb0>, (bytes_used, block_size, block_log, type_endian): (u64, u32, u16, deku::ctx::Endian)) -> core::result::Result<(&bitvec::slice::BitSlice<u8, bitvec::order::Msb0>, Self), deku::error::DekuError>
+impl deku::DekuWrite<(u64, u32, u16, deku::ctx::Endian)> for backhand::Inode
+pub fn backhand::Inode::write(&self, __deku_output: &mut bitvec::vec::BitVec<u8, bitvec::order::Msb0>, (bytes_used, block_size, block_log, type_endian): (u64, u32, u16, deku::ctx::Endian)) -> core::result::Result<(), deku::error::DekuError>
+pub backhand::Node::fullpath: std::path::PathBuf
+pub backhand::Node::header: backhand::NodeHeader
+pub fn backhand::Node::new_root(header: backhand::NodeHeader) -> Self
+impl<T> core::cmp::Eq for backhand::Node<T>
+impl<T> core::cmp::Ord for backhand::Node<T>
+pub fn backhand::Node::cmp(&self, other: &Self) -> core::cmp::Ordering
+impl<T> core::cmp::PartialEq<backhand::Node<T>> for backhand::Node<T>
+pub fn backhand::Node::eq(&self, other: &Self) -> bool
+impl<T> core::cmp::PartialOrd<backhand::Node<T>> for backhand::Node<T>
+pub fn backhand::Node::partial_cmp(&self, other: &Self) -> core::option::Option<core::cmp::Ordering>
+impl<T: core::clone::Clone> core::clone::Clone for backhand::Node<T>
+pub fn backhand::Node::clone(&self) -> backhand::Node<T>
+impl backhand::Squashfs
+pub fn backhand::Squashfs::from_reader(reader: impl backhand::BufReadSeek + 'static) -> core::result::Result<backhand::Squashfs, backhand::BackhandError>
+pub fn backhand::Squashfs::from_reader_with_offset(reader: impl backhand::BufReadSeek + 'static, offset: u64) -> core::result::Result<backhand::Squashfs, backhand::BackhandError>
+pub fn backhand::Squashfs::from_reader_with_offset_and_kind(reader: impl backhand::BufReadSeek + 'static, offset: u64, kind: backhand::kind::Kind) -> core::result::Result<backhand::Squashfs, backhand::BackhandError>
+pub fn backhand::Squashfs::into_filesystem_reader(self) -> core::result::Result<backhand::FilesystemReader, backhand::BackhandError>
+pub fn backhand::Squashfs::superblock_and_compression_options(reader: &mut alloc::boxed::Box<dyn backhand::BufReadSeek>, kind: &backhand::kind::Kind) -> core::result::Result<(backhand::SuperBlock, core::option::Option<backhand::compression::CompressionOptions>), backhand::BackhandError>
+impl core::marker::Copy for backhand::SquashfsBlockDevice
+impl core::marker::Copy for backhand::SquashfsCharacterDevice
+impl core::default::Default for backhand::SquashfsDir
+pub fn backhand::SquashfsDir::default() -> backhand::SquashfsDir
+impl core::marker::Copy for backhand::SquashfsDir
+pub struct backhand::SquashfsReadFile<'a>
+impl<'a> std::io::Read for backhand::SquashfsReadFile<'a>
+pub fn backhand::SquashfsReadFile::read(&mut self, buf: &mut [u8]) -> std::io::error::Result<usize>
+impl backhand::SuperBlock
+impl deku::DekuRead<'_, ([u8; 4], u16, u16, deku::ctx::Endian)> for backhand::SuperBlock
+pub fn backhand::SuperBlock::read(__deku_input_bits: &bitvec::slice::BitSlice<u8, bitvec::order::Msb0>, (ctx_magic, ctx_version_major, ctx_version_minor, ctx_type_endian): ([u8; 4], u16, u16, deku::ctx::Endian)) -> core::result::Result<(&bitvec::slice::BitSlice<u8, bitvec::order::Msb0>, Self), deku::error::DekuError>
+impl deku::DekuWrite<([u8; 4], u16, u16, deku::ctx::Endian)> for backhand::SuperBlock
+pub fn backhand::SuperBlock::write(&self, __deku_output: &mut bitvec::vec::BitVec<u8, bitvec::order::Msb0>, (ctx_magic, ctx_version_major, ctx_version_minor, ctx_type_endian): ([u8; 4], u16, u16, deku::ctx::Endian)) -> core::result::Result<(), deku::error::DekuError>
+pub trait backhand::BufReadSeek: std::io::BufRead + std::io::Seek
+impl<T: std::io::BufRead + std::io::Seek> backhand::BufReadSeek for T
```
</details>
### `unsquashfs`
- Added `--kind` for custom squashfs type image extraction (see the sketch after this list)
```
-k, --kind <KIND> Kind(type of image) to parse [default: le_v4_0] [possible values: be_v4_0, le_v4_0, amv_be_v4_0]
```
- Added `--completions` for the generation of shell completion scripts
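A minimal sketch of the library-side equivalent of `--kind`, assuming `Kind::from_target` (from the API diff above) accepts the same strings as the possible values listed here:
```rust
use std::fs::File;
use std::io::BufReader;

use backhand::kind::Kind;
use backhand::FilesystemReader;

fn main() {
    // same strings as the --kind possible values
    let kind = Kind::from_target("be_v4_0").unwrap();
    let file = BufReader::new(File::open("image.squashfs").unwrap());
    let _fs = FilesystemReader::from_reader_with_offset_and_kind(file, 0, kind).unwrap();
}
```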
#### Performance
See https://github.com/wcampbell0x2a/backhand/discussions/145 for more details.
These are benchmarked against the SquashFS image from `TP-Link AXE5400 Mesh Wi-Fi 6E Range Extender`.
##### Speed
Testing on my machine against single-threaded `squashfs-tools/unsquashfs-v4.6.1` shows that our
`backhand/unsquashfs` has around the same speed with a single thread.
##### Allocations
Testing only single-threaded mode, peak heap memory consumption for `squashfs-tools/unsquashfs-v4.6.1`
is 74.8MB, while our `backhand/unsquashfs` uses only 18.1MB.
## [v0.11.0] - 2023-03-14
### Added
- Support for Read/Write of non-standard custom squashfs images:
- `LE_V4_0`: (linux kernel) Little-Endian default official v4.0
- `BE_V4_0`: Big-Endian v4.0
- `AVM_BE_V4_0`: AVM Fritz!OS firmware support.
- `FilesystemWriter`: Builder pattern used when mutating an image, adding multiple functions
to the public API. Supports both raw images and modifications to images that already exist
(see the sketch after this list).
- `FilesystemCompressor`: `.compressor` is now `FilesystemCompressor`,
which holds the `Id` as well as the options stored in the image, plus extra options used only
when compressing while creating a new image.
- Add error `InvalidCompressionOption`
- Change default XZ compression level to 6
- Support custom XZ filters for `FilesystemWriter`
- Return `(Superblock, bytes_written)` for `FilesystemWriter::write()`
- Update deku to 0.16.0
- `add`: now reads file details to derive them when the file is added to the image
- `add`: `--mtime`, `--uid`, `--gid` and `--permission` to override file details derived from file
- `unsquashfs`: now correctly extracts ownership and permission details
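A minimal sketch of the builder pattern mentioned above, assuming the `FilesystemWriter` signatures listed in the v0.12.0 entry (paths and contents are illustrative):
```rust
use std::fs::File;
use std::io::{BufReader, Cursor};

use backhand::{FilesystemReader, FilesystemWriter};

fn main() {
    let file = BufReader::new(File::open("original.squashfs").unwrap());
    let reader = FilesystemReader::from_reader(file).unwrap();

    // Builder: start from the existing image, then layer modifications on top
    let mut writer = FilesystemWriter::from_fs_reader(&reader).unwrap();
    writer
        .replace_file("/etc/config", Cursor::new(b"new contents".to_vec()))
        .unwrap();

    let mut out = File::create("modified.squashfs").unwrap();
    // write() returns the SuperBlock and the number of bytes written
    let (_superblock, bytes_written) = writer.write(&mut out).unwrap();
    println!("wrote {bytes_written} bytes");
}
```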
### Fixed
- `ID` now supports multiple IDs for GID and UID in the table
- `id_table` is now properly a u64 pointer
- Data is now *not* copied when, during the use of a `FilesystemWriter`, you decide to change the compression used.
Thanks [@rbran](https://github.com/rbran/)
### Changed
- Renamed `SquashfsError` to `BackhandError`
## [v0.10.1] - 2023-02-22
### Added
- Zstd compression support
### Fixed
- `FilesystemWriter` Debug impl now works
- `FilesystemReader::from_reader_with_offset(..)` now properly respects given offsets
- `FilesystemWriter::write_with_offset(..)` now properly respects given offsets
## [v0.10.0] - 2023-02-20
### Added
- Fuzz testing with `cargo fuzz`; mostly fuzzing raw bytes as image input into this library (see the sketch after this list).
- `unsquashfs`: Add `-o, --out <OUT>` flag for output squashfs image destination
- `replace`: Add binary to replace file in squashfs filesystems
- Add support for Lzo compression, and feature `lzo`
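A minimal sketch of such a fuzz target, assuming the standard `libfuzzer-sys` harness that `cargo fuzz` generates (the body is illustrative):
```rust
#![no_main]
use libfuzzer_sys::fuzz_target;

fuzz_target!(|data: &[u8]| {
    // Any Err is fine; panics and crashes are what the fuzzer hunts for
    let cursor = std::io::Cursor::new(data.to_vec());
    let _ = backhand::FilesystemReader::from_reader(cursor);
});
```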
### Fixed
- Fixed many issues found with fuzz testing related to legal images.
Checks are now added at every step possible to prevent many soundness issues.
- Fixed `Compressor` id values for Lzo and Lzma
### Changed
- Pass internal raw data by reference, improving `only_read` benchmarks by ~9%.
- Invalid `Superblock.block_size` is now checked against MiB(1) instead of MB(1)
## [v0.9.1] - 2023-02-16
### Fixed
- Fix `unsquashfs` extracting wrong file data
## [v0.9.0] - 2023-02-13
### Fixed
- `FilesystemWriter::push_file(..)` correctly enters file into filesystem
### Changed
- Remove Result return type from `FilesystemWriter::{push_file(..), push_dir(..), push_symlink(..), push_char_device(..), and push_block_device(..)}`.
- Remove unused errors: `FieldNotInitialized` and `OsStringToStr`.
## [v0.8.1] - 2023-02-11
- Fix `src/lib.rs` version for docs.rs
## [v0.8.0] - 2023-02-11
### Added
- unsquashfs: Add `--stat`, `--force`, `--info` flags.
- unsquashfs: Add support for Char and Block device file creation when run as superuser.
- features: `xz` and `gzip`. By default both are enabled, but you may conditionally compile only one type of decompressor.
- `SquashfsError::Unreachable`, `SquashfsError::UnexpectedInode`, `SquashfsError::UnsupportedInode`.
These are all returned by the public API of filesystem and more panics were removed.
### Fixed
- `inode_count` is fixed; previously it was one more than the actual inode count.
### Changed
- The Public API of the library has been condensed, lmk if you have lost access to a required struct/field/enum.
- Add `FilesystemReader` and `FilesystemWriter` for lazy-reading the files only when required.
This significantly speeds up the initial read of the filesystem and splits the reading of the filesystem from the writing of the filesystem.
The following diff will cover most common API upgrades from `v0.7.0`
```diff
-let squashfs = Squashfs::from_reader(file).unwrap();
-let mut filesystem = squashfs.into_filesystem().unwrap();
+let filesystem = FilesystemReader::from_reader(file).unwrap();
+let mut filesystem = FilesystemWriter::from_fs_reader(&filesystem).unwrap();
```
```diff
-let filesystem = Filesystem::from_reader(file).unwrap();
+let filesystem = FilesystemReader::from_reader(file).unwrap();
+let mut filesystem = FilesystemWriter::from_fs_reader(&filesystem).unwrap();
```
```diff
-FilesystemHeader
+NodeHeader
```
### Performance
This release allows massive performance improvements by only reading files from disk when required
and reducing the amount of memory required to read and write an image.
Thanks [@rbran](https://github.com/rbran/) for the incredible work on the performance of the library.
Before:
```
read/write/netgear_ax6100v2
time: [2.3553 s 2.3667 s 2.3775 s]
read/write/tplink_ax1800
time: [17.996 s 18.068 s 18.140 s]
```
After:
```
write_read/netgear_ax6100v2
time: [1.2291 s 1.2363 s 1.2433 s]
write_read/tplink_ax1800
time: [6.7506 s 6.8287 s 6.9349 s]
only_read/netgear_ax6100v2
time: [5.1153 ms 5.1234 ms 5.1305 ms]
only_read/tplink_ax1800
time: [22.383 ms 22.398 ms 22.415 ms]
```
### [v0.7.0] - 2023-01-23
#### Added
- Use `block_size` as XZ default `dict_size` when compressing data
- Add `Filesystem::push_symlink(..)`
- Add `Filesystem::push_dir(..)`
- Add `Filesystem::push_char_device(..)`
- Add `Filesystem::push_block_device(..)`
#### Fixed
- Correctly choose between storing uncompressed and compressed data based on which takes the least space
#### Changed
- Improve `unsquashfs` and `add` cli args to match `squashfs-tools/unsquashfs` cli
- `Filesystem::push_file(..)` now takes, for the bytes, anything that is `impl Read` instead of `Into<Vec<u8>>`
- `Node::Path` renamed to `Node::Dir`
- `SquashfsPath` renamed to `SquashfsDir`
- `Filesystem::from_reader(..)`, `R` now takes `Read + Seek` instead of our own `ReadSeek`
- `Filesystem::from_reader_with_offset(..)`, `R` now takes `Read + Seek` instead of our own `ReadSeek`
- `Filesystem::push_symlink(..)` now only needs `path` and `link`
### [v0.6.0] - 2023-01-10
- Fix bug in our filesystem tree causing directory header information (gid, uid, permissions)
to not be saved in resulting filesystem when calling `Filesystem::to_bytes(..)`.
- Rework `filesystem::Node` to be a struct containing the path and `InnerNode`.
This cleans up the inner implementation of the file system tree.
- Make more types public that are useful for Squashfs detailed introspection
- Improve documentation
### [v0.5.0] - 2023-01-08
- Fix warning when compression options aren't a standard size
- In `from_reader(..)`, show info about flags used
- Add `Filesystem::from_reader(..)` and `Filesystem::from_reader_with_offset(..)`,
which call `Squashfs::from_reader(..)` or `Squashfs::from_reader_with_offset(..)`, followed by `Squashfs::into_filesystem(..)`.
- 5% performance increase due to using `Vec::with_capacity(..)` for `fragment_bytes`
- Add Block and Char Device support for Reading and Writing
- Fix error with `inode_offset` miscalculation
- Fix tail-end fragment support for reading image
- Fix `unsquashfs` file path extraction
### [v0.4.0] - 2023-01-04
- Add `mod_time` from `Squashfs` to `Filesystem`, used in the creation of a new image with `to_bytes(..)`
### [v0.3.0] - 2023-01-03
- Restrict public API
- Improve docs
- Add `Filesystem::push_file(..)` for adding a file, as well as the dirs for the path
- Add `Filesystem::mut_file(..)` for mutating a file at a path already in the filesystem
### [v0.2.1] - 2023-01-02
- Fix Cargo.toml issues
### [v0.2.0] - 2023-01-02
- Add `block_size` and `block_log` to Filesystem. Automatically taken from `Squashfs` when using `into_filesystem()`
- Add support for data fragments for `filesystem::to_bytes()`
- `DirEntry` uses `InodeId` instead of `u8`
### [v0.1.0] - 2023-01-01
- Initial Release
07070100000013000081A40000000000000000000000016854DB950000F0BC000000000000000000000000000000000000001B00000000backhand-0.23.0/Cargo.lock# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 4
[[package]]
name = "addr2line"
version = "0.24.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1"
dependencies = [
"gimli",
]
[[package]]
name = "adler2"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa"
[[package]]
name = "aho-corasick"
version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
dependencies = [
"memchr",
]
[[package]]
name = "anes"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"
[[package]]
name = "anstream"
version = "0.6.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "301af1932e46185686725e0fad2f8f2aa7da69dd70bf6ecc44d6b703844a3933"
dependencies = [
"anstyle",
"anstyle-parse",
"anstyle-query",
"anstyle-wincon",
"colorchoice",
"is_terminal_polyfill",
"utf8parse",
]
[[package]]
name = "anstyle"
version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd"
[[package]]
name = "anstyle-parse"
version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2"
dependencies = [
"utf8parse",
]
[[package]]
name = "anstyle-query"
version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c8bdeb6047d8983be085bab0ba1472e6dc604e7041dbf6fcd5e71523014fae9"
dependencies = [
"windows-sys 0.59.0",
]
[[package]]
name = "anstyle-wincon"
version = "3.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "403f75924867bb1033c59fbf0797484329750cfbe3c4325cd33127941fabc882"
dependencies = [
"anstyle",
"once_cell_polyfill",
"windows-sys 0.59.0",
]
[[package]]
name = "assert_cmd"
version = "2.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2bd389a4b2970a01282ee455294913c0a43724daedcd1a24c3eb0ec1c1320b66"
dependencies = [
"anstream",
"anstyle",
"bstr",
"doc-comment",
"libc",
"predicates",
"predicates-core",
"predicates-tree",
"wait-timeout",
]
[[package]]
name = "autocfg"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8"
[[package]]
name = "backhand"
version = "0.23.0"
dependencies = [
"assert_cmd",
"criterion",
"deku",
"dir-diff",
"document-features",
"flate2",
"libdeflater",
"liblzma",
"lz4_flex",
"rayon",
"rust-lzo",
"solana-nohash-hasher",
"tempfile",
"test-assets-ureq",
"test-log",
"thiserror",
"tracing",
"xxhash-rust",
"zstd",
"zstd-safe",
]
[[package]]
name = "backhand-cli"
version = "0.23.0"
dependencies = [
"backhand",
"clap",
"clap-cargo",
"clap_complete",
"color-print",
"console",
"indicatif",
"jemallocator",
"libc",
"nix",
"rayon",
"tracing",
"tracing-subscriber",
]
[[package]]
name = "backon"
version = "1.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "302eaff5357a264a2c42f127ecb8bac761cf99749fc3dc95677e2743991f99e7"
dependencies = [
"fastrand",
"gloo-timers",
"tokio",
]
[[package]]
name = "backtrace"
version = "0.3.75"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002"
dependencies = [
"addr2line",
"cfg-if",
"libc",
"miniz_oxide",
"object",
"rustc-demangle",
"windows-targets 0.52.6",
]
[[package]]
name = "base64"
version = "0.22.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"
[[package]]
name = "bitflags"
version = "2.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967"
[[package]]
name = "bitvec"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c"
dependencies = [
"funty",
"radium",
"tap",
"wyz",
]
[[package]]
name = "block-buffer"
version = "0.10.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71"
dependencies = [
"generic-array",
]
[[package]]
name = "bstr"
version = "1.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "234113d19d0d7d613b40e86fb654acf958910802bcceab913a4f9e7cda03b1a4"
dependencies = [
"memchr",
"regex-automata 0.4.9",
"serde",
]
[[package]]
name = "bumpalo"
version = "3.18.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "793db76d6187cd04dff33004d8e6c9cc4e05cd330500379d2394209271b4aeee"
[[package]]
name = "cast"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "cc"
version = "1.2.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d487aa071b5f64da6f19a3e848e3578944b726ee5a4854b82172f02aa876bfdc"
dependencies = [
"jobserver",
"libc",
"shlex",
]
[[package]]
name = "cfg-if"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268"
[[package]]
name = "cfg_aliases"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724"
[[package]]
name = "ciborium"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e"
dependencies = [
"ciborium-io",
"ciborium-ll",
"serde",
]
[[package]]
name = "ciborium-io"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757"
[[package]]
name = "ciborium-ll"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9"
dependencies = [
"ciborium-io",
"half",
]
[[package]]
name = "clap"
version = "4.5.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "40b6887a1d8685cebccf115538db5c0efe625ccac9696ad45c409d96566e910f"
dependencies = [
"clap_builder",
"clap_derive",
]
[[package]]
name = "clap-cargo"
version = "0.15.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d546f0e84ff2bfa4da1ce9b54be42285767ba39c688572ca32412a09a73851e5"
dependencies = [
"anstyle",
"clap",
]
[[package]]
name = "clap_builder"
version = "4.5.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e0c66c08ce9f0c698cbce5c0279d0bb6ac936d8674174fe48f736533b964f59e"
dependencies = [
"anstream",
"anstyle",
"clap_lex",
"strsim",
"terminal_size",
]
[[package]]
name = "clap_complete"
version = "4.5.54"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aad5b1b4de04fead402672b48897030eec1f3bfe1550776322f59f6d6e6a5677"
dependencies = [
"clap",
]
[[package]]
name = "clap_derive"
version = "4.5.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d2c7947ae4cc3d851207c1adb5b5e260ff0cca11446b1d6d1423788e442257ce"
dependencies = [
"heck",
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "clap_lex"
version = "0.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675"
[[package]]
name = "color-print"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3aa954171903797d5623e047d9ab69d91b493657917bdfb8c2c80ecaf9cdb6f4"
dependencies = [
"color-print-proc-macro",
]
[[package]]
name = "color-print-proc-macro"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "692186b5ebe54007e45a59aea47ece9eb4108e141326c304cdc91699a7118a22"
dependencies = [
"nom",
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "colorchoice"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75"
[[package]]
name = "console"
version = "0.15.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "054ccb5b10f9f2cbf51eb355ca1d05c2d279ce1804688d0db74b4733a5aeafd8"
dependencies = [
"encode_unicode",
"libc",
"once_cell",
"unicode-width",
"windows-sys 0.59.0",
]
[[package]]
name = "cpufeatures"
version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280"
dependencies = [
"libc",
]
[[package]]
name = "crc32fast"
version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3"
dependencies = [
"cfg-if",
]
[[package]]
name = "criterion"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3bf7af66b0989381bd0be551bd7cc91912a655a58c6918420c9527b1fd8b4679"
dependencies = [
"anes",
"cast",
"ciborium",
"clap",
"criterion-plot",
"itertools 0.13.0",
"num-traits",
"oorandom",
"plotters",
"rayon",
"regex",
"serde",
"serde_json",
"tinytemplate",
"walkdir",
]
[[package]]
name = "criterion-plot"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1"
dependencies = [
"cast",
"itertools 0.10.5",
]
[[package]]
name = "crossbeam-deque"
version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51"
dependencies = [
"crossbeam-epoch",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-epoch"
version = "0.9.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-utils"
version = "0.8.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"
[[package]]
name = "crunchy"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929"
[[package]]
name = "crypto-common"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
dependencies = [
"generic-array",
"typenum",
]
[[package]]
name = "darling"
version = "0.20.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee"
dependencies = [
"darling_core",
"darling_macro",
]
[[package]]
name = "darling_core"
version = "0.20.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e"
dependencies = [
"fnv",
"ident_case",
"proc-macro2",
"quote",
"strsim",
"syn",
]
[[package]]
name = "darling_macro"
version = "0.20.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead"
dependencies = [
"darling_core",
"quote",
"syn",
]
[[package]]
name = "deku"
version = "0.19.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f476a022dcfbb013d1365734a42e05b6aca967ebe0d3bb38170086abd9ea3324"
dependencies = [
"bitvec",
"deku_derive",
"no_std_io2",
"rustversion",
]
[[package]]
name = "deku_derive"
version = "0.19.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bb216d425bdf810c165a8ae1649523033e88b5f795480ccec63926295541b084"
dependencies = [
"darling",
"proc-macro-crate",
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "difflib"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8"
[[package]]
name = "digest"
version = "0.10.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
dependencies = [
"block-buffer",
"crypto-common",
]
[[package]]
name = "dir-diff"
version = "0.3.3-alpha.0"
source = "git+https://github.com/wcampbell0x2a/dir-diff?branch=add-checking-permissions#13dddcfcb8fe6d32cc4c3e95a0a364d56dae904c"
dependencies = [
"walkdir",
]
[[package]]
name = "displaydoc"
version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "doc-comment"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10"
[[package]]
name = "document-features"
version = "0.2.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95249b50c6c185bee49034bcb378a49dc2b5dff0be90ff6616d31d64febab05d"
dependencies = [
"litrs",
]
[[package]]
name = "either"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719"
[[package]]
name = "encode_unicode"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0"
[[package]]
name = "env_filter"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "186e05a59d4c50738528153b83b0b0194d3a29507dfec16eccd4b342903397d0"
dependencies = [
"log",
"regex",
]
[[package]]
name = "env_logger"
version = "0.11.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13c863f0904021b108aa8b2f55046443e6b1ebde8fd4a15c399893aae4fa069f"
dependencies = [
"anstream",
"anstyle",
"env_filter",
"jiff",
"log",
]
[[package]]
name = "equivalent"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f"
[[package]]
name = "errno"
version = "0.3.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad"
dependencies = [
"libc",
"windows-sys 0.60.2",
]
[[package]]
name = "fastrand"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
[[package]]
name = "flate2"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d"
dependencies = [
"crc32fast",
"libz-rs-sys",
"miniz_oxide",
]
[[package]]
name = "fnv"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
[[package]]
name = "form_urlencoded"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456"
dependencies = [
"percent-encoding",
]
[[package]]
name = "funty"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c"
[[package]]
name = "futures-channel"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10"
dependencies = [
"futures-core",
]
[[package]]
name = "futures-core"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e"
[[package]]
name = "generic-array"
version = "0.14.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
dependencies = [
"typenum",
"version_check",
]
[[package]]
name = "getrandom"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592"
dependencies = [
"cfg-if",
"libc",
"wasi 0.11.1+wasi-snapshot-preview1",
]
[[package]]
name = "getrandom"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4"
dependencies = [
"cfg-if",
"libc",
"r-efi",
"wasi 0.14.2+wasi-0.2.4",
]
[[package]]
name = "gimli"
version = "0.31.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f"
[[package]]
name = "gloo-timers"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994"
dependencies = [
"futures-channel",
"futures-core",
"js-sys",
"wasm-bindgen",
]
[[package]]
name = "half"
version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9"
dependencies = [
"cfg-if",
"crunchy",
]
[[package]]
name = "hashbrown"
version = "0.15.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5"
[[package]]
name = "heck"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
[[package]]
name = "hermit-abi"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c"
[[package]]
name = "icu_collections"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47"
dependencies = [
"displaydoc",
"potential_utf",
"yoke",
"zerofrom",
"zerovec",
]
[[package]]
name = "icu_locale_core"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a"
dependencies = [
"displaydoc",
"litemap",
"tinystr",
"writeable",
"zerovec",
]
[[package]]
name = "icu_normalizer"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979"
dependencies = [
"displaydoc",
"icu_collections",
"icu_normalizer_data",
"icu_properties",
"icu_provider",
"smallvec",
"zerovec",
]
[[package]]
name = "icu_normalizer_data"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3"
[[package]]
name = "icu_properties"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b"
dependencies = [
"displaydoc",
"icu_collections",
"icu_locale_core",
"icu_properties_data",
"icu_provider",
"potential_utf",
"zerotrie",
"zerovec",
]
[[package]]
name = "icu_properties_data"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632"
[[package]]
name = "icu_provider"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af"
dependencies = [
"displaydoc",
"icu_locale_core",
"stable_deref_trait",
"tinystr",
"writeable",
"yoke",
"zerofrom",
"zerotrie",
"zerovec",
]
[[package]]
name = "ident_case"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
[[package]]
name = "idna"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e"
dependencies = [
"idna_adapter",
"smallvec",
"utf8_iter",
]
[[package]]
name = "idna_adapter"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344"
dependencies = [
"icu_normalizer",
"icu_properties",
]
[[package]]
name = "indexmap"
version = "2.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e"
dependencies = [
"equivalent",
"hashbrown",
]
[[package]]
name = "indicatif"
version = "0.17.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "183b3088984b400f4cfac3620d5e076c84da5364016b4f49473de574b2586235"
dependencies = [
"console",
"number_prefix",
"portable-atomic",
"unicode-width",
"web-time",
]
[[package]]
name = "is_terminal_polyfill"
version = "1.70.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf"
[[package]]
name = "itertools"
version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
dependencies = [
"either",
]
[[package]]
name = "itertools"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186"
dependencies = [
"either",
]
[[package]]
name = "itoa"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
[[package]]
name = "jemalloc-sys"
version = "0.5.4+5.3.0-patched"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac6c1946e1cea1788cbfde01c993b52a10e2da07f4bac608228d1bed20bfebf2"
dependencies = [
"cc",
"libc",
]
[[package]]
name = "jemallocator"
version = "0.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a0de374a9f8e63150e6f5e8a60cc14c668226d7a347d8aee1a45766e3c4dd3bc"
dependencies = [
"jemalloc-sys",
"libc",
]
[[package]]
name = "jiff"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be1f93b8b1eb69c77f24bbb0afdf66f54b632ee39af40ca21c4365a1d7347e49"
dependencies = [
"jiff-static",
"log",
"portable-atomic",
"portable-atomic-util",
"serde",
]
[[package]]
name = "jiff-static"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "03343451ff899767262ec32146f6d559dd759fdadf42ff0e227c7c48f72594b4"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "jobserver"
version = "0.1.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a"
dependencies = [
"getrandom 0.3.3",
"libc",
]
[[package]]
name = "js-sys"
version = "0.3.77"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f"
dependencies = [
"once_cell",
"wasm-bindgen",
]
[[package]]
name = "lazy_static"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
[[package]]
name = "libc"
version = "0.2.174"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776"
[[package]]
name = "libdeflate-sys"
version = "1.24.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "805824325366c44599dfeb62850fe3c7d7b3e3d75f9ab46785bc7dba3676815c"
dependencies = [
"cc",
]
[[package]]
name = "libdeflater"
version = "1.24.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b270bcc7e9d6dce967a504a55b1b0444f966aa9184e8605b531bc0492abb30bb"
dependencies = [
"libdeflate-sys",
]
[[package]]
name = "liblzma"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0791ab7e08ccc8e0ce893f6906eb2703ed8739d8e89b57c0714e71bad09024c8"
dependencies = [
"liblzma-sys",
"num_cpus",
]
[[package]]
name = "liblzma-sys"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "01b9596486f6d60c3bbe644c0e1be1aa6ccc472ad630fe8927b456973d7cb736"
dependencies = [
"cc",
"libc",
"pkg-config",
]
[[package]]
name = "libz-rs-sys"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "172a788537a2221661b480fee8dc5f96c580eb34fa88764d3205dc356c7e4221"
dependencies = [
"zlib-rs",
]
[[package]]
name = "linux-raw-sys"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12"
[[package]]
name = "litemap"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956"
[[package]]
name = "litrs"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4ce301924b7887e9d637144fdade93f9dfff9b60981d4ac161db09720d39aa5"
[[package]]
name = "log"
version = "0.4.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
[[package]]
name = "lz4_flex"
version = "0.11.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08ab2867e3eeeca90e844d1940eab391c9dc5228783db2ed999acbc0a9ed375a"
[[package]]
name = "matchers"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558"
dependencies = [
"regex-automata 0.1.10",
]
[[package]]
name = "memchr"
version = "2.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0"
[[package]]
name = "minimal-lexical"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
[[package]]
name = "miniz_oxide"
version = "0.8.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316"
dependencies = [
"adler2",
]
[[package]]
name = "nix"
version = "0.30.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6"
dependencies = [
"bitflags",
"cfg-if",
"cfg_aliases",
"libc",
]
[[package]]
name = "no_std_io2"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c2b9acd47481ab557a89a5665891be79e43cce8a29ad77aa9419d7be5a7c06a"
dependencies = [
"memchr",
]
[[package]]
name = "nom"
version = "7.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a"
dependencies = [
"memchr",
"minimal-lexical",
]
[[package]]
name = "nu-ansi-term"
version = "0.46.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84"
dependencies = [
"overload",
"winapi",
]
[[package]]
name = "num-traits"
version = "0.2.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
dependencies = [
"autocfg",
]
[[package]]
name = "num_cpus"
version = "1.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b"
dependencies = [
"hermit-abi",
"libc",
]
[[package]]
name = "number_prefix"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3"
[[package]]
name = "object"
version = "0.36.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87"
dependencies = [
"memchr",
]
[[package]]
name = "once_cell"
version = "1.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
[[package]]
name = "once_cell_polyfill"
version = "1.70.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad"
[[package]]
name = "oorandom"
version = "11.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e"
[[package]]
name = "overload"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39"
[[package]]
name = "percent-encoding"
version = "2.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
[[package]]
name = "pin-project-lite"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b"
[[package]]
name = "pkg-config"
version = "0.3.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c"
[[package]]
name = "plotters"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747"
dependencies = [
"num-traits",
"plotters-backend",
"plotters-svg",
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "plotters-backend"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a"
[[package]]
name = "plotters-svg"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670"
dependencies = [
"plotters-backend",
]
[[package]]
name = "portable-atomic"
version = "1.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483"
[[package]]
name = "portable-atomic-util"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507"
dependencies = [
"portable-atomic",
]
[[package]]
name = "potential_utf"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5a7c30837279ca13e7c867e9e40053bc68740f988cb07f7ca6df43cc734b585"
dependencies = [
"zerovec",
]
[[package]]
name = "predicates"
version = "3.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a5d19ee57562043d37e82899fade9a22ebab7be9cef5026b07fda9cdd4293573"
dependencies = [
"anstyle",
"difflib",
"predicates-core",
]
[[package]]
name = "predicates-core"
version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "727e462b119fe9c93fd0eb1429a5f7647394014cf3c04ab2c0350eeb09095ffa"
[[package]]
name = "predicates-tree"
version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72dd2d6d381dfb73a193c7fca536518d7caee39fc8503f74e7dc0be0531b425c"
dependencies = [
"predicates-core",
"termtree",
]
[[package]]
name = "proc-macro-crate"
version = "3.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35"
dependencies = [
"toml_edit",
]
[[package]]
name = "proc-macro2"
version = "1.0.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d"
dependencies = [
"proc-macro2",
]
[[package]]
name = "r-efi"
version = "5.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f"
[[package]]
name = "radium"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09"
[[package]]
name = "rayon"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa"
dependencies = [
"either",
"rayon-core",
]
[[package]]
name = "rayon-core"
version = "1.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2"
dependencies = [
"crossbeam-deque",
"crossbeam-utils",
]
[[package]]
name = "regex"
version = "1.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
dependencies = [
"aho-corasick",
"memchr",
"regex-automata 0.4.9",
"regex-syntax 0.8.5",
]
[[package]]
name = "regex-automata"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
dependencies = [
"regex-syntax 0.6.29",
]
[[package]]
name = "regex-automata"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
dependencies = [
"aho-corasick",
"memchr",
"regex-syntax 0.8.5",
]
[[package]]
name = "regex-syntax"
version = "0.6.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
[[package]]
name = "regex-syntax"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
[[package]]
name = "ring"
version = "0.17.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7"
dependencies = [
"cc",
"cfg-if",
"getrandom 0.2.16",
"libc",
"untrusted",
"windows-sys 0.52.0",
]
[[package]]
name = "rust-lzo"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf191ab1b954757cb5bb7f366e17d80daaa06010f1fdde7a3a7db052c1ecd1a8"
dependencies = [
"libc",
]
[[package]]
name = "rustc-demangle"
version = "0.1.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "989e6739f80c4ad5b13e0fd7fe89531180375b18520cc8c82080e4dc4035b84f"
[[package]]
name = "rustix"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266"
dependencies = [
"bitflags",
"errno",
"libc",
"linux-raw-sys",
"windows-sys 0.59.0",
]
[[package]]
name = "rustls"
version = "0.23.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7160e3e10bf4535308537f3c4e1641468cd0e485175d6163087c0393c7d46643"
dependencies = [
"log",
"once_cell",
"ring",
"rustls-pki-types",
"rustls-webpki",
"subtle",
"zeroize",
]
[[package]]
name = "rustls-pki-types"
version = "1.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79"
dependencies = [
"zeroize",
]
[[package]]
name = "rustls-webpki"
version = "0.103.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e4a72fe2bcf7a6ac6fd7d0b9e5cb68aeb7d4c0a0271730218b3e92d43b4eb435"
dependencies = [
"ring",
"rustls-pki-types",
"untrusted",
]
[[package]]
name = "rustversion"
version = "1.0.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d"
[[package]]
name = "ryu"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f"
[[package]]
name = "same-file"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
dependencies = [
"winapi-util",
]
[[package]]
name = "serde"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.140"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373"
dependencies = [
"itoa",
"memchr",
"ryu",
"serde",
]
[[package]]
name = "sha2"
version = "0.10.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283"
dependencies = [
"cfg-if",
"cpufeatures",
"digest",
]
[[package]]
name = "sharded-slab"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6"
dependencies = [
"lazy_static",
]
[[package]]
name = "shlex"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
[[package]]
name = "smallvec"
version = "1.15.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03"
[[package]]
name = "solana-nohash-hasher"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b8a731ed60e89177c8a7ab05fe0f1511cedd3e70e773f288f9de33a9cfdc21e"
[[package]]
name = "stable_deref_trait"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
[[package]]
name = "strsim"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
[[package]]
name = "subtle"
version = "2.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
[[package]]
name = "syn"
version = "2.0.103"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e4307e30089d6fd6aff212f2da3a1f9e32f3223b1f010fb09b7c95f90f3ca1e8"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "synstructure"
version = "0.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "tap"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
[[package]]
name = "tempfile"
version = "3.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1"
dependencies = [
"fastrand",
"getrandom 0.3.3",
"once_cell",
"rustix",
"windows-sys 0.59.0",
]
[[package]]
name = "terminal_size"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "45c6481c4829e4cc63825e62c49186a34538b7b2750b73b266581ffb612fb5ed"
dependencies = [
"rustix",
"windows-sys 0.59.0",
]
[[package]]
name = "termtree"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683"
[[package]]
name = "test-assets-ureq"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "44fca2da79ad6439d233e4f726225e2b21ca80096735f48e0e3deca426797630"
dependencies = [
"backon",
"sha2",
"ureq",
]
[[package]]
name = "test-log"
version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e7f46083d221181166e5b6f6b1e5f1d499f3a76888826e6cb1d057554157cd0f"
dependencies = [
"env_logger",
"test-log-macros",
"tracing-subscriber",
]
[[package]]
name = "test-log-macros"
version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "888d0c3c6db53c0fdab160d2ed5e12ba745383d3e85813f2ea0f2b1475ab553f"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "tests"
version = "0.0.0"
dependencies = [
"assert_cmd",
"backhand",
"backon",
"dir-diff",
"env_logger",
"libdeflater",
"nix",
"tempfile",
"test-assets-ureq",
"test-log",
"tracing",
"tracing-subscriber",
]
[[package]]
name = "thiserror"
version = "2.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "2.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "thread_local"
version = "1.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185"
dependencies = [
"cfg-if",
]
[[package]]
name = "tinystr"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b"
dependencies = [
"displaydoc",
"zerovec",
]
[[package]]
name = "tinytemplate"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
dependencies = [
"serde",
"serde_json",
]
[[package]]
name = "tokio"
version = "1.45.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75ef51a33ef1da925cea3e4eb122833cb377c61439ca401b770f54902b806779"
dependencies = [
"backtrace",
"pin-project-lite",
]
[[package]]
name = "toml_datetime"
version = "0.6.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c"
[[package]]
name = "toml_edit"
version = "0.22.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a"
dependencies = [
"indexmap",
"toml_datetime",
"winnow",
]
[[package]]
name = "tracing"
version = "0.1.41"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0"
dependencies = [
"pin-project-lite",
"tracing-attributes",
"tracing-core",
]
[[package]]
name = "tracing-attributes"
version = "0.1.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "tracing-core"
version = "0.1.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678"
dependencies = [
"once_cell",
"valuable",
]
[[package]]
name = "tracing-log"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3"
dependencies = [
"log",
"once_cell",
"tracing-core",
]
[[package]]
name = "tracing-subscriber"
version = "0.3.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008"
dependencies = [
"matchers",
"nu-ansi-term",
"once_cell",
"regex",
"sharded-slab",
"smallvec",
"thread_local",
"tracing",
"tracing-core",
"tracing-log",
]
[[package]]
name = "typenum"
version = "1.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f"
[[package]]
name = "unicode-ident"
version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
[[package]]
name = "unicode-width"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a1a07cc7db3810833284e8d372ccdc6da29741639ecc70c9ec107df0fa6154c"
[[package]]
name = "untrusted"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1"
[[package]]
name = "ureq"
version = "2.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02d1a66277ed75f640d608235660df48c8e3c19f3b4edb6a263315626cc3c01d"
dependencies = [
"base64",
"flate2",
"log",
"once_cell",
"rustls",
"rustls-pki-types",
"url",
"webpki-roots 0.26.11",
]
[[package]]
name = "url"
version = "2.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60"
dependencies = [
"form_urlencoded",
"idna",
"percent-encoding",
]
[[package]]
name = "utf8_iter"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be"
[[package]]
name = "utf8parse"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
[[package]]
name = "valuable"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65"
[[package]]
name = "version_check"
version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"
[[package]]
name = "wait-timeout"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09ac3b126d3914f9849036f826e054cbabdc8519970b8998ddaf3b5bd3c65f11"
dependencies = [
"libc",
]
[[package]]
name = "walkdir"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
dependencies = [
"same-file",
"winapi-util",
]
[[package]]
name = "wasi"
version = "0.11.1+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b"
[[package]]
name = "wasi"
version = "0.14.2+wasi-0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3"
dependencies = [
"wit-bindgen-rt",
]
[[package]]
name = "wasm-bindgen"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5"
dependencies = [
"cfg-if",
"once_cell",
"rustversion",
"wasm-bindgen-macro",
]
[[package]]
name = "wasm-bindgen-backend"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6"
dependencies = [
"bumpalo",
"log",
"proc-macro2",
"quote",
"syn",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-macro"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
]
[[package]]
name = "wasm-bindgen-macro-support"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de"
dependencies = [
"proc-macro2",
"quote",
"syn",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-shared"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d"
dependencies = [
"unicode-ident",
]
[[package]]
name = "web-sys"
version = "0.3.77"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2"
dependencies = [
"js-sys",
"wasm-bindgen",
]
[[package]]
name = "web-time"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb"
dependencies = [
"js-sys",
"wasm-bindgen",
]
[[package]]
name = "webpki-roots"
version = "0.26.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9"
dependencies = [
"webpki-roots 1.0.1",
]
[[package]]
name = "webpki-roots"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8782dd5a41a24eed3a4f40b606249b3e236ca61adf1f25ea4d45c73de122b502"
dependencies = [
"rustls-pki-types",
]
[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-util"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
dependencies = [
"windows-sys 0.59.0",
]
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]]
name = "windows-sys"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
dependencies = [
"windows-targets 0.52.6",
]
[[package]]
name = "windows-sys"
version = "0.59.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
dependencies = [
"windows-targets 0.52.6",
]
[[package]]
name = "windows-sys"
version = "0.60.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb"
dependencies = [
"windows-targets 0.53.2",
]
[[package]]
name = "windows-targets"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
dependencies = [
"windows_aarch64_gnullvm 0.52.6",
"windows_aarch64_msvc 0.52.6",
"windows_i686_gnu 0.52.6",
"windows_i686_gnullvm 0.52.6",
"windows_i686_msvc 0.52.6",
"windows_x86_64_gnu 0.52.6",
"windows_x86_64_gnullvm 0.52.6",
"windows_x86_64_msvc 0.52.6",
]
[[package]]
name = "windows-targets"
version = "0.53.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c66f69fcc9ce11da9966ddb31a40968cad001c5bedeb5c2b82ede4253ab48aef"
dependencies = [
"windows_aarch64_gnullvm 0.53.0",
"windows_aarch64_msvc 0.53.0",
"windows_i686_gnu 0.53.0",
"windows_i686_gnullvm 0.53.0",
"windows_i686_msvc 0.53.0",
"windows_x86_64_gnu 0.53.0",
"windows_x86_64_gnullvm 0.53.0",
"windows_x86_64_msvc 0.53.0",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764"
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
[[package]]
name = "windows_aarch64_msvc"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c"
[[package]]
name = "windows_i686_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
[[package]]
name = "windows_i686_gnu"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3"
[[package]]
name = "windows_i686_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
[[package]]
name = "windows_i686_gnullvm"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11"
[[package]]
name = "windows_i686_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
[[package]]
name = "windows_i686_msvc"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d"
[[package]]
name = "windows_x86_64_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
[[package]]
name = "windows_x86_64_gnu"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57"
[[package]]
name = "windows_x86_64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
[[package]]
name = "windows_x86_64_msvc"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486"
[[package]]
name = "winnow"
version = "0.7.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "74c7b26e3480b707944fc872477815d29a8e429d2f93a1ce000f5fa84a15cbcd"
dependencies = [
"memchr",
]
[[package]]
name = "wit-bindgen-rt"
version = "0.39.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1"
dependencies = [
"bitflags",
]
[[package]]
name = "writeable"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb"
[[package]]
name = "wyz"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed"
dependencies = [
"tap",
]
[[package]]
name = "xxhash-rust"
version = "0.8.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fdd20c5420375476fbd4394763288da7eb0cc0b8c11deed431a91562af7335d3"
[[package]]
name = "yoke"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc"
dependencies = [
"serde",
"stable_deref_trait",
"yoke-derive",
"zerofrom",
]
[[package]]
name = "yoke-derive"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6"
dependencies = [
"proc-macro2",
"quote",
"syn",
"synstructure",
]
[[package]]
name = "zerofrom"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5"
dependencies = [
"zerofrom-derive",
]
[[package]]
name = "zerofrom-derive"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502"
dependencies = [
"proc-macro2",
"quote",
"syn",
"synstructure",
]
[[package]]
name = "zeroize"
version = "1.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde"
[[package]]
name = "zerotrie"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595"
dependencies = [
"displaydoc",
"yoke",
"zerofrom",
]
[[package]]
name = "zerovec"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a05eb080e015ba39cc9e23bbe5e7fb04d5fb040350f99f34e338d5fdd294428"
dependencies = [
"yoke",
"zerofrom",
"zerovec-derive",
]
[[package]]
name = "zerovec-derive"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "zlib-rs"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "626bd9fa9734751fc50d6060752170984d7053f5a39061f524cda68023d4db8a"
[[package]]
name = "zstd"
version = "0.13.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a"
dependencies = [
"zstd-safe",
]
[[package]]
name = "zstd-safe"
version = "7.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d"
dependencies = [
"zstd-sys",
]
[[package]]
name = "zstd-sys"
version = "2.0.15+zstd.1.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eb81183ddd97d0c74cedf1d50d85c8d08c1b8b68ee863bdee9e706eedba1a237"
dependencies = [
"cc",
"pkg-config",
]
07070100000014000081A40000000000000000000000016854DB9500000277000000000000000000000000000000000000001B00000000backhand-0.23.0/Cargo.toml[workspace]
members = [
"backhand-cli",
"backhand",
# Internal
"backhand-test",
]
resolver = "2"
[workspace.package]
version = "0.23.0"
authors = ["wcampbell <wcampbell1995@gmail.com>"]
license = "MIT OR Apache-2.0"
edition = "2021"
repository = "https://github.com/wcampbell0x2a/backhand"
keywords = ["filesystem", "deku", "squashfs", "linux"]
categories = ["filesystem", "parsing"]
[profile.release]
overflow-checks = true
# Release (dist) binaries are set up for maximum runtime speed, at the cost of CI time
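# Select this profile with e.g. `cargo build --profile dist` (release pipelines may pass extra flags)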
[profile.dist]
inherits = "release"
codegen-units = 1
lto = true
strip = true
overflow-checks = true
07070100000015000081A40000000000000000000000016854DB9500000153000000000000000000000000000000000000001B00000000backhand-0.23.0/Cross.toml[target.x86_64-unknown-linux-musl]
pre-build = [
"apt update && apt install zlib1g-dev liblzma-dev",
"git clone https://github.com/plougher/squashfs-tools.git -b squashfs-tools-4.6.1 && cd squashfs-tools/squashfs-tools && CONFIG=1 XZ_SUPPORT=1 GZIP_SUPPORT=1 make && make install",
]
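# Note: these pre-build hooks run inside the container image used by `cross`,
# e.g. when invoking `cross test --target x86_64-unknown-linux-musl`.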
[build.env]
passthrough = [
"RUST_LOG",
]
07070100000016000081A40000000000000000000000016854DB950000028E000000000000000000000000000000000000001F00000000backhand-0.23.0/DEVELOPMENT.md# Tooling
## Rust
This project uses the Rust compiler. Follow the instructions at [Installing Rust](https://rust-lang.org/tools/install).
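The usual route is `rustup` (shown here for a Unix-like shell; the page above covers other platforms):
```console
$ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
```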
## Justfile
This project includes a [justfile](justfile) for ease of development. See [Installing Just](https://github.com/casey/just?tab=readme-ov-file#installation).
Running these recipes locally should catch errors before CI runs once your patch/merge request is submitted!
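With `just` installed, the available recipes can be listed at any time:
```console
$ just --list
```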
## Building
```console
$ just build
```
## Testing
Testing requires `squashfs-tools`, which lets the tests verify that we remain compatible with the reference implementation. Install it from your package manager.
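For example, on Debian- or Ubuntu-based systems (the package name may differ on other distributions):
```console
$ sudo apt install squashfs-tools
```
Then run the test suite: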
```console
$ just test
```
## Linting
```console
$ just lint
```
See the [justfile](justfile) for more recipes!
07070100000017000081A40000000000000000000000016854DB95000025FC000000000000000000000000000000000000001F00000000backhand-0.23.0/LICENSE-APACHE Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
07070100000018000081A40000000000000000000000016854DB95000003FF000000000000000000000000000000000000001C00000000backhand-0.23.0/LICENSE-MITPermission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
07070100000019000081A40000000000000000000000016854DB9500001F75000000000000000000000000000000000000001A00000000backhand-0.23.0/README.mdbackhand
===============================
[<img alt="github" src="https://img.shields.io/badge/github-wcampbell0x2a/backhand-8da0cb?style=for-the-badge&labelColor=555555&logo=github" height="20">](https://github.com/wcampbell0x2a/backhand)
[<img alt="crates.io" src="https://img.shields.io/crates/v/backhand.svg?style=for-the-badge&color=fc8d62&logo=rust" height="20">](https://crates.io/crates/backhand)
[<img alt="docs.rs" src="https://img.shields.io/badge/docs.rs-backhand-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs" height="20">](https://docs.rs/backhand)
[<img alt="build status" src="https://img.shields.io/github/actions/workflow/status/wcampbell0x2a/backhand/main.yml?branch=master&style=for-the-badge" height="20">](https://github.com/wcampbell0x2a/backhand/actions?query=branch%3Amaster)
[<img alt="Codecov" src="https://img.shields.io/codecov/c/github/wcampbell0x2a/backhand?style=for-the-badge" height="20">](https://app.codecov.io/gh/wcampbell0x2a/backhand)
Library and binaries for the reading, creating, and modification
of [SquashFS](https://en.wikipedia.org/wiki/SquashFS) file systems.
- **Library** — Backhand provides an easy way to programmatically analyze SquashFS 4.0 images,
including extracting and modifying them.
- **Feature Flags** — Supported compression and decompression are [feature flagged](https://docs.rs/backhand/latest/backhand/#features), so your final binary (or `unsquashfs`)
only needs to include code to extract one type of image.
- **Unconventional Support** — As well as supporting the normal Linux kernel SquashFS 4.0, we also support
the "wonderful world of vendor formats" with a [Kind](https://docs.rs/backhand/latest/backhand/kind/index.html) struct.
This allows changing the magic bytes, using custom compression algorithms, and changing the endianness of either the Data or Metadata fields; see the sketch after this list.
This is controlled from `unsquashfs-backhand` through the `--kind` option.
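For example, reading a big-endian image with a non-default kind might look like the following sketch (the `"be_v4_0"` kind string, the `vendor.squashfs` path, and the `0` offset are illustrative):
```rust,no_run
use std::fs::File;
use std::io::BufReader;
use backhand::FilesystemReader;
use backhand::kind::Kind;

// a non-default kind: big-endian SquashFS v4.0 (kind string assumed for illustration)
let kind = Kind::from_target("be_v4_0").unwrap();
// read the image with that kind; the 0 offset is also an assumption
let file = BufReader::new(File::open("vendor.squashfs").unwrap());
let read_filesystem = FilesystemReader::from_reader_with_offset_and_kind(file, 0, kind).unwrap();
```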
## Library
*Compiler support: requires rustc 1.84+*
Add the following to your `Cargo.toml` file:
```toml
[dependencies]
backhand = "0.23.0"
```
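To keep the final binary small, you can instead disable the default features and enable only the compression you need; a minimal sketch, assuming your images only use xz:
```toml
[dependencies]
backhand = { version = "0.23.0", default-features = false, features = ["xz"] }
```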
#### Target Support
Although additional targets may be supported, only the following have been fully tested or confirmed to build successfully.
| Target | `build` | `test` |
|----------------------------------------|:-------:|:------:|
| `x86_64-unknown-linux-musl` | ✓ | ✓ |
| `aarch64-unknown-linux-musl` | ✓ | ✓ |
| `arm-unknown-linux-musleabi` | ✓ | ✓ |
| `armv7-unknown-linux-musleabi` | ✓ | ✓ |
| `x86_64-apple-darwin` | ✓ | ✓ |
| `x86_64-pc-windows-gnu` | ✓ | |
### Reading/Writing/Modifying Firmware
```rust,no_run
use std::fs::File;
use std::io::{Cursor, BufReader};
use backhand::{FilesystemReader, FilesystemWriter, NodeHeader};
// read
let file = BufReader::new(File::open("file.squashfs").unwrap());
let read_filesystem = FilesystemReader::from_reader(file).unwrap();
// convert to writer
let mut write_filesystem = FilesystemWriter::from_fs_reader(&read_filesystem).unwrap();
// add file with data from slice
let d = NodeHeader::default();
let bytes = Cursor::new(b"Fear is the mind-killer.");
write_filesystem.push_file(bytes, "a/d/e/new_file", d);
// add file with data from file
let new_file = File::open("dune").unwrap();
write_filesystem.push_file(new_file, "/root/dune", d);
// modify file
let bytes = Cursor::new(b"The sleeper must awaken.\n");
write_filesystem.replace_file("/a/b/c/d/e/first_file", bytes).unwrap();
// write into a new file
let mut output = File::create("modified.squashfs").unwrap();
write_filesystem.write(&mut output).unwrap();
```
## Binaries
*Compiler support: requires rustc 1.84+*
These are currently under development and are missing features; MRs are welcome!
To install, run `cargo install backhand-cli --locked`, or download from the
[latest github release](https://github.com/wcampbell0x2a/backhand/releases/latest).
See ``--help`` for more information.
#### Target Support
Although additional targets may be supported, only the following have been tested and included in our GitHub releases.
| Target | `test` | `release` |
|----------------------------------------|:---------:|:---------:|
| `x86_64-unknown-linux-musl` | ✓ | ✓ |
| `aarch64-unknown-linux-musl` | ✓ | ✓ |
| `arm-unknown-linux-musleabi` | ✓ | ✓ |
| `armv7-unknown-linux-musleabi` | ✓ | ✓ |
| `x86_64-apple-darwin` | ✓ | ✓ |
### unsquashfs-backhand
```no_test
tool to uncompress, extract and list squashfs filesystems
Usage: unsquashfs-backhand [OPTIONS] [FILESYSTEM]
Arguments:
[FILESYSTEM] Squashfs file
Options:
-o, --offset <BYTES> Skip BYTES at the start of FILESYSTEM [default: 0]
-a, --auto-offset Find first instance of squashfs --kind magic
-l, --list List filesystem, do not write to DEST (ignores --quiet)
-d, --dest <PATHNAME> Extract to [PATHNAME] [default: squashfs-root]
-i, --info Print files as they are extracted
--path-filter <PATH_FILTER> Limit filesystem extraction [default: /]
-f, --force If file already exists then overwrite
-s, --stat Display filesystem superblock information (ignores --quiet)
-k, --kind <KIND> Kind(type of image) to parse [default: le_v4_0] [possible
values: be_v4_0, le_v4_0, avm_be_v4_0]
--completions <COMPLETIONS> Emit shell completion scripts [possible values: bash, elvish,
fish, powershell, zsh]
--quiet Silence all progress bar and RUST_LOG output
-h, --help Print help (see more with '--help')
-V, --version Print version
```
### add-backhand
```no_test
tool to add a file or directory to squashfs filesystems
Usage: add-backhand [OPTIONS] <INPUT_IMAGE> <FILE_PATH_IN_IMAGE> <OUTPUT_IMAGE>
Arguments:
<INPUT_IMAGE> Squashfs input image
<FILE_PATH_IN_IMAGE> Path of file once inserted into squashfs
<OUTPUT_IMAGE> Squashfs output image path
Options:
-d, --dir Create empty directory
-f, --file <FILE> Path of file to read, to write into squashfs
--mode <MODE> Override mode read from <FILE>
--uid <UID> Override uid read from <FILE>
--gid <GID> Override gid read from <FILE>
--mtime <MTIME> Override mtime read from <FILE>
--pad-len <PAD_LEN> Custom KiB padding length
--no-compression-options Don't emit compression options
-h, --help Print help
-V, --version Print version
```
### replace-backhand
```no_test
tool to replace files in squashfs filesystems
Usage: replace-backhand [OPTIONS] <INPUT_IMAGE> <FILE> <FILE_PATH_IN_IMAGE> <OUTPUT_IMAGE>
Arguments:
<INPUT_IMAGE> Squashfs input image
<FILE> Path of file to read, to write into squashfs
<FILE_PATH_IN_IMAGE> Path of file replaced in image
<OUTPUT_IMAGE> Squashfs output image
Options:
--pad-len <PAD_LEN> Custom KiB padding length
--no-compression-options Don't emit compression options
-h, --help Print help
-V, --version Print version
```
## Development
All patches/merge requests are welcome! See the development guide in
[DEVELOPMENT.md](DEVELOPMENT.md) for more details.
## Performance
See [BENCHMARK.md](BENCHMARK.md).
## Testing
See [backhand-test](backhand-test/README.md).
0707010000001A000081A40000000000000000000000016854DB950000037F000000000000000000000000000000000000001B00000000backhand-0.23.0/RELEASE.md# Release
## Update dependencies in `CHANGELOG.md`
This project uses Renovate to keep our own dependencies up to date, and downstream "lock"-only
dependencies to test what the end user will most likely use.
For this library, mostly just note compression library changes.
## Update benchmarks
```
$ ./bench.bash
```
## Bump Versions
```
$ cargo release version [LEVEL] -p backhand -p backhand-cli --execute
$ cargo release replace -p backhand -p backhand-cli --execute
```
## Update `CHANGELOG.md`
## Update `BENCHMARK.md`
## Create MR / Merge Into Master
## Tag Release
Create a tag and push it to GitHub. This will run the `.github/workflows/binaries.yml` job and create
a [Release](https://github.com/wcampbell0x2a/backhand/releases) if the CI passes.
## Publish to `crates.io`
```
$ git clean -xdf
$ cargo publish --locked -p backhand
$ cargo publish --locked -p backhand-cli
```
0707010000001B000041ED0000000000000000000000026854DB9500000000000000000000000000000000000000000000001900000000backhand-0.23.0/backhand0707010000001C000041ED0000000000000000000000026854DB9500000000000000000000000000000000000000000000001D00000000backhand-0.23.0/backhand-cli0707010000001D000081A40000000000000000000000016854DB9500000747000000000000000000000000000000000000002800000000backhand-0.23.0/backhand-cli/Cargo.toml[package]
name = "backhand-cli"
version.workspace = true
authors.workspace = true
license.workspace = true
edition.workspace = true
repository.workspace = true
keywords.workspace = true
categories.workspace = true
rust-version = "1.84.0"
description = "Binaries for the reading, creating, and modification of SquashFS file systems"
readme = "../README.md"
[dependencies]
nix = { version = "0.30.0", default-features = false, features = ["fs"] }
clap = { version = "4.5.13", features = ["derive", "wrap_help"] }
tracing-subscriber = { version = "0.3.18", features = ["env-filter", "fmt"] }
libc = "0.2.162"
clap_complete = "4.5.13"
indicatif = "0.17.8"
console = "0.15.8"
rayon = "1.10.0"
backhand = { path = "../backhand", default-features = false, version = "0.23.0" }
tracing = "0.1.40"
color-print = "0.3.6"
clap-cargo = "0.15.0"
[lib]
bench = false
[target.'cfg(all(target_env = "musl", target_pointer_width = "64"))'.dependencies.jemallocator]
version = "0.5.4"
# These features mirror the backhand features
[features]
default = ["xz", "gzip", "zstd"]
## Enables xz compression inside library and binaries
xz = ["backhand/xz"]
## Enables xz compression and forces static build inside library and binaries
xz-static = ["xz", "backhand/xz-static"]
## Enables gzip compression inside library and binaries
any-gzip = []
gzip = ["any-gzip", "backhand/gzip"]
## This library is licensed GPL and thus disabled by default
lzo = ["backhand/lzo"]
## Enables zstd compression inside library and binaries
zstd = ["backhand/zstd"]
lz4 = ["backhand/lz4"]
## Enable backhand parallel decompression
backhand-parallel = ["backhand/parallel"]
[[bin]]
name = "unsquashfs-backhand"
path = "src/bin/unsquashfs.rs"
bench = false
[[bin]]
name = "add-backhand"
path = "src/bin/add.rs"
bench = false
[[bin]]
name = "replace-backhand"
path = "src/bin/replace.rs"
bench = false
0707010000001E000081A40000000000000000000000016854DB9500000019000000000000000000000000000000000000002A00000000backhand-0.23.0/backhand-cli/release.tomlpush=false
publish=false
0707010000001F000041ED0000000000000000000000026854DB9500000000000000000000000000000000000000000000002100000000backhand-0.23.0/backhand-cli/src07070100000020000041ED0000000000000000000000026854DB9500000000000000000000000000000000000000000000002500000000backhand-0.23.0/backhand-cli/src/bin07070100000021000081A40000000000000000000000016854DB9500000F9E000000000000000000000000000000000000002C00000000backhand-0.23.0/backhand-cli/src/bin/add.rsuse std::fs::File;
use std::io::BufReader;
use std::os::unix::fs::MetadataExt;
use std::path::PathBuf;
use std::process::ExitCode;
use backhand::{FilesystemReader, FilesystemWriter, NodeHeader};
use backhand_cli::after_help;
use clap::Parser;
use tracing::{error, info};
use tracing_subscriber::EnvFilter;
// -musl malloc is slow, use jemalloc
#[cfg(all(target_env = "musl", target_pointer_width = "64"))]
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
/// tool to add a file or directory to squashfs filesystems
#[derive(Parser, Debug)]
#[command(author,
version,
name = "add-backhand",
after_help = after_help(false),
max_term_width = 98,
styles = clap_cargo::style::CLAP_STYLING,
)]
struct Args {
/// Squashfs input image
input_image: PathBuf,
/// Create empty directory
#[clap(short, long)]
dir: bool,
/// Path of file to read, to write into squashfs
#[clap(short, long)]
#[clap(required_unless_present = "dir")]
file: Option<PathBuf>,
/// Path of file once inserted into squashfs
#[clap(name = "FILE_PATH_IN_IMAGE")]
path: PathBuf,
/// Squashfs output image path
output_image: PathBuf,
/// Override mode read from <FILE>
#[clap(long, required_if_eq("dir", "true"))]
mode: Option<u16>,
/// Override uid read from <FILE>
#[clap(long, required_if_eq("dir", "true"))]
uid: Option<u32>,
/// Override gid read from <FILE>
#[clap(long, required_if_eq("dir", "true"))]
gid: Option<u32>,
/// Override mtime read from <FILE>
#[clap(long, required_if_eq("dir", "true"))]
mtime: Option<u32>,
/// Custom KiB padding length
#[clap(long)]
pad_len: Option<u32>,
/// Don't emit compression options
#[clap(long)]
no_compression_options: bool,
}
fn main() -> ExitCode {
// setup tracing to RUST_LOG or just info
let env_filter =
EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("add=info"));
tracing_subscriber::fmt().with_env_filter(env_filter).init();
let args = Args::parse();
// read the input squashfs
let file = File::open(args.input_image).unwrap();
let file = BufReader::new(file);
let filesystem = FilesystemReader::from_reader(file).unwrap();
let mut filesystem = FilesystemWriter::from_fs_reader(&filesystem).unwrap();
// create new file
if let Some(file) = args.file {
let new_file = File::open(&file).unwrap();
// if metadata isn't already given via args, use the file's metadata
let meta = file.metadata().unwrap();
let mode = args.mode.unwrap_or(meta.mode() as u16) & 0xfff;
let uid = args.uid.unwrap_or(meta.uid());
let gid = args.gid.unwrap_or(meta.gid());
let mtime = args.mtime.unwrap_or(meta.mtime() as u32);
let node = NodeHeader::new(mode, uid, gid, mtime);
if let Err(e) = filesystem.push_file(new_file, args.path, node) {
error!("{e}");
return ExitCode::FAILURE;
}
} else if args.dir {
// use file meta from args
let node = NodeHeader::new(
args.mode.unwrap(),
args.uid.unwrap(),
args.gid.unwrap(),
args.mtime.unwrap(),
);
if let Err(e) = filesystem.push_dir(args.path, node) {
error!("{e}");
return ExitCode::FAILURE;
}
}
if let Some(pad_len) = args.pad_len {
filesystem.set_kib_padding(pad_len)
}
if args.no_compression_options {
filesystem.set_emit_compression_options(false);
}
// write new file
let Ok(output) = File::create_new(&args.output_image) else {
error!("failed to open {}", args.output_image.display());
return ExitCode::FAILURE;
};
if let Err(e) = filesystem.write(output) {
error!("{e}");
return ExitCode::FAILURE;
}
info!("added file and wrote to {}", args.output_image.display());
ExitCode::SUCCESS
}
07070100000022000081A40000000000000000000000016854DB9500000A78000000000000000000000000000000000000003000000000backhand-0.23.0/backhand-cli/src/bin/replace.rsuse std::fs::File;
use std::io::BufReader;
use std::path::PathBuf;
use std::process::ExitCode;
use backhand::{FilesystemReader, FilesystemWriter};
use backhand_cli::after_help;
use clap::Parser;
use tracing::error;
use tracing_subscriber::EnvFilter;
// -musl malloc is slow, use jemalloc
#[cfg(all(target_env = "musl", target_pointer_width = "64"))]
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
/// tool to replace files in squashfs filesystems
#[derive(Parser, Debug)]
#[command(author,
version,
name = "replace-backhand",
after_help = after_help(false),
max_term_width = 98,
styles = clap_cargo::style::CLAP_STYLING,
)]
struct Args {
/// Squashfs input image
input_image: PathBuf,
/// Path of file to read, to write into squashfs
file: PathBuf,
/// Path of file replaced in image
#[clap(name = "FILE_PATH_IN_IMAGE")]
file_path: PathBuf,
/// Squashfs output image
output_image: PathBuf,
/// Custom KiB padding length
#[clap(long)]
pad_len: Option<u32>,
/// Don't emit compression options
#[clap(long)]
no_compression_options: bool,
}
fn main() -> ExitCode {
// setup tracing to RUST_LOG or just info
let env_filter =
EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("replace=info"));
tracing_subscriber::fmt().with_env_filter(env_filter).init();
let args = Args::parse();
// read the input squashfs
let Ok(file) = File::open(&args.input_image) else {
error!("unable to open {}", args.input_image.display());
return ExitCode::FAILURE;
};
let file = BufReader::new(file);
let filesystem = FilesystemReader::from_reader(file).unwrap();
let mut filesystem = FilesystemWriter::from_fs_reader(&filesystem).unwrap();
// Modify file
let Ok(new_file) = File::open(&args.file) else {
error!("unable to open {}", args.file.display());
return ExitCode::FAILURE;
};
if let Err(e) = filesystem.replace_file(args.file_path, new_file) {
error!("{e}");
return ExitCode::FAILURE;
}
if let Some(pad_len) = args.pad_len {
filesystem.set_kib_padding(pad_len)
}
if args.no_compression_options {
filesystem.set_emit_compression_options(false);
}
// write new file
let Ok(output) = File::create_new(&args.output_image) else {
error!("failed to open {}", args.output_image.display());
return ExitCode::FAILURE;
};
filesystem.write(output).unwrap();
println!("replaced file and wrote to {}", args.output_image.display());
ExitCode::SUCCESS
}
07070100000023000081A40000000000000000000000016854DB95000063AB000000000000000000000000000000000000003300000000backhand-0.23.0/backhand-cli/src/bin/unsquashfs.rsuse std::collections::HashSet;
use std::fs::{self, File, Permissions};
use std::io::{self, BufReader, BufWriter, Read, Seek, SeekFrom, Write};
use std::os::unix::fs::lchown;
use std::os::unix::prelude::PermissionsExt;
use std::path::{Component, Path, PathBuf};
use std::process::ExitCode;
use std::sync::Mutex;
use backhand::kind::Kind;
use backhand::{
BufReadSeek, FilesystemReader, InnerNode, Node, NodeHeader, Squashfs, SquashfsBlockDevice,
SquashfsCharacterDevice, SquashfsDir, SquashfsFileReader, SquashfsSymlink, DEFAULT_BLOCK_SIZE,
};
use backhand_cli::after_help;
use clap::builder::PossibleValuesParser;
use clap::{CommandFactory, Parser};
use clap_complete::{generate, Shell};
use console::Term;
use indicatif::{HumanDuration, ProgressBar, ProgressStyle};
use nix::fcntl::AT_FDCWD;
use nix::libc::geteuid;
use nix::sys::stat::{dev_t, mknod, mode_t, umask, utimensat, utimes, Mode, SFlag, UtimensatFlags};
use nix::sys::time::{TimeSpec, TimeVal};
use nix::unistd::mkfifo;
use rayon::prelude::*;
use std::time::{Duration, Instant};
// -musl malloc is slow, use jemalloc
#[cfg(all(target_env = "musl", target_pointer_width = "64"))]
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
pub fn required_root(a: &str) -> Result<PathBuf, String> {
let p = PathBuf::from(a);
if p.has_root() {
Ok(p)
} else {
Err("argument requires root \"/\"".to_string())
}
}
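/// Scan the reader in 4-byte strides for the magic bytes of the given kind,
/// returning the offset of the first match. Note that this assumes the magic
/// is 4-byte aligned relative to the start of the reader.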
fn find_offset(file: &mut BufReader<File>, kind: &Kind) -> Option<u64> {
let mut magic = [0_u8; 4];
while file.read_exact(&mut magic).is_ok() {
if magic == kind.magic() {
let found = file.stream_position().unwrap() - magic.len() as u64;
file.rewind().unwrap();
return Some(found);
}
}
file.rewind().unwrap();
None
}
pub fn extracted(pb: &ProgressBar, s: &str) {
let blue_bold: console::Style = console::Style::new().blue().bold();
let line = format!("{:>16} {}", blue_bold.apply_to("Extracted"), s,);
pb.println(line);
}
pub fn created(pb: &ProgressBar, s: &str) {
let blue_bold: console::Style = console::Style::new().blue().bold();
let line = format!("{:>16} {}", blue_bold.apply_to("Created"), s,);
pb.println(line);
}
pub fn exists(pb: &ProgressBar, s: &str) {
let red_bold: console::Style = console::Style::new().red().bold();
let line = format!("{:>16} {}", red_bold.apply_to("Exists"), s,);
pb.println(line);
}
pub fn failed(pb: &ProgressBar, s: &str) {
let red_bold: console::Style = console::Style::new().red().bold();
let line = format!("{:>16} {}", red_bold.apply_to("Failed"), s,);
pb.println(line);
}
/// tool to uncompress, extract and list squashfs filesystems
#[derive(Parser)]
#[command(author,
version,
name = "unsquashfs-backhand",
after_help = after_help(true),
max_term_width = 98,
styles = clap_cargo::style::CLAP_STYLING,
)]
struct Args {
/// Squashfs file
///
/// Required for all usage, except --completions
#[arg(required_unless_present = "completions")]
filesystem: Option<PathBuf>,
/// Skip BYTES at the start of FILESYSTEM
#[arg(short, long, default_value_t = 0, name = "BYTES")]
offset: u64,
/// Find first instance of squashfs --kind magic
///
/// Will overwrite given --offset
#[arg(short, long)]
auto_offset: bool,
/// List filesystem, do not write to DEST (ignores --quiet)
#[arg(short, long)]
list: bool,
/// Extract to [PATHNAME]
#[arg(short, long, default_value = "squashfs-root", name = "PATHNAME")]
dest: PathBuf,
/// Print files as they are extracted
#[arg(short, long)]
info: bool,
/// Limit filesystem extraction
///
/// For example, "/www/webpages/data" will return all files under that dir, such as
/// "/www/webpages/data/region.json" and "/www/webpages/data/timezone.json". When given an
/// exact file, only that file will be extracted.
///
/// Like normal operation, these will be extracted as {arg.dest}{arg.path_filter}{files} with
/// correct file permissions.
#[arg(long, default_value = "/", value_parser = required_root)]
path_filter: PathBuf,
/// If file already exists then overwrite
#[arg(short, long)]
force: bool,
/// Display filesystem superblock information (ignores --quiet)
#[arg(short, long)]
stat: bool,
/// Kind(type of image) to parse
#[arg(short,
long,
default_value = "le_v4_0",
value_parser = PossibleValuesParser::new(
[
"be_v4_0",
"le_v4_0",
"avm_be_v4_0",
]
))]
kind: String,
/// Emit shell completion scripts
#[arg(long)]
completions: Option<Shell>,
/// Silence all progress bar and RUST_LOG output
#[arg(long)]
quiet: bool,
}
fn main() -> ExitCode {
let mut args = Args::parse();
if !args.quiet {
tracing_subscriber::fmt::init();
}
if let Some(completions) = args.completions {
let mut cmd = Args::command();
let name = cmd.get_name().to_string();
generate(completions, &mut cmd, name, &mut io::stdout());
return ExitCode::SUCCESS;
}
let kind = Kind::from_target(&args.kind).unwrap();
let mut file = BufReader::with_capacity(
DEFAULT_BLOCK_SIZE as usize,
File::open(args.filesystem.as_ref().unwrap()).unwrap(),
);
let blue_bold: console::Style = console::Style::new().blue().bold();
let red_bold: console::Style = console::Style::new().red().bold();
let pb = ProgressBar::new_spinner();
if args.auto_offset {
if !args.quiet {
pb.enable_steady_tick(Duration::from_millis(120));
let line = format!("{:>14}", blue_bold.apply_to("Searching for magic"));
pb.set_message(line);
}
if let Some(found_offset) = find_offset(&mut file, &kind) {
if !args.quiet {
let line =
format!("{:>14} 0x{:08x}", blue_bold.apply_to("Found magic"), found_offset,);
pb.finish_with_message(line);
}
args.offset = found_offset;
} else {
if !args.quiet {
let line = format!("{:>14}", red_bold.apply_to("Magic not found"),);
pb.finish_with_message(line);
}
return ExitCode::FAILURE;
}
}
if args.stat {
stat(args, file, kind);
return ExitCode::SUCCESS;
}
let squashfs = match Squashfs::from_reader_with_offset_and_kind(file, args.offset, kind) {
Ok(s) => s,
Err(_e) => {
let line = format!("{:>14}", red_bold.apply_to(format!("Could not read image: {_e}")));
pb.finish_with_message(line);
return ExitCode::FAILURE;
}
};
let root_process = unsafe { geteuid() == 0 };
if root_process {
umask(Mode::from_bits(0).unwrap());
}
// Start new spinner as we extract all the inode and other information from the image
// This can be very time consuming
let start = Instant::now();
let pb = ProgressBar::new_spinner();
if !args.quiet {
pb.enable_steady_tick(Duration::from_millis(120));
let line = format!("{:>14}", blue_bold.apply_to("Reading image"));
pb.set_message(line);
}
let filesystem = squashfs.into_filesystem_reader().unwrap();
if !args.quiet {
let line = format!("{:>14}", blue_bold.apply_to("Read image"));
pb.finish_with_message(line);
}
// if we can find a parent, then a filter must be applied and the exact parent dirs must be
// found above it
let mut files: Vec<&Node<SquashfsFileReader>> = vec![];
if args.path_filter.parent().is_some() {
let mut current = PathBuf::new();
current.push("/");
for part in args.path_filter.iter() {
current.push(part);
if let Some(exact) = filesystem.files().find(|&a| a.fullpath == current) {
files.push(exact);
} else {
if !args.quiet {
let line = format!(
"{:>14}",
red_bold.apply_to("Invalid --path-filter, path doesn't exist")
);
pb.finish_with_message(line);
}
return ExitCode::FAILURE;
}
}
// remove the final node, this is a file and will be caught in the following statement
files.pop();
}
// gather all files and dirs
let files_len = files.len();
let nodes = files
.into_iter()
.chain(filesystem.files().filter(|a| a.fullpath.starts_with(&args.path_filter)));
// extract or list
if args.list {
list(nodes);
} else {
// This could be expensive, only pass this in when not quiet
let n_nodes = if !args.quiet {
Some(
files_len
+ filesystem
.files()
.filter(|a| a.fullpath.starts_with(&args.path_filter))
.count(),
)
} else {
None
};
extract_all(
&args,
&filesystem,
root_process,
nodes.collect::<Vec<&Node<SquashfsFileReader>>>().into_par_iter(),
n_nodes,
start,
);
}
ExitCode::SUCCESS
}
fn list<'a>(nodes: impl Iterator<Item = &'a Node<SquashfsFileReader>>) {
for node in nodes {
let path = &node.fullpath;
println!("{}", path.display());
}
}
fn stat(args: Args, mut file: BufReader<File>, kind: Kind) {
file.seek(SeekFrom::Start(args.offset)).unwrap();
let mut reader: Box<dyn BufReadSeek> = Box::new(file);
let (superblock, compression_options) =
Squashfs::superblock_and_compression_options(&mut reader, &kind).unwrap();
// show info about flags
println!("{superblock:#08x?}");
// show info about compression options
println!("Compression Options: {compression_options:#x?}");
// show info about flags
if superblock.inodes_uncompressed() {
println!("flag: inodes uncompressed");
}
if superblock.data_block_stored_uncompressed() {
println!("flag: data blocks stored uncompressed");
}
if superblock.fragments_stored_uncompressed() {
println!("flag: fragments stored uncompressed");
}
if superblock.fragments_are_not_used() {
println!("flag: fragments are not used");
}
if superblock.fragments_are_always_generated() {
println!("flag: fragments are always generated");
}
if superblock.data_has_been_deduplicated() {
println!("flag: data has been deduplicated");
}
if superblock.nfs_export_table_exists() {
println!("flag: nfs export table exists");
}
if superblock.xattrs_are_stored_uncompressed() {
println!("flag: xattrs are stored uncompressed");
}
if superblock.compressor_options_are_present() {
println!("flag: compressor options are present");
}
}
fn set_attributes(
pb: &ProgressBar,
args: &Args,
path: &Path,
header: &NodeHeader,
root_process: bool,
is_file: bool,
) {
// TODO Use (file_set_times) when not nightly: https://github.com/rust-lang/rust/issues/98245
let timeval = TimeVal::new(header.mtime as _, 0);
utimes(path, &timeval, &timeval).unwrap();
let mut mode = u32::from(header.permissions);
// Only chown when root
if root_process {
// TODO: Use (unix_chown) when not nightly: https://github.com/rust-lang/rust/issues/88989
match lchown(path, Some(header.uid), Some(header.gid)) {
Ok(_) => (),
Err(e) => {
if !args.quiet {
let line =
format!("lchown {} {} {} : {e}", path.display(), header.uid, header.gid,);
failed(pb, &line);
}
return;
}
}
} else if is_file {
// bitwise-not if not rooted (disable write permissions for group/others). Following
// squashfs-tools/unsquashfs behavior
mode &= !0o022;
}
// set permissions
//
// NOTE: In squashfs-tools/unsquashfs they remove the write bits for user and group?
// I don't know if there is a reason for that but I keep the permissions the same if possible
if let Err(e) = fs::set_permissions(path, Permissions::from_mode(mode)) {
if e.kind() == std::io::ErrorKind::PermissionDenied {
// try without sticky bit
if fs::set_permissions(path, Permissions::from_mode(mode & !0o1000)).is_err()
&& !args.quiet
{
let line = format!("{} : could not set permissions", path.to_str().unwrap());
failed(pb, &line);
}
}
}
}
fn extract_all<'a, S: ParallelIterator<Item = &'a Node<SquashfsFileReader>>>(
args: &Args,
filesystem: &'a FilesystemReader,
root_process: bool,
nodes: S,
n_nodes: Option<usize>,
start: Instant,
) {
let pb = ProgressBar::new(n_nodes.unwrap_or(0) as u64);
if !args.quiet {
pb.set_style(ProgressStyle::default_spinner());
pb.set_style(
ProgressStyle::with_template(
// note that bar size is fixed unlike cargo which is dynamic
// and also the truncation in cargo uses trailers (`...`)
if Term::stdout().size().1 > 80 {
"{prefix:>16.cyan.bold} [{bar:57}] {pos}/{len} {wide_msg}"
} else {
"{prefix:>16.cyan.bold} [{bar:57}] {pos}/{len}"
},
)
.unwrap()
.progress_chars("=> "),
);
pb.set_prefix("Extracting");
pb.inc(1);
}
let processing = Mutex::new(HashSet::new());
nodes.for_each(|node| {
let path = &node.fullpath;
let fullpath = path.strip_prefix(Component::RootDir).unwrap_or(path);
if !args.quiet {
let mut p = processing.lock().unwrap();
p.insert(fullpath);
pb.set_message(
p.iter()
.map(|a| a.to_path_buf().into_os_string().into_string().unwrap())
.collect::<Vec<String>>()
.join(", "),
);
pb.inc(1);
}
let filepath = Path::new(&args.dest).join(fullpath);
// create required dirs, we will fix permissions later
let _ = fs::create_dir_all(filepath.parent().unwrap());
match &node.inner {
InnerNode::File(file) => {
// alloc required space for file data readers
// check if file exists
if !args.force && filepath.exists() {
if !args.quiet {
exists(&pb, filepath.to_str().unwrap());
let mut p = processing.lock().unwrap();
p.remove(fullpath);
}
return;
}
// write to file
let fd = File::create(&filepath).unwrap();
let mut writer = BufWriter::with_capacity(file.file_len(), &fd);
let file = filesystem.file(file);
let mut reader = file.reader();
match io::copy(&mut reader, &mut writer) {
Ok(_) => {
if args.info && !args.quiet {
extracted(&pb, filepath.to_str().unwrap());
}
set_attributes(&pb, args, &filepath, &node.header, root_process, true);
}
Err(e) => {
if !args.quiet {
let line = format!("{} : {e}", filepath.to_str().unwrap());
failed(&pb, &line);
let mut p = processing.lock().unwrap();
p.remove(fullpath);
}
return;
}
}
writer.flush().unwrap();
}
InnerNode::Symlink(SquashfsSymlink { link }) => {
// create symlink
let link_display = link.display();
// check if file exists
if !args.force && filepath.exists() {
exists(&pb, filepath.to_str().unwrap());
let mut p = processing.lock().unwrap();
p.remove(fullpath);
return;
}
match std::os::unix::fs::symlink(link, &filepath) {
Ok(_) => {
if args.info && !args.quiet {
let line = format!("{}->{link_display}", filepath.to_str().unwrap());
created(&pb, &line);
}
}
Err(e) => {
if !args.quiet {
let line =
format!("{}->{link_display} : {e}", filepath.to_str().unwrap());
failed(&pb, &line);
let mut p = processing.lock().unwrap();
p.remove(fullpath);
}
return;
}
}
// set attributes, but special to not follow the symlink
if root_process {
// TODO: Use (unix_chown) when not nightly: https://github.com/rust-lang/rust/issues/88989
match lchown(&filepath, Some(node.header.uid), Some(node.header.gid)) {
Ok(_) => (),
Err(e) => {
if !args.quiet {
let line = format!(
"lchown {} {} {} : {e}",
filepath.display(),
node.header.uid,
node.header.gid,
);
failed(&pb, &line);
}
let mut p = processing.lock().unwrap();
p.remove(fullpath);
return;
}
}
}
// TODO Use (file_set_times) when not nightly: https://github.com/rust-lang/rust/issues/98245
// Make sure this doesn't follow symlinks when changed to std library!
let timespec = TimeSpec::new(node.header.mtime as _, 0);
utimensat(
AT_FDCWD,
&filepath,
×pec,
×pec,
UtimensatFlags::NoFollowSymlink,
)
.unwrap();
}
InnerNode::Dir(SquashfsDir { .. }) => {
// These permissions are corrected later (user default permissions for now)
//
// don't display error if this was already created, we might have already
// created it in another thread to put down a file
if std::fs::create_dir(&filepath).is_ok() && args.info && !args.quiet {
created(&pb, filepath.to_str().unwrap())
}
}
InnerNode::CharacterDevice(SquashfsCharacterDevice { device_number }) => {
if root_process {
#[allow(clippy::unnecessary_fallible_conversions)]
match mknod(
&filepath,
SFlag::S_IFCHR,
Mode::from_bits(mode_t::from(node.header.permissions)).unwrap(),
dev_t::try_from(*device_number).unwrap(),
) {
Ok(_) => {
if args.info && !args.quiet {
created(&pb, filepath.to_str().unwrap());
}
set_attributes(&pb, args, &filepath, &node.header, root_process, true);
}
Err(_) => {
if !args.quiet {
let line = format!(
"char device {}, are you superuser?",
filepath.to_str().unwrap()
);
failed(&pb, &line);
let mut p = processing.lock().unwrap();
p.remove(fullpath);
}
return;
}
}
} else {
if !args.quiet {
let line = format!(
"char device {}, are you superuser?",
filepath.to_str().unwrap()
);
failed(&pb, &line);
}
let mut p = processing.lock().unwrap();
p.remove(fullpath);
return;
}
}
InnerNode::BlockDevice(SquashfsBlockDevice { device_number }) => {
#[allow(clippy::unnecessary_fallible_conversions)]
match mknod(
&filepath,
SFlag::S_IFBLK,
Mode::from_bits(mode_t::from(node.header.permissions)).unwrap(),
dev_t::try_from(*device_number).unwrap(),
) {
Ok(_) => {
if args.info && !args.quiet {
created(&pb, filepath.to_str().unwrap());
}
set_attributes(&pb, args, &filepath, &node.header, root_process, true);
}
Err(_) => {
if !args.quiet {
let line = format!(
"block device {}, are you superuser?",
filepath.to_str().unwrap()
);
failed(&pb, &line);
}
let mut p = processing.lock().unwrap();
p.remove(fullpath);
return;
}
}
}
InnerNode::NamedPipe => {
match mkfifo(
&filepath,
Mode::from_bits(mode_t::from(node.header.permissions)).unwrap(),
) {
Ok(_) => {
if args.info && !args.quiet {
created(&pb, filepath.to_str().unwrap());
}
set_attributes(&pb, args, &filepath, &node.header, root_process, true);
}
Err(_) => {
if !args.quiet {
let line = format!("named pipe {}", filepath.to_str().unwrap());
failed(&pb, &line);
}
let mut p = processing.lock().unwrap();
p.remove(fullpath);
return;
}
}
}
InnerNode::Socket => {
#[allow(clippy::unnecessary_fallible_conversions)]
match mknod(
&filepath,
SFlag::S_IFSOCK,
Mode::from_bits(mode_t::from(node.header.permissions)).unwrap(),
dev_t::try_from(0_u64).unwrap(),
) {
Ok(_) => {
if args.info && !args.quiet {
created(&pb, filepath.to_str().unwrap());
}
set_attributes(&pb, args, &filepath, &node.header, root_process, true);
}
Err(_) => {
if !args.quiet {
let line = format!("socket {}", filepath.to_str().unwrap());
failed(&pb, &line);
}
let mut p = processing.lock().unwrap();
p.remove(fullpath);
return;
}
}
}
}
let mut p = processing.lock().unwrap();
p.remove(fullpath);
});
// fixup dir permissions
for node in filesystem.files().filter(|a| a.fullpath.starts_with(&args.path_filter)) {
if let InnerNode::Dir(SquashfsDir { .. }) = &node.inner {
let path = &node.fullpath;
let path = path.strip_prefix(Component::RootDir).unwrap_or(path);
let path = Path::new(&args.dest).join(path);
set_attributes(&pb, args, &path, &node.header, root_process, false);
}
}
pb.finish_and_clear();
// extraction is finished
let green_bold: console::Style = console::Style::new().green().bold();
if !args.quiet {
println!(
"{:>16} extraction of {} nodes in {}",
green_bold.apply_to("Finished"),
n_nodes.unwrap(),
HumanDuration(start.elapsed())
);
}
}
07070100000024000081A40000000000000000000000016854DB9500000691000000000000000000000000000000000000002800000000backhand-0.23.0/backhand-cli/src/lib.rs// Compiled for every binary, as this is not a workspace
use clap::builder::styling::*;
#[doc(hidden)]
pub fn styles() -> clap::builder::Styles {
Styles::styled()
.header(AnsiColor::Green.on_default() | Effects::BOLD)
.usage(AnsiColor::Green.on_default() | Effects::BOLD)
.literal(AnsiColor::Cyan.on_default() | Effects::BOLD)
.placeholder(AnsiColor::Cyan.on_default())
.error(AnsiColor::Red.on_default() | Effects::BOLD)
.valid(AnsiColor::Cyan.on_default() | Effects::BOLD)
.invalid(AnsiColor::Yellow.on_default() | Effects::BOLD)
}
#[doc(hidden)]
pub fn after_help(rayon_env: bool) -> String {
let mut s = String::new();
let header = color_print::cstr!("<green, bold>Decompressors available:</>\n");
s.push_str(header);
#[cfg(feature = "any-gzip")]
s.push_str(color_print::cstr!(" <cyan, bold>gzip\n"));
#[cfg(feature = "xz")]
s.push_str(color_print::cstr!(" <cyan, bold>xz\n"));
#[cfg(feature = "lzo")]
s.push_str(color_print::cstr!(" <cyan, bold>lzo\n"));
#[cfg(feature = "zstd")]
s.push_str(color_print::cstr!(" <cyan, bold>zstd\n"));
s.push_str(color_print::cstr!("<green, bold>Environment Variables:\n"));
s.push_str(color_print::cstr!(" <cyan, bold>RUST_LOG:"));
s.push_str(" https://docs.rs/tracing-subscriber/latest/tracing_subscriber/fmt/index.html#filtering-events-with-environment-variables");
if rayon_env {
s.push('\n');
s.push_str(color_print::cstr!(" <cyan, bold>RAYON_NUM_THREADS:"));
s.push_str(r#" https://docs.rs/rayon/latest/rayon/struct.ThreadPoolBuilder.html#method.num_threads"#);
}
s
}
07070100000025000041ED0000000000000000000000026854DB9500000000000000000000000000000000000000000000001E00000000backhand-0.23.0/backhand-test07070100000026000081A40000000000000000000000016854DB95000004DF000000000000000000000000000000000000002900000000backhand-0.23.0/backhand-test/Cargo.toml[package]
name = "tests"
version = "0.0.0"
edition = "2021"
publish = false
[dev-dependencies]
backhand = { path = "../backhand", default-features = false }
assert_cmd = { version = "2.0.16", features = ["color", "color-auto"] }
dir-diff = { git = "https://github.com/wcampbell0x2a/dir-diff", branch = "add-checking-permissions" }
tempfile = "3.14.0"
test-assets-ureq = "0.3.0"
test-log = { version = "0.2.16", features = ["trace"] }
tracing = "0.1.40"
libdeflater = "1.22.0"
env_logger = "0.11.5"
tracing-subscriber = { version = "0.3.18", features = ["env-filter", "fmt"] }
nix = { version = "0.30.0", default-features = false, features = ["fs"] }
backon = "1.2.0"
[lib]
bench = false
[features]
# testing only feature for testing vs squashfs-tools/unsquashfs
__test_unsquashfs = []
default = ["xz", "gzip", "zstd"]
xz = ["backhand/xz"]
xz-static = ["backhand/xz-static"]
any-gzip = []
gzip = ["any-gzip", "backhand/gzip"]
# this library is licensed GPL and thus disabled by default
lzo = ["backhand/lzo"]
zstd = ["backhand/zstd"]
lz4 = ["backhand/lz4"]
[[test]]
name = "add"
[[test]]
name = "issues"
[[test]]
name = "mutate"
[[test]]
name = "non_standard"
[[test]]
name = "raw"
[[test]]
name = "replace"
[[test]]
name = "unsquashfs"
07070100000027000081A40000000000000000000000016854DB9500000359000000000000000000000000000000000000002800000000backhand-0.23.0/backhand-test/README.md# Testing
This package contains the tests for both `backhand` and `backhand-cli`.
First, build the binaries that will be tested along with the unit tests.
```
$ cargo build --release --bins
```
Then, run the tests:
```
$ cargo test --workspace --release --all-features
```
## Cross platform testing
You can also use `cargo-cross` to test on other architectures; see the example after this list.
See [ci](.github/workflows/main.yml) for an example of testing. We currently test the following targets in CI:
- x86_64-unknown-linux-musl
- aarch64-unknown-linux-musl
- arm-unknown-linux-musleabi
- armv7-unknown-linux-musleabi
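A minimal sketch of testing one of these targets under cross, assuming `cross` and its container runtime are installed:
```
$ cross test --workspace --release --target arm-unknown-linux-musleabi
```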
## Coverage
```
$ cargo llvm-cov run --bin replace --no-clean --release
$ cargo llvm-cov run --bin add --no-clean --release
$ cargo llvm-cov run --bin unsquashfs --no-clean --release
$ cargo llvm-cov --html --workspace --all-features --release --no-clean -- --skip slow
```
07070100000028000041ED0000000000000000000000026854DB9500000000000000000000000000000000000000000000002200000000backhand-0.23.0/backhand-test/src07070100000029000081A40000000000000000000000016854DB9500000001000000000000000000000000000000000000002900000000backhand-0.23.0/backhand-test/src/lib.rs
0707010000002A000041ED0000000000000000000000026854DB9500000000000000000000000000000000000000000000002400000000backhand-0.23.0/backhand-test/tests0707010000002B000081A40000000000000000000000016854DB950000196A000000000000000000000000000000000000002B00000000backhand-0.23.0/backhand-test/tests/add.rsmod common;
use std::process::Command;
use assert_cmd::prelude::*;
use tempfile::tempdir;
use test_assets_ureq::TestAssetDef;
use test_log::test;
#[test]
#[cfg(feature = "xz")]
fn test_add() {
use std::fs::File;
use std::io::Write;
use std::os::unix::prelude::PermissionsExt;
use backhand::DEFAULT_BLOCK_SIZE;
use nix::sys::stat::utimes;
use nix::sys::time::TimeVal;
const FILE_NAME: &str = "out.squashfs";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "6195e4d8d14c63dffa9691d36efa1eda2ee975b476bb95d4a0b59638fd9973cb".to_string(),
url: format!("https://wcampbell.dev/squashfs/testing/test_05/{FILE_NAME}"),
}];
const TEST_PATH: &str = "test-assets/test_01";
common::download_backoff(&asset_defs, TEST_PATH);
let image_path = format!("{TEST_PATH}/{FILE_NAME}");
// Add /test dir
// ./target/release/add test-assets/test_05/out.squashfs /test --dir --gid 4242 --mtime 1 --uid 2 --mode 511 -o $tmp/out
let tmp_dir = tempdir().unwrap();
let cmd = common::get_base_command("add-backhand")
.env("RUST_LOG", "none")
.args([
&image_path,
"/test",
tmp_dir.path().join("out").to_str().unwrap(),
"--dir",
"--gid",
"4242",
"--mtime",
"60",
"--uid",
"2",
"--mode",
"777",
])
.unwrap();
cmd.assert().code(0);
let mut file = File::create(tmp_dir.path().join("file").to_str().unwrap()).unwrap();
file.write_all(b"nice").unwrap();
let mut file = File::create(tmp_dir.path().join("big_file").to_str().unwrap()).unwrap();
file.write_all(&[b'a'; DEFAULT_BLOCK_SIZE as usize * 2]).unwrap();
let metadata = file.metadata().unwrap();
let mut permissions = metadata.permissions();
permissions.set_mode(0o644);
let timeval = TimeVal::new(60 * 2, 0);
utimes(tmp_dir.path().join("file").to_str().unwrap(), &timeval, &timeval).unwrap();
// We can't really test gid and uid, just trust me it works reading from the --file
let cmd = common::get_base_command("add-backhand")
.env("RUST_LOG", "none")
.args([
tmp_dir.path().join("out").to_str().unwrap(),
"/test/new",
tmp_dir.path().join("out1").to_str().unwrap(),
"--file",
tmp_dir.path().join("file").to_str().unwrap(),
"--gid",
"2",
"--uid",
"4242",
//"--mtime",
//"120",
])
.unwrap();
cmd.assert().code(0);
let cmd = common::get_base_command("add-backhand")
.env("RUST_LOG", "none")
.args([
tmp_dir.path().join("out1").to_str().unwrap(),
"/test/big_file",
tmp_dir.path().join("out2").to_str().unwrap(),
"--file",
tmp_dir.path().join("big_file").to_str().unwrap(),
"--gid",
"2",
"--uid",
"4242",
"--mtime",
"120",
])
.unwrap();
cmd.assert().code(0);
#[cfg(feature = "__test_unsquashfs")]
{
let dir = tmp_dir.path().join("out2");
let output = Command::new("unsquashfs")
.args(["-lln", "-UTC", dir.to_str().unwrap()])
.output()
.unwrap();
let expected = r#"drwxr-xr-x 1000/1000 36 2022-10-14 03:02 squashfs-root
drwxr-xr-x 1000/1000 24 2022-10-14 03:02 squashfs-root/b
drwxr-xr-x 1000/1000 24 2022-10-14 03:03 squashfs-root/b/c
-rw-r--r-- 1000/1000 39 2022-10-14 03:03 squashfs-root/b/c/d
dr----x--t 2/4242 42 1970-01-01 00:01 squashfs-root/test
-rw-r--r-- 4242/2 262144 1970-01-01 00:02 squashfs-root/test/big_file
-rw-r--r-- 4242/2 4 1970-01-01 00:02 squashfs-root/test/new
"#;
// note: the exact output of squashfs-tools can vary between versions
assert_eq!(std::str::from_utf8(&output.stdout).unwrap(), expected);
}
}
#[test]
#[cfg(feature = "xz")]
fn test_dont_emit_compression_options() {
use std::fs::File;
use std::io::Write;
use std::os::unix::prelude::PermissionsExt;
use backhand::DEFAULT_BLOCK_SIZE;
use nix::sys::stat::utimes;
use nix::sys::time::TimeVal;
const FILE_NAME: &str = "out.squashfs";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "debe0986658b276be78c3836779d20464a03d9ba0a40903e6e8e947e434f4d67".to_string(),
url: format!("https://wcampbell.dev/squashfs/testing/test_08/{FILE_NAME}"),
}];
const TEST_PATH: &str = "test-assets/test_add_compression_options";
common::download_backoff(&asset_defs, TEST_PATH);
let image_path = format!("{TEST_PATH}/{FILE_NAME}");
let tmp_dir = tempdir().unwrap();
let mut file = File::create(tmp_dir.path().join("file").to_str().unwrap()).unwrap();
file.write_all(b"nice").unwrap();
// without compression options (pass --no-compression-options)
let out_image = tmp_dir.path().join("out-comp-options").display().to_string();
let cmd = common::get_base_command("add-backhand")
.env("RUST_LOG", "none")
.args([
&image_path,
"/new",
&out_image,
"--file",
tmp_dir.path().join("file").to_str().unwrap(),
"--no-compression-options",
])
.unwrap();
cmd.assert().code(0);
let cmd = common::get_base_command("unsquashfs-backhand")
.env("RUST_LOG", "none")
.args(["-s", "--quiet", &out_image])
.unwrap();
let stdout = std::str::from_utf8(&cmd.stdout).unwrap();
stdout.contains("Compression Options: None");
// with compression options (default)
let out_image = tmp_dir.path().join("out-no-comp-options").display().to_string();
let cmd = common::get_base_command("add-backhand")
.env("RUST_LOG", "none")
.args([
&image_path,
"/new",
&out_image,
"--file",
tmp_dir.path().join("file").to_str().unwrap(),
])
.unwrap();
cmd.assert().code(0);
let cmd = common::get_base_command("unsquashfs-backhand")
.env("RUST_LOG", "none")
.args(["-s", "--quiet", &out_image])
.unwrap();
let stdout = std::str::from_utf8(&cmd.stdout).unwrap();
stdout.contains("Compression Options: Some");
}
0707010000002C000041ED0000000000000000000000026854DB9500000000000000000000000000000000000000000000002B00000000backhand-0.23.0/backhand-test/tests/common0707010000002D000081A40000000000000000000000016854DB950000135C000000000000000000000000000000000000003200000000backhand-0.23.0/backhand-test/tests/common/mod.rsuse std::error::Error;
use std::process::Command;
use std::time::Duration;
use assert_cmd::prelude::*;
use backon::BlockingRetryable;
use backon::ExponentialBuilder;
use tempfile::tempdir;
use tempfile::tempdir_in;
use test_assets_ureq::TestAssetDef;
pub fn download_backoff(assets_defs: &[TestAssetDef], test_path: &str) {
test_assets_ureq::dl_test_files_backoff(assets_defs, test_path, true, Duration::from_secs(60))
.unwrap();
}
/// test the new squashfs vs the original squashfs with squashfs-tools/unsquashfs
/// by extracting both and diffing the results
pub fn test_squashfs_tools_unsquashfs(
control: &str,
new: &str,
control_offset: Option<u64>,
assert_success: bool,
) {
let control_dir = tempdir_in(".").unwrap();
let mut cmd = Command::new("unsquashfs");
let cmd = cmd.args([
"-d",
control_dir.path().join("squashfs-root-rust").to_str().unwrap(),
"-o",
&control_offset.unwrap_or(0).to_string(),
// we don't run as root, avoid special file errors
"-ignore-errors",
//"-no-exit-code",
control,
]);
// For the older version of squashfs-tools that the cross-rs/cross project uses,
// we can't use the new -no-exit-code option in unsquashfs, so for images
// that contain /dev devices we can't assert that unsquashfs succeeds.
if assert_success {
cmd.assert().code(&[0] as &[i32]);
} else {
cmd.assert();
}
let new_dir = tempdir_in(".").unwrap();
let mut cmd = Command::new("unsquashfs");
let cmd = cmd.args([
"-d",
new_dir.path().join("squashfs-root-rust").to_str().unwrap(),
"-o",
&control_offset.unwrap_or(0).to_string(),
// we don't run as root, avoid special file errors
"-ignore-errors",
//"-no-exit-code",
new,
]);
if assert_success {
cmd.assert().code(&[0] as &[i32]);
} else {
cmd.assert();
}
let d = dir_diff::is_different(
control_dir.path().join("squashfs-root-rust").to_str().unwrap(),
new_dir.path().join("squashfs-root-rust").to_str().unwrap(),
);
assert!(!d.expect("couldn't compare dirs"));
}
// Test that our unsquashfs and squashfs-tools/unsquashfs both extract to the same files
pub fn test_bin_unsquashfs(
file: &str,
file_offset: Option<u64>,
assert_success: bool,
run_squashfs_tools_unsquashfs: bool,
) {
let tmp_dir = tempdir_in(".").unwrap();
// Run "our" unsquashfs against the control
let cmd = get_base_command("unsquashfs-backhand")
.env("RUST_LOG", "trace")
.args([
"-d",
tmp_dir.path().join("squashfs-root-rust").to_str().unwrap(),
"-o",
&file_offset.unwrap_or(0).to_string(),
file,
])
.unwrap();
tracing::info!("{:?}", cmd);
cmd.assert().code(&[0] as &[i32]);
// only squashfs-tools/unsquashfs when x86_64
if run_squashfs_tools_unsquashfs {
#[cfg(feature = "__test_unsquashfs")]
{
let mut cmd = Command::new("unsquashfs");
let cmd = cmd.args([
"-d",
tmp_dir.path().join("squashfs-root-c").to_str().unwrap(),
"-o",
&file_offset.unwrap_or(0).to_string(),
// we don't run as root, avoid special file errors
"-ignore-errors",
//"-no-exit-code",
file,
]);
// For the older version of squashfs-tools that the cross-rs/cross project uses,
// we can't use the new -no-exit-code option in unsquashfs, so for images
// that contain /dev devices we can't assert that unsquashfs succeeds.
if assert_success {
cmd.assert().code(&[0] as &[i32]);
} else {
cmd.assert();
}
tracing::info!("{:?}", cmd);
let d = dir_diff::is_different(
tmp_dir.path().join("squashfs-root-rust"),
tmp_dir.path().join("squashfs-root-c"),
);
// uncomment the following line to keep the tmp dirs around
// let _ = tmp_dir.into_path();
assert!(!d.expect("couldn't compare dirs"));
}
}
}
fn find_runner() -> Option<String> {
for (key, value) in std::env::vars() {
if key.starts_with("CARGO_TARGET_") && key.ends_with("_RUNNER") && !value.is_empty() {
return Some(value);
}
}
None
}
/// Under cargo cross (qemu), find runner
pub fn get_base_command(base: &str) -> Command {
let path = assert_cmd::cargo::cargo_bin(base);
let mut cmd;
if let Some(runner) = find_runner() {
let mut runner = runner.split_whitespace();
cmd = Command::new(runner.next().unwrap());
for arg in runner {
cmd.arg(arg);
}
cmd.arg(path);
} else {
cmd = Command::new(path);
}
cmd
}
0707010000002E000081A40000000000000000000000016854DB9500000516000000000000000000000000000000000000002E00000000backhand-0.23.0/backhand-test/tests/issues.rs/// https://github.com/wcampbell0x2a/backhand/issues/275
#[test]
#[cfg(feature = "xz")]
fn issue_275() {
let mut writer = std::io::Cursor::new(vec![]);
let mut fs = backhand::FilesystemWriter::default();
fs.write(&mut writer).unwrap();
}
/// https://github.com/wcampbell0x2a/backhand/issues/359
#[test]
#[cfg(feature = "xz")]
fn issue_359() {
let mut writer = std::io::Cursor::new(vec![]);
let mut fs = backhand::FilesystemWriter::default();
let header = backhand::NodeHeader { permissions: 0, uid: 1, gid: 2, mtime: 3 };
fs.push_dir_all("a/b/c/d/e/f/g", header).unwrap();
fs.write(&mut writer).unwrap();
}
/// https://github.com/wcampbell0x2a/backhand/issues/363
#[test]
#[cfg(feature = "xz")]
fn issue_363() {
let dummy_file = std::io::Cursor::new(&[]);
let dummy_header = backhand::NodeHeader::default();
let mut fs = backhand::FilesystemWriter::default();
// create a file
fs.push_file(dummy_file.clone(), "a", dummy_header).unwrap();
// try to put a file inside the first file
match fs.push_file(dummy_file, "a/b", dummy_header) {
// correct result: InvalidFilePath (or equivalent error?)
Err(backhand::BackhandError::InvalidFilePath) => {}
Ok(_) => panic!("Invalid result"),
x => x.unwrap(),
};
}
0707010000002F000081A40000000000000000000000016854DB9500000E2F000000000000000000000000000000000000002E00000000backhand-0.23.0/backhand-test/tests/mutate.rsmod common;
use std::fs::File;
use std::io::{BufReader, BufWriter, Cursor};
use backhand::{FilesystemReader, FilesystemWriter, NodeHeader};
use common::{test_bin_unsquashfs, test_squashfs_tools_unsquashfs};
use test_assets_ureq::TestAssetDef;
use test_log::test;
/// Before:
/// testing
/// └── a
/// └── b
/// └── c
/// └── d
/// └── e
/// └── first_file
/// After:
/// testing
/// ├── a
/// │ ├── b
/// │ │ └── c
/// │ │ └── d
/// │ │ └── e
/// │ │ ├── dude
/// │ │ └── first_file (modified)
/// │ └── d
/// │ └── e
/// │ └── new_file (added)
/// ├── ptr -> a/b/c/d/dude
/// └── root_file (added)
#[test]
#[cfg(feature = "xz")]
fn test_add_00() {
let asset_defs = [
TestAssetDef {
filename: "out.squashfs".to_string(),
hash: "8610cd350bbd51ca6c8b84c210ef24c57898845f75f5b4ae0c6d7e785efaab4f".to_string(),
url: "https://wcampbell.dev/squashfs/testing/test_add_00/out.squashfs".to_string(),
},
TestAssetDef {
filename: "new.squashfs".to_string(),
hash: "dc02848152d42b331fa0540000f68bf0942c5b00a3a44a3a6f208af34b4b6ec3".to_string(),
url: "https://wcampbell.dev/squashfs/testing/test_add_00/new.squashfs".to_string(),
},
TestAssetDef {
filename: "control.squashfs".to_string(),
hash: "b690b167ef3d6126ca4180e73cf0cb827f48405630278a64017208b6774b663b".to_string(),
url: "https://wcampbell.dev/squashfs/testing/test_add_00/control.squashfs".to_string(),
},
];
const TEST_PATH: &str = "test-assets/test_add_00";
let og_path = format!("{TEST_PATH}/out.squashfs");
let new_path = format!("{TEST_PATH}/bytes.squashfs");
common::download_backoff(&asset_defs, TEST_PATH);
let file = BufReader::new(File::open(&og_path).unwrap());
let og_filesystem = FilesystemReader::from_reader(file).unwrap();
let mut new_filesystem = FilesystemWriter::from_fs_reader(&og_filesystem).unwrap();
let h = NodeHeader { permissions: 0o755, uid: 0, gid: 0, mtime: 0 };
// create directories
new_filesystem.push_dir_all("a/d/e", h).unwrap();
new_filesystem.push_dir_all("a/b/c/d/e", h).unwrap();
// Add file
let bytes = Cursor::new(b"this is a new file, wowo!");
new_filesystem.push_file(bytes, "a/d/e/new_file", h).unwrap();
// Add file
new_filesystem.push_file(Cursor::new("i am (g)root"), "root_file", h).unwrap();
// Add file
new_filesystem.push_file(Cursor::new("dude"), "a/b/c/d/dude", h).unwrap();
new_filesystem.push_symlink("a/b/c/d/dude", "ptr", h).unwrap();
// Modify file
new_filesystem
.replace_file("/a/b/c/d/e/first_file", Cursor::new(b"MODIFIEDfirst file!\n"))
.unwrap();
// create the modified squashfs
{
let mut output = BufWriter::new(File::create(&new_path).unwrap());
new_filesystem.write(&mut output).unwrap();
}
// force output to drop, so buffer is written
// compare when on x86 host
#[cfg(feature = "__test_unsquashfs")]
{
let control_new_path = format!("{TEST_PATH}/control.squashfs");
test_squashfs_tools_unsquashfs(&new_path, &control_new_path, None, true);
test_bin_unsquashfs(&og_path, None, true, true);
test_bin_unsquashfs(&new_path, None, true, true);
}
}
07070100000030000081A40000000000000000000000016854DB95000015EB000000000000000000000000000000000000003400000000backhand-0.23.0/backhand-test/tests/non_standard.rsmod common;
use std::fs::File;
use std::io::{BufReader, BufWriter, Write};
use backhand::compression::{CompressionAction, Compressor, DefaultCompressor};
use backhand::kind::{self, Kind};
use backhand::{BackhandError, FilesystemCompressor, FilesystemReader, FilesystemWriter};
use test_assets_ureq::TestAssetDef;
use test_log::test;
use tracing::info;
/// - Download file
/// - Read into Squashfs
/// - Into Filesystem
/// - Into Bytes
/// - - Into Squashfs
/// - - Into Filesystem
/// - Can't test with unsquashfs, as it doesn't support these custom filesystems
fn full_test(
assets_defs: &[TestAssetDef],
filepath: &str,
test_path: &str,
offset: u64,
kind: &Kind,
pad: Option<u32>,
) {
common::download_backoff(&assets_defs, test_path);
let og_path = format!("{test_path}/{filepath}");
let new_path = format!("{test_path}/bytes.squashfs");
{
let file = BufReader::new(File::open(og_path).unwrap());
info!("calling from_reader");
let og_filesystem =
FilesystemReader::from_reader_with_offset_and_kind(file, offset, Kind::from_kind(kind))
.unwrap();
let mut new_filesystem = FilesystemWriter::from_fs_reader(&og_filesystem).unwrap();
if let Some(pad) = pad {
new_filesystem.set_kib_padding(pad);
}
// Test Debug is impl'ed properly on FilesystemWriter
let _ = format!("{new_filesystem:#02x?}");
// convert to bytes
info!("calling to_bytes");
let mut output = BufWriter::new(File::create(&new_path).unwrap());
new_filesystem.write_with_offset(&mut output, offset).unwrap();
}
{
// assert that our library can at least read the output
info!("calling from_reader");
let created_file = BufReader::new(File::open(&new_path).unwrap());
let _new_filesystem = FilesystemReader::from_reader_with_offset_and_kind(
created_file,
offset,
Kind::from_kind(kind),
)
.unwrap();
}
}
#[test]
#[cfg(feature = "gzip")]
fn test_non_standard_be_v4_0() {
use backhand::compression::DefaultCompressor;
const FILE_NAME: &str = "squashfs_v4.unblob.bin";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "9c7c523c5d1d1cafc0b679af9092ce0289d9656f6a24bc3bd0009f95b69397c0".to_string(),
url: "https://wcampbell.dev/squashfs/testing/test_custom/squashfs_v4.unblob.bin"
.to_string(),
}];
const TEST_PATH: &str = "test-assets/non_standard_be_v4_0";
full_test(
&asset_defs,
FILE_NAME,
TEST_PATH,
0,
&Kind::from_const(kind::BE_V4_0).unwrap(),
None,
);
// test custom kind "builder-lite"
let _kind = Kind::new(&DefaultCompressor)
.with_magic(kind::Magic::Big)
.with_all_endian(kind::Endian::Big);
}
#[test]
#[cfg(feature = "gzip")]
fn test_non_standard_be_v4_1() {
const FILE_NAME: &str = "squashfs_v4.nopad.unblob.bin";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "a29ddc15f5a6abcabf28b7161837eb56b34111e48420e7392e648f2fdfe956ed".to_string(),
url: "https://wcampbell.dev/squashfs/testing/test_custom/squashfs_v4.nopad.unblob.bin"
.to_string(),
}];
const TEST_PATH: &str = "test-assets/non_standard_be_v4_1";
full_test(
&asset_defs,
FILE_NAME,
TEST_PATH,
0,
&Kind::from_const(kind::BE_V4_0).unwrap(),
None,
);
}
#[test]
#[cfg(feature = "gzip")]
fn test_custom_compressor() {
use backhand::SuperBlock;
const FILE_NAME: &str = "squashfs_v4.nopad.unblob.bin";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "a29ddc15f5a6abcabf28b7161837eb56b34111e48420e7392e648f2fdfe956ed".to_string(),
url: "https://wcampbell.dev/squashfs/testing/test_custom/squashfs_v4.nopad.unblob.bin"
.to_string(),
}];
#[derive(Copy, Clone)]
pub struct CustomCompressor;
// Special decompressor that only supports the Rust implementation of gzip: libdeflater for
// decompression
impl CompressionAction for CustomCompressor {
fn decompress(
&self,
bytes: &[u8],
out: &mut Vec<u8>,
compressor: Compressor,
) -> Result<(), BackhandError> {
if let Compressor::Gzip = compressor {
out.resize(out.capacity(), 0);
let mut decompressor = libdeflater::Decompressor::new();
let amt = decompressor.zlib_decompress(bytes, out).unwrap();
out.truncate(amt);
} else {
unimplemented!();
}
Ok(())
}
// Just pass to default compressor
fn compress(
&self,
bytes: &[u8],
fc: FilesystemCompressor,
block_size: u32,
) -> Result<Vec<u8>, BackhandError> {
DefaultCompressor.compress(bytes, fc, block_size)
}
fn compression_options(
&self,
_superblock: &mut SuperBlock,
_kind: &Kind,
_fs_compressor: FilesystemCompressor,
) -> Result<Vec<u8>, BackhandError> {
DefaultCompressor.compression_options(_superblock, _kind, _fs_compressor)
}
}
let kind = Kind::new_with_const(&CustomCompressor, kind::BE_V4_0);
const TEST_PATH: &str = "test-assets/custom_compressor";
full_test(&asset_defs, FILE_NAME, TEST_PATH, 0, &kind, Some(0));
}
07070100000031000081A40000000000000000000000016854DB9500001B7E000000000000000000000000000000000000002B00000000backhand-0.23.0/backhand-test/tests/raw.rsmod common;
use std::fs::File;
use std::io::{BufWriter, Cursor};
use backhand::compression::Compressor;
use backhand::{
kind, CompressionExtra, ExtraXz, FilesystemCompressor, FilesystemWriter, NodeHeader,
SuperBlock, DEFAULT_BLOCK_SIZE,
};
use common::{test_bin_unsquashfs, test_squashfs_tools_unsquashfs};
use test_assets_ureq::TestAssetDef;
#[test]
#[cfg(all(feature = "xz", feature = "gzip"))]
fn test_raw_00() {
use std::{io::BufReader, process::Command};
use backhand::{kind::Kind, FilesystemReader};
let asset_defs = [TestAssetDef {
filename: "control.squashfs".to_string(),
hash: "e3d8f94f8402412ecf742d44680f1dd5d8fd28cc3d1a502e5fcfcc9e2f5f949a".to_string(),
url: "https://wcampbell.dev/squashfs/testing/test_raw_00/control.squashfs".to_string(),
}];
const TEST_PATH: &str = "test-assets/test_raw_00";
let new_path = format!("{TEST_PATH}/bytes.squashfs");
common::download_backoff(&asset_defs, TEST_PATH);
let header = NodeHeader { permissions: 0o755, uid: 1000, gid: 1000, mtime: 0 };
let o_header = NodeHeader { permissions: 0o766, ..header };
// test out max xz level
let mut xz_extra = ExtraXz::default();
xz_extra.level(9).unwrap();
let extra = CompressionExtra::Xz(xz_extra);
let mut compressor = FilesystemCompressor::new(Compressor::Xz, None).unwrap();
compressor.extra(extra).unwrap();
let time = 0x634f_5237;
// (some of these are already set with default(), but just testing...)
let mut fs: FilesystemWriter = FilesystemWriter::default();
fs.set_time(time);
fs.set_block_size(DEFAULT_BLOCK_SIZE);
fs.set_root_mode(0o777);
fs.set_root_uid(1000);
fs.set_root_gid(1000);
fs.set_compressor(compressor);
fs.set_kind(Kind::from_const(kind::LE_V4_0).unwrap());
fs.set_kib_padding(8);
// don't do anything if the directory exists
fs.push_dir_all("usr/bin", o_header).unwrap();
fs.push_file(Cursor::new(vec![0x00, 0x01]), "usr/bin/heyo", header).unwrap();
fs.push_dir_all("this/is/a", o_header).unwrap();
fs.push_file(Cursor::new(vec![0x0f; 0xff]), "this/is/a/file", header).unwrap();
// create the modified squashfs
let mut output = BufWriter::new(File::create(&new_path).unwrap());
let (superblock, bytes_written) = fs.write(&mut output).unwrap();
// 8KiB
assert_eq!(bytes_written, 0x2000);
assert_eq!(
superblock,
SuperBlock {
magic: [0x68, 0x73, 0x71, 0x73],
inode_count: 0x8,
mod_time: time,
block_size: 0x20000,
frag_count: 0x1,
compressor: Compressor::Xz,
block_log: 0x11,
flags: backhand::Flags::DataHasBeenDeduplicated as u16,
id_count: 0x2,
version_major: 0x4,
version_minor: 0x0,
root_inode: 0xe0,
bytes_used: 0x1f4,
id_table: 0x1ec,
xattr_table: 0xffffffffffffffff,
inode_table: 0xac,
dir_table: 0x13a,
frag_table: 0x1da,
export_table: 0xffffffffffffffff,
}
);
// compare
#[cfg(feature = "__test_unsquashfs")]
{
let output = Command::new("unsquashfs").args(["-lln", "-UTC", &new_path]).output().unwrap();
let expected = r#"drwxrwxrwx 1000/1000 38 1970-01-01 00:00 squashfs-root
drwxrw-rw- 1000/1000 25 1970-01-01 00:00 squashfs-root/this
drwxrw-rw- 1000/1000 24 1970-01-01 00:00 squashfs-root/this/is
drwxrw-rw- 1000/1000 27 1970-01-01 00:00 squashfs-root/this/is/a
-rwxr-xr-x 1000/1000 255 1970-01-01 00:00 squashfs-root/this/is/a/file
drwxrw-rw- 1000/1000 26 1970-01-01 00:00 squashfs-root/usr
drwxrw-rw- 1000/1000 27 1970-01-01 00:00 squashfs-root/usr/bin
-rwxr-xr-x 1000/1000 2 1970-01-01 00:00 squashfs-root/usr/bin/heyo
"#;
// the output of unsquashfs varies between versions; -lln -UTC keeps it stable enough for an exact comparison
assert_eq!(std::str::from_utf8(&output.stdout).unwrap(), expected);
let control_new_path = format!("{TEST_PATH}/control.squashfs");
test_squashfs_tools_unsquashfs(&new_path, &control_new_path, None, true);
test_bin_unsquashfs(&new_path, None, true, true);
}
// Test downing the compression level
let file = BufReader::new(File::open(&new_path).unwrap());
let fs = FilesystemReader::from_reader(file).unwrap();
let mut fs = FilesystemWriter::from_fs_reader(&fs).unwrap();
let mut xz_extra = ExtraXz::default();
xz_extra.level(1).unwrap();
let extra = CompressionExtra::Xz(xz_extra);
let mut compressor = FilesystemCompressor::new(Compressor::Xz, None).unwrap();
compressor.extra(extra).unwrap();
fs.set_compressor(compressor);
// create the modified squashfs
let new_path = format!("{TEST_PATH}/bytes_less_xz.squashfs");
let mut output = BufWriter::new(File::create(&new_path).unwrap());
let (_superblock, _bytes_written) = fs.write(&mut output).unwrap();
// compare
#[cfg(feature = "__test_unsquashfs")]
{
let control_new_path = format!("{TEST_PATH}/control.squashfs");
test_squashfs_tools_unsquashfs(&new_path, &control_new_path, None, true);
test_bin_unsquashfs(&new_path, None, true, true);
}
// Test picking a different compression
let file = BufReader::new(File::open(&new_path).unwrap());
let fs = FilesystemReader::from_reader(file).unwrap();
let mut fs = FilesystemWriter::from_fs_reader(&fs).unwrap();
let compressor = FilesystemCompressor::new(Compressor::Gzip, None).unwrap();
fs.set_compressor(compressor);
// create the modified squashfs
let new_path = format!("{TEST_PATH}/bytes_gzip.squashfs");
let mut output = BufWriter::new(File::create(&new_path).unwrap());
let (_superblock, _bytes_written) = fs.write(&mut output).unwrap();
// compare
#[cfg(feature = "__test_unsquashfs")]
{
let control_new_path = format!("{TEST_PATH}/control.squashfs");
test_squashfs_tools_unsquashfs(&new_path, &control_new_path, None, true);
test_bin_unsquashfs(&new_path, None, true, true);
}
// Test changing block size
let file = BufReader::new(File::open(&new_path).unwrap());
let fs = FilesystemReader::from_reader(file).unwrap();
let mut fs = FilesystemWriter::from_fs_reader(&fs).unwrap();
fs.set_block_size(DEFAULT_BLOCK_SIZE * 2);
// create the modified squashfs
let new_path = format!("{TEST_PATH}/bytes_bigger_blocks.squashfs");
let mut output = BufWriter::new(File::create(&new_path).unwrap());
let (_superblock, _bytes_written) = fs.write(&mut output).unwrap();
// compare
#[cfg(feature = "__test_unsquashfs")]
{
let control_new_path = format!("{TEST_PATH}/control.squashfs");
test_squashfs_tools_unsquashfs(&new_path, &control_new_path, None, true);
test_bin_unsquashfs(&new_path, None, true, true);
}
}
07070100000032000081A40000000000000000000000016854DB950000094E000000000000000000000000000000000000002F00000000backhand-0.23.0/backhand-test/tests/replace.rsmod common;
use std::process::Command;
use assert_cmd::prelude::*;
use tempfile::tempdir;
use test_assets_ureq::TestAssetDef;
use test_log::test;
#[test]
#[cfg(feature = "xz")]
fn test_replace() {
const FILE_NAME: &str = "out.squashfs";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "6195e4d8d14c63dffa9691d36efa1eda2ee975b476bb95d4a0b59638fd9973cb".to_string(),
url: format!("https://wcampbell.dev/squashfs/testing/test_05/{FILE_NAME}"),
}];
const TEST_PATH: &str = "test-assets/test_05";
common::download_backoff(&asset_defs, TEST_PATH);
let image_path = format!("{TEST_PATH}/{FILE_NAME}");
// extract single file
let tmp_dir = tempdir().unwrap();
let cmd = common::get_base_command("unsquashfs-backhand")
.env("RUST_LOG", "none")
.args([
"--path-filter",
r#"/b/c/d"#,
"-i",
&image_path,
"-d",
tmp_dir.path().join("squashfs-root-rust").to_str().unwrap(),
])
.unwrap();
cmd.assert().code(0);
// edit that file
let text = b"The mystery of life isn't a problem to solve, but a reality to experience.";
std::fs::write(tmp_dir.path().join("squashfs-root-rust/b/c/d").to_str().unwrap(), text)
.unwrap();
// replace that file
let cmd = common::get_base_command("replace-backhand")
.env("RUST_LOG", "none")
.args([
&image_path,
tmp_dir.path().join("squashfs-root-rust/b/c/d").to_str().unwrap(),
"/b/c/d",
tmp_dir.path().join("replaced").to_str().unwrap(),
])
.unwrap();
cmd.assert().code(0);
// extract
{
let cmd = common::get_base_command("unsquashfs-backhand")
.env("RUST_LOG", "none")
.args([
"--path-filter",
r#"/b/c/d"#,
"-i",
tmp_dir.path().join("replaced").to_str().unwrap(),
"-d",
tmp_dir.path().join("squashfs-root-rust2").to_str().unwrap(),
])
.unwrap();
cmd.assert().code(0);
// assert the text changed!
let bytes =
std::fs::read(tmp_dir.path().join("squashfs-root-rust2/b/c/d").to_str().unwrap())
.unwrap();
assert_eq!(bytes, text);
}
}
07070100000033000081A40000000000000000000000016854DB9500004DE9000000000000000000000000000000000000002C00000000backhand-0.23.0/backhand-test/tests/test.rsmod common;
use std::fs::File;
use std::io::{BufReader, BufWriter};
use assert_cmd::prelude::*;
use assert_cmd::Command;
use backhand::{FilesystemReader, FilesystemWriter};
use common::{test_bin_unsquashfs, test_squashfs_tools_unsquashfs};
use tempfile::tempdir;
use test_assets_ureq::TestAssetDef;
use test_log::test;
use tracing::info;
#[cfg(feature = "gzip")]
fn has_gzip_feature() -> bool {
true
}
#[cfg(not(feature = "gzip"))]
fn has_gzip_feature() -> bool {
false
}
enum Verify {
Extract,
}
fn only_read(assets_defs: &[TestAssetDef], filepath: &str, test_path: &str, offset: u64) {
common::download_backoff(assets_defs, test_path);
let og_path = format!("{test_path}/{filepath}");
let file = BufReader::new(File::open(&og_path).unwrap());
info!("calling from_reader");
let _ = FilesystemReader::from_reader_with_offset(file, offset).unwrap();
// TODO: this should still check our own unsquashfs
}
/// - Download file
/// - Read into Squashfs
/// - Into Filesystem
/// - Into Bytes
/// - - Into Squashfs
/// - - Into Filesystem
/// - Run squashfs-tools/unsquashfs on both and assert there is no diff in the extracted files
fn full_test(
assets_defs: &[TestAssetDef],
filepath: &str,
test_path: &str,
offset: u64,
verify: Verify,
assert_success: bool,
) {
full_test_inner(assets_defs, filepath, test_path, offset, verify, assert_success, true)
}
fn full_test_inner(
assets_defs: &[TestAssetDef],
filepath: &str,
test_path: &str,
offset: u64,
verify: Verify,
assert_success: bool,
run_squashfs_tools_unsquashfs: bool,
) {
common::download_backoff(assets_defs, test_path);
let og_path = format!("{test_path}/{filepath}");
let new_path = format!("{test_path}/bytes.squashfs");
let file = BufReader::new(File::open(&og_path).unwrap());
info!("calling from_reader");
let og_filesystem = FilesystemReader::from_reader_with_offset(file, offset).unwrap();
let og_comp_opts = og_filesystem.compression_options;
let mut new_filesystem = FilesystemWriter::from_fs_reader(&og_filesystem).unwrap();
// convert to bytes
info!("calling to_bytes");
let mut output = BufWriter::new(File::create(&new_path).unwrap());
new_filesystem.write_with_offset(&mut output, offset).unwrap();
info!("done with writing to bytes");
drop(new_filesystem);
drop(og_filesystem);
// assert that our library can at least read the output; unsquashfs is used to really assert this
info!("calling from_reader");
let created_file = BufReader::new(File::open(&new_path).unwrap());
let written_new_filesystem =
FilesystemReader::from_reader_with_offset(created_file, offset).unwrap();
// compression options are the same
let new_comp_opts = written_new_filesystem.compression_options;
assert_eq!(og_comp_opts, new_comp_opts);
drop(written_new_filesystem);
match verify {
Verify::Extract => {
if run_squashfs_tools_unsquashfs {
#[cfg(feature = "__test_unsquashfs")]
{
info!("starting squashfs-tools/unsquashfs test");
test_squashfs_tools_unsquashfs(
&og_path,
&new_path,
Some(offset),
assert_success,
);
}
}
info!("starting backhand/unsquashfs original test");
test_bin_unsquashfs(
&og_path,
Some(offset),
assert_success,
run_squashfs_tools_unsquashfs,
);
info!("starting backhand/unsquashfs created test");
test_bin_unsquashfs(
&new_path,
Some(offset),
assert_success,
run_squashfs_tools_unsquashfs,
);
}
}
}
/// mksquashfs ./target/release/squashfs-deku out.squashfs -comp gzip -Xcompression-level 2 -always-use-fragments
#[test]
#[cfg(any(feature = "gzip"))]
fn test_00() {
const FILE_NAME: &str = "out.squashfs";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "976c1638d8c1ba8014de6c64b196cbd70a5acf031be10a8e7f649536193c8e78".to_string(),
url: format!("https://wcampbell.dev/squashfs/testing/test_00/{FILE_NAME}"),
}];
const TEST_PATH: &str = "test-assets/test_00";
if has_gzip_feature() {
full_test(&asset_defs, FILE_NAME, TEST_PATH, 0, Verify::Extract, true);
} else {
only_read(&asset_defs, FILE_NAME, TEST_PATH, 0);
}
}
/// mksquashfs ./target/release/squashfs-deku out.squashfs -comp gzip -Xcompression-level 2
#[test]
#[cfg(any(feature = "gzip"))]
fn test_01() {
const FILE_NAME: &str = "out.squashfs";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "9d9f5ba77b562fd4141fc725038028822673b24595e2774a8718260f4fc39710".to_string(),
url: format!("https://wcampbell.dev/squashfs/testing/test_01/{FILE_NAME}"),
}];
const TEST_PATH: &str = "test-assets/test_01";
if has_gzip_feature() {
full_test(&asset_defs, FILE_NAME, TEST_PATH, 0, Verify::Extract, true);
} else {
only_read(&asset_defs, FILE_NAME, TEST_PATH, 0);
}
}
/// mksquashfs ./target/release/squashfs-deku out.squashfs -comp xz
#[test]
#[cfg(feature = "xz")]
fn test_02() {
const FILE_NAME: &str = "out.squashfs";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "c18d1b57e73740ab4804672c61f5c77f170cc16179d9a7e12dd722ba311f5623".to_string(),
url: format!("https://wcampbell.dev/squashfs/testing/test_02/{FILE_NAME}"),
}];
const TEST_PATH: &str = "test-assets/test_02";
full_test(&asset_defs, FILE_NAME, TEST_PATH, 0, Verify::Extract, true);
}
/// mksquashfs ./target/release/squashfs-deku Cargo.toml out.squashfs -comp xz
#[test]
#[cfg(feature = "xz")]
fn test_03() {
const FILE_NAME: &str = "out.squashfs";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "4171d9dd5a53f2ad841715af1c01351028a9d9df13e4ae8172f37660306c0473".to_string(),
url: format!("https://wcampbell.dev/squashfs/testing/test_03/{FILE_NAME}"),
}];
const TEST_PATH: &str = "test-assets/test_03";
full_test(&asset_defs, FILE_NAME, TEST_PATH, 0, Verify::Extract, true);
}
#[test]
#[cfg(feature = "xz")]
fn test_04() {
const FILE_NAME: &str = "out.squashfs";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "bfb3424bf3b744b8c7a156c9c538310c49fbe8a57f336864f00210e6f356f2c3".to_string(),
url: format!("https://wcampbell.dev/squashfs/testing/test_04/{FILE_NAME}"),
}];
const TEST_PATH: &str = "test-assets/test_04";
full_test(&asset_defs, FILE_NAME, TEST_PATH, 0, Verify::Extract, true);
}
#[test]
#[cfg(feature = "xz")]
fn test_05() {
const FILE_NAME: &str = "out.squashfs";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "6195e4d8d14c63dffa9691d36efa1eda2ee975b476bb95d4a0b59638fd9973cb".to_string(),
url: format!("https://wcampbell.dev/squashfs/testing/test_05/{FILE_NAME}"),
}];
const TEST_PATH: &str = "test-assets/test_05";
full_test(&asset_defs, FILE_NAME, TEST_PATH, 0, Verify::Extract, true);
}
/// mksquashfs ./target/release/squashfs-deku out.squashfs -comp gzip -always-use-fragments
#[test]
#[cfg(any(feature = "gzip"))]
fn test_06() {
const FILE_NAME: &str = "out.squashfs";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "3c5db6e8c59a4e1291a016f736fbf76ddc1e07fa4bc8940eac1754975b4c617b".to_string(),
url: format!("https://wcampbell.dev/squashfs/testing/test_06/{FILE_NAME}"),
}];
const TEST_PATH: &str = "test-assets/test_06";
if has_gzip_feature() {
full_test(&asset_defs, FILE_NAME, TEST_PATH, 0, Verify::Extract, true);
} else {
only_read(&asset_defs, FILE_NAME, TEST_PATH, 0);
}
}
/// mksquashfs ./target/release/squashfs-deku out.squashfs -comp gzip
#[test]
#[cfg(any(feature = "gzip"))]
fn test_07() {
const FILE_NAME: &str = "out.squashfs";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "6bc1571d82473e74a55cfd2d07ce21d9150ea4ad5941d2345ea429507d812671".to_string(),
url: format!("https://wcampbell.dev/squashfs/testing/test_07/{FILE_NAME}"),
}];
const TEST_PATH: &str = "test-assets/test_07";
if has_gzip_feature() {
full_test(&asset_defs, FILE_NAME, TEST_PATH, 0, Verify::Extract, true);
} else {
only_read(&asset_defs, FILE_NAME, TEST_PATH, 0);
}
}
// mksquashfs ./target/release/squashfs-deku out.squashfs -comp xz -Xbcj arm
#[test]
#[cfg(feature = "xz")]
fn test_08() {
const FILE_NAME: &str = "out.squashfs";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "debe0986658b276be78c3836779d20464a03d9ba0a40903e6e8e947e434f4d67".to_string(),
url: format!("https://wcampbell.dev/squashfs/testing/test_08/{FILE_NAME}"),
}];
const TEST_PATH: &str = "test-assets/test_08";
full_test(&asset_defs, FILE_NAME, TEST_PATH, 0, Verify::Extract, true);
}
#[test]
#[cfg(feature = "xz")]
fn test_19() {
const FILE_NAME: &str = "out.squashfs";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "4dc83c3eea0d7ae2a23c891798d485ba0eded862db5e1528a984e08b35255b0f".to_string(),
url: format!("https://wcampbell.dev/squashfs/testing/test_19/{FILE_NAME}"),
}];
const TEST_PATH: &str = "test-assets/test_19";
full_test(&asset_defs, FILE_NAME, TEST_PATH, 0, Verify::Extract, true);
}
#[test]
#[cfg(feature = "xz")]
fn test_20() {
const FILE_NAME: &str = "out.squashfs";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "4f00c0debb2d40ecb45f8d5d176a97699a8e07727713883899e6720331d67078".to_string(),
url: format!("https://wcampbell.dev/squashfs/testing/test_20/{FILE_NAME}"),
}];
const TEST_PATH: &str = "test-assets/test_20";
full_test(&asset_defs, FILE_NAME, TEST_PATH, 0, Verify::Extract, true);
}
#[test]
#[cfg(feature = "xz")]
fn test_openwrt_tplink_archera7v5() {
const FILE_NAME: &str =
"openwrt-22.03.2-ath79-generic-tplink_archer-a7-v5-squashfs-factory.bin";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "ce0bfab79550885cb7ced388caaaa9bd454852bf1f9c34789abc498eb6c74df6".to_string(),
url: format!(
"https://downloads.openwrt.org/releases/22.03.2/targets/ath79/generic/{FILE_NAME}"
),
}];
const TEST_PATH: &str = "test-assets/test_openwrt_tplink_archera7v5";
full_test(&asset_defs, FILE_NAME, TEST_PATH, 0x0022_5fd0, Verify::Extract, false);
}
#[test]
#[cfg(feature = "xz")]
fn test_openwrt_netgear_ex6100v2() {
const FILE_NAME: &str = "openwrt-22.03.2-ipq40xx-generic-netgear_ex6100v2-squashfs-factory.img";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "9608a6cb558f1a4aa9659257f7c0b401f94343d10ec6e964fc4a452b4f91bea4".to_string(),
url: format!(
"https://downloads.openwrt.org/releases/22.03.2/targets/ipq40xx/generic/{FILE_NAME}"
),
}];
const TEST_PATH: &str = "test-assets/test_openwrt_netgear_ex6100v2";
full_test(&asset_defs, FILE_NAME, TEST_PATH, 0x002c_0080, Verify::Extract, false);
}
#[test]
#[cfg(any(feature = "gzip"))]
fn test_appimage_plexamp() {
const FILE_NAME: &str = "Plexamp-4.6.1.AppImage";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "6d2a3fba571da54e6869c2f7e1f7e6ca22f380a9a6f7a44a5ac675d1c656b584".to_string(),
url: format!("https://plexamp.plex.tv/plexamp.plex.tv/desktop/{FILE_NAME}"),
}];
const TEST_PATH: &str = "test-assets/test_appimage_plexamp";
if has_gzip_feature() {
full_test(&asset_defs, FILE_NAME, TEST_PATH, 0x2dfe8, Verify::Extract, true);
} else {
only_read(&asset_defs, FILE_NAME, TEST_PATH, 0x2dfe8);
}
}
#[test]
#[cfg(any(feature = "gzip"))]
fn test_appimage_firefox() {
const FILE_NAME: &str = "firefox-108.0.r20221215175817-x86_64.AppImage";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "78368f6c9c7080da7e3d7ceea8e64a8352c0f4ce39eb97d51de99943fd222e03".to_string(),
url: "https://github.com/srevinsaju/Firefox-Appimage/releases/download/firefox-v108.0.r20221215175817/firefox-108.0.r20221215175817-x86_64.AppImage".to_string(),
}];
const TEST_PATH: &str = "test-assets/test_appimage_firefox";
if has_gzip_feature() {
full_test(&asset_defs, FILE_NAME, TEST_PATH, 0x2f4c0, Verify::Extract, true);
} else {
only_read(&asset_defs, FILE_NAME, TEST_PATH, 0x2f4c0);
}
}
/// Archer\ AX1800\(US\)_V3_221016.zip from https://www.tp-link.com/us/support/download/archer-ax1800/#Firmware
/// (after ubi_extract_image)
#[test]
#[cfg(feature = "xz")]
fn test_tplink_ax1800() {
const FILE_NAME: &str = "img-1571203182_vol-ubi_rootfs.ubifs";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "e6adbea10615a8ed9f88e403e2478010696f421f4d69a790d37d97fe8921aa81".to_string(),
url: format!("https://wcampbell.dev/squashfs/testing/test_tplink1800/{FILE_NAME}"),
}];
const TEST_PATH: &str = "test-assets/test_tplink_ax1800";
full_test(&asset_defs, FILE_NAME, TEST_PATH, 0, Verify::Extract, false);
}
/// one /console char device
#[test]
#[cfg(feature = "xz")]
fn test_21() {
const FILE_NAME: &str = "out.squashfs";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "8fe23229be6c3e24b9565007f9f9a25e8e796270cf7ce8518da131e95bb90bad".to_string(),
url: format!("https://wcampbell.dev/squashfs/testing/test_21/{FILE_NAME}"),
}];
const TEST_PATH: &str = "test-assets/test_21";
full_test(&asset_defs, FILE_NAME, TEST_PATH, 0, Verify::Extract, false);
}
#[test]
#[cfg(feature = "xz")]
fn test_er605() {
const FILE_NAME: &str = "2611E3.squashfs";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "8f69958e5e25a7b9162342739305361dcd6b5a56970e342d85060f9f3be6313c".to_string(),
url: format!("https://wcampbell.dev/squashfs/testing/test_er605_v2_2.0.1/{FILE_NAME}"),
}];
const TEST_PATH: &str = "test-assets/test_er605_v2_2";
full_test(&asset_defs, FILE_NAME, TEST_PATH, 0, Verify::Extract, false);
}
#[test]
#[cfg(feature = "xz")]
fn test_re815xe() {
const FILE_NAME: &str = "870D97.squashfs";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "a73325883568ba47eaa5379c7768ded5661d61841a81d6c987371842960ac6a2".to_string(),
url: format!("https://wcampbell.dev/squashfs/testing/test_re815xev1.60/{FILE_NAME}"),
}];
const TEST_PATH: &str = "test-assets/test_re815_xev160";
full_test(&asset_defs, FILE_NAME, TEST_PATH, 0, Verify::Extract, false);
}
#[test]
#[cfg(feature = "xz")]
fn test_slow_archlinux_iso_rootfs() {
const FILE_NAME: &str = "airootfs.sfs";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "c5a2e50d08c06719e003e59f19c3c618bfd85c495112d10cf3871e17d9a17ad6".to_string(),
url: format!("https://archive.archlinux.org/iso/2023.06.01/arch/x86_64/{FILE_NAME}"),
}];
const TEST_PATH: &str = "test-assets/test_archlinux_iso_rootfs";
full_test(&asset_defs, FILE_NAME, TEST_PATH, 0, Verify::Extract, true);
}
#[test]
#[cfg(any(feature = "gzip"))]
fn test_many_files() {
const FILE_NAME: &str = "many_files.squashfs";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "43723443fa8acedbd67384ba9b02806f8a1e53014282eb9c871aa78ec08a0e44".to_string(),
url: format!("https://wcampbell.dev/squashfs/testing/test_many_files/{FILE_NAME}"),
}];
const TEST_PATH: &str = "test-assets/test_many_files";
if has_gzip_feature() {
full_test(&asset_defs, FILE_NAME, TEST_PATH, 0, Verify::Extract, true);
} else {
only_read(&asset_defs, FILE_NAME, TEST_PATH, 0);
}
}
#[test]
#[cfg(any(feature = "gzip"))]
fn test_many_dirs() {
const FILE_NAME: &str = "many_dirs.squashfs";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "2606237d69ebeee9a5da22a63c564921f3ec267c5377ddfbb3aa99409558daf0".to_string(),
url: format!("https://wcampbell.dev/squashfs/testing/test_many_dirs/{FILE_NAME}"),
}];
const TEST_PATH: &str = "test-assets/test_many_dirs";
if has_gzip_feature() {
full_test(&asset_defs, FILE_NAME, TEST_PATH, 0, Verify::Extract, true);
} else {
only_read(&asset_defs, FILE_NAME, TEST_PATH, 0);
}
}
#[test]
#[cfg(any(feature = "gzip"))]
fn test_few_dirs_many_files() {
const FILE_NAME: &str = "few_dirs_many_files.squashfs";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "66543a46cf96d5e59b47203c421f7967ad552057f09c625fc08131325bc995bd".to_string(),
url: format!("https://wcampbell.dev/squashfs/testing/test_few_dirs_many_files/{FILE_NAME}"),
}];
const TEST_PATH: &str = "test-assets/test_few_dirs_many_files";
if has_gzip_feature() {
full_test(&asset_defs, FILE_NAME, TEST_PATH, 0, Verify::Extract, true);
} else {
only_read(&asset_defs, FILE_NAME, TEST_PATH, 0);
}
}
#[test]
#[cfg(any(feature = "gzip"))]
fn test_socket_fifo() {
const FILE_NAME: &str = "squashfs_v4.specfile.bin";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "d27f2e4baf57df961b9aa7298ac390a54fd0d2c904bf1d4baaee49cbdd0a93f1".to_string(),
url: format!("https://wcampbell.dev/squashfs/testing/test_socket_fifo/{FILE_NAME}"),
}];
const TEST_PATH: &str = "test-assets/socket_fifo";
if has_gzip_feature() {
full_test_inner(&asset_defs, FILE_NAME, TEST_PATH, 0, Verify::Extract, true, false);
} else {
only_read(&asset_defs, FILE_NAME, TEST_PATH, 0);
}
}
#[test]
#[cfg(any(feature = "zstd"))]
fn no_qemu_test_crates_zstd() {
const FILE_NAME: &str = "crates-io.squashfs";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "f9d9938626c6cade032a3e54ce9e16fbabaf9e0cb6a0eb486c5c189d7fb9d13d".to_string(),
url: format!("https://wcampbell.dev/squashfs/testing/crates.io-zstd/{FILE_NAME}"),
}];
const TEST_PATH: &str = "test-assets/crates_io_zstd";
full_test(&asset_defs, FILE_NAME, TEST_PATH, 0, Verify::Extract, false);
}
#[test]
#[cfg(feature = "xz")]
fn test_slow_sparse_data_issue_623() {
const FILE_NAME: &str = "aosc-os_buildkit_20250606_amd64.squashfs";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "4c0e4f9784c13e2b205b27cb3da566ea70299970403733e7253bb2fddc9efba1".to_string(),
url: "https://releases.aosc.io/os-amd64/buildkit/aosc-os_buildkit_20250606_amd64.squashfs"
.to_string(),
}];
const TEST_PATH: &str = "test-assets/test_sparse_data_issue_623";
full_test(&asset_defs, FILE_NAME, TEST_PATH, 0, Verify::Extract, true);
}
#[test]
#[cfg(feature = "lz4")]
fn test_lz4_write_read() {
const FILE_NAME: &str = "testing.lz4.squash";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "5ea80b6aa0da73ef30fc3fe405b1700758819f85e7140be2278f5db3f9123a21".to_string(),
url: format!("https://wcampbell.dev/squashfs/testing/lz4/{FILE_NAME}"),
}];
const TEST_PATH: &str = "test-assets/test_lz4_write_read";
full_test(&asset_defs, FILE_NAME, TEST_PATH, 0, Verify::Extract, true);
}
07070100000034000081A40000000000000000000000016854DB9500000FE4000000000000000000000000000000000000003200000000backhand-0.23.0/backhand-test/tests/unsquashfs.rsmod common;
use assert_cmd::prelude::*;
use test_assets_ureq::TestAssetDef;
#[test]
#[cfg(feature = "xz")]
#[cfg(feature = "__test_unsquashfs")]
fn test_unsquashfs_cli() {
const FILE_NAME: &str = "870D97.squashfs";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "a73325883568ba47eaa5379c7768ded5661d61841a81d6c987371842960ac6a2".to_string(),
url: format!("wcampbell.dev/squashfs/testing/test_re815xev1/{FILE_NAME}"),
}];
const TEST_PATH: &str = "test-assets/test_re815_xev160";
common::download_backoff(&asset_defs, TEST_PATH);
let image_path = format!("{TEST_PATH}/{FILE_NAME}");
// single file
let cmd = common::get_base_command("unsquashfs-backhand")
.env("RUST_LOG", "none")
.args(["--path-filter", r#"/usr/bin/wget"#, "-l", "--quiet", &image_path])
.unwrap();
cmd.assert().stdout(
r#"/
/usr
/usr/bin
/usr/bin/wget
"#,
);
// multiple file
let cmd = common::get_base_command("unsquashfs-backhand")
.env("RUST_LOG", "none")
.args(["--path-filter", r#"/www/webpages/data"#, "-l", "--quiet", &image_path])
.unwrap();
cmd.assert().stdout(
r#"/
/www
/www/webpages
/www/webpages/data
/www/webpages/data/region.json
/www/webpages/data/timezone.json
"#,
);
// stat
//
// the following is squashfs-tools/unsquashfs -s
// Found a valid SQUASHFS 4:0 superblock on test-assets/test_re815_xev160/870D97.squashfs.
// Creation or last append time Fri Sep 2 07:26:23 2022
// Filesystem size 19957138 bytes (19489.39 Kbytes / 19.03 Mbytes)
// Compression xz
// Block size 131072
// Filesystem is exportable via NFS
// Inodes are compressed
// Data is compressed
// Uids/Gids (Id table) are compressed
// Fragments are compressed
// Always-use-fragments option is not specified
// Xattrs are compressed
// Duplicates are removed
// Number of fragments 169
// Number of inodes 1828
// Number of ids 1
// Number of xattr ids 0
let cmd = common::get_base_command("unsquashfs-backhand")
.env("RUST_LOG", "none")
.args(["-s", "--quiet", &image_path])
.unwrap();
cmd.assert().stdout(
r#"SuperBlock {
magic: [
0x000068,
0x000073,
0x000071,
0x000073,
],
inode_count: 0x000724,
mod_time: 0x6311e85f,
block_size: 0x020000,
frag_count: 0x0000a9,
compressor: Xz,
block_log: 0x000011,
flags: 0x0000c0,
id_count: 0x000001,
version_major: 0x000004,
version_minor: 0x000000,
root_inode: 0x3c6e1276,
bytes_used: 0x1308592,
id_table: 0x130858a,
xattr_table: 0xffffffffffffffff,
inode_table: 0x12fec8c,
dir_table: 0x1302d9c,
frag_table: 0x13076e0,
export_table: 0x1308574,
}
Compression Options: None
flag: data has been deduplicated
flag: nfs export table exists
"#,
);
}
#[test]
#[cfg(feature = "xz")]
fn test_unsquashfs_cli_auto_offset() {
use tempfile::tempdir;
const FILE_NAME: &str =
"openwrt-22.03.2-ath79-generic-tplink_archer-a7-v5-squashfs-factory.bin";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "ce0bfab79550885cb7ced388caaaa9bd454852bf1f9c34789abc498eb6c74df6".to_string(),
url: format!(
"https://downloads.openwrt.org/releases/22.03.2/targets/ath79/generic/{FILE_NAME}"
),
}];
const TEST_PATH: &str = "test-assets/test_openwrt_tplink_archera7v5";
common::download_backoff(&asset_defs, TEST_PATH);
let image_path = format!("{TEST_PATH}/{FILE_NAME}");
let tmp_dir = tempdir().unwrap();
{
let cmd = common::get_base_command("unsquashfs-backhand")
.env("RUST_LOG", "none")
.args([
"--auto-offset",
"-d",
tmp_dir.path().join("squashfs-root-c").to_str().unwrap(),
&image_path,
])
.unwrap();
cmd.assert().code(&[0] as &[i32]);
}
}
07070100000035000081A40000000000000000000000016854DB950000097A000000000000000000000000000000000000002400000000backhand-0.23.0/backhand/Cargo.toml[package]
name = "backhand"
version.workspace = true
authors.workspace = true
license.workspace = true
edition.workspace = true
repository.workspace = true
keywords.workspace = true
categories.workspace = true
rust-version = "1.84"
description = "Library for the reading, creating, and modification of SquashFS file systems"
readme = "../README.md"
[package.metadata.docs.rs]
features = ["xz", "gzip", "zstd", "document-features"]
rustdoc-args = ["--cfg", "docsrs"]
[dependencies]
deku = { version = "0.19.1", default-features = false, features = ["std"] }
tracing = { version = "0.1.40" }
thiserror = "2.0.1"
flate2 = { version = "1.1.0", optional = true, default-features = false, features = ["zlib-rs"] }
liblzma = { version = "0.4.1", optional = true, default-features = false, features = ["static", "parallel"] }
rust-lzo = { version = "0.6.2", optional = true }
zstd = { version = "0.13.2", optional = true }
zstd-safe = { version = "7.2.1", optional = true }
document-features = { version = "0.2.10", optional = true }
xxhash-rust = { version = "0.8.12", features = ["xxh64"] }
solana-nohash-hasher = "0.2.1"
lz4_flex = { version = "0.11.3", optional = true, default-features = false }
rayon = { version = "1.10.0", optional = true, default-features = false }
[features]
default = ["xz", "gzip", "zstd", "lz4", "parallel"]
## Enables xz compression inside library and binaries
xz = ["dep:liblzma"]
## Enables xz compression and forces static build inside library and binaries
xz-static = ["dep:liblzma", "liblzma?/static"]
## Enables gzip compression inside library and binaries using flate2 library with zlib-rs
gzip = ["any-flate2", "any-gzip", "dep:flate2"]
## This library is licensed GPL and thus disabled by default
lzo = ["dep:rust-lzo"]
## Enables zstd compression inside library and binaries
zstd = ["dep:zstd", "dep:zstd-safe"]
## Enables Lz4 compression
lz4 = ["dep:lz4_flex"]
## Internal only
any-gzip = []
## Internal only
any-flate2 = []
## Enable parallel decompression
parallel = ["dep:rayon"]
[dev-dependencies]
test-log = { version = "0.2.16", features = ["trace"] }
test-assets-ureq = "0.3.0"
assert_cmd = { version = "2.0.16", features = ["color", "color-auto"] }
dir-diff = { git = "https://github.com/wcampbell0x2a/dir-diff", branch = "add-checking-permissions" }
tempfile = "3.14.0"
criterion = "0.6"
libdeflater = "1.22.0"
[[bench]]
name = "benchmark"
harness = false
[lib]
bench = false
07070100000036000041ED0000000000000000000000026854DB9500000000000000000000000000000000000000000000002100000000backhand-0.23.0/backhand/benches07070100000037000081A40000000000000000000000016854DB9500001C19000000000000000000000000000000000000002E00000000backhand-0.23.0/backhand/benches/benchmark.rsuse std::fs::File;
use std::io::{BufReader, Cursor};
use std::process::Command;
use std::time::Duration;
use assert_cmd::prelude::*;
use backhand::{FilesystemReader, FilesystemWriter};
use criterion::*;
use tempfile::tempdir;
use test_assets_ureq::dl_test_files_backoff;
use test_assets_ureq::TestAssetDef;
fn read_write(file: File, offset: u64) {
let file = BufReader::new(file);
let og_filesystem = FilesystemReader::from_reader_with_offset(file, offset).unwrap();
let mut new_filesystem = FilesystemWriter::from_fs_reader(&og_filesystem).unwrap();
// convert to bytes
let mut output = Cursor::new(vec![]);
black_box(new_filesystem.write(&mut output).unwrap());
}
fn read(file: File, offset: u64) {
let file = BufReader::new(file);
black_box(FilesystemReader::from_reader_with_offset(file, offset).unwrap());
}
pub fn bench_read_write(c: &mut Criterion) {
let mut group = c.benchmark_group("write_read");
group.sampling_mode(SamplingMode::Flat);
group.sample_size(10);
const FILE_NAME_00: &str =
"openwrt-22.03.2-ipq40xx-generic-netgear_ex6100v2-squashfs-factory.img";
let asset_defs = [TestAssetDef {
filename: FILE_NAME_00.to_string(),
hash: "9608a6cb558f1a4aa9659257f7c0b401f94343d10ec6e964fc4a452b4f91bea4".to_string(),
url: format!(
"https://downloads.openwrt.org/releases/22.03.2/targets/ipq40xx/generic/{FILE_NAME_00}"
),
}];
const TEST_PATH_00: &str = "../backhand-cli/test-assets/test_openwrt_netgear_ex6100v2";
dl_test_files_backoff(&asset_defs, TEST_PATH_00, true, Duration::from_secs(1)).unwrap();
let og_path = format!("{TEST_PATH_00}/{FILE_NAME_00}");
group.bench_function("netgear_ax6100v2", |b| {
b.iter(|| {
let file = File::open(&og_path).unwrap();
read_write(file, 0x2c0080)
})
});
const FILE_NAME: &str = "img-1571203182_vol-ubi_rootfs.ubifs";
let asset_defs = [TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "e6adbea10615a8ed9f88e403e2478010696f421f4d69a790d37d97fe8921aa81".to_string(),
url: format!("https://wcampbell.dev/squashfs/testing/test_tplink1800/{FILE_NAME}"),
}];
const TEST_PATH: &str = "test-assets/test_tplink_ax1800";
dl_test_files_backoff(&asset_defs, TEST_PATH, true, Duration::from_secs(1)).unwrap();
let og_path = format!("{TEST_PATH}/{FILE_NAME}");
group.bench_function("tplink_ax1800", |b| {
b.iter(|| {
let file = File::open(&og_path).unwrap();
read_write(file, 0)
})
});
group.finish();
}
pub fn bench_read(c: &mut Criterion) {
let mut group = c.benchmark_group("only_read");
group.sampling_mode(SamplingMode::Flat);
group.sample_size(10);
const FILE_NAME_00: &str =
"openwrt-22.03.2-ipq40xx-generic-netgear_ex6100v2-squashfs-factory.img";
let asset_defs = [TestAssetDef {
filename: FILE_NAME_00.to_string(),
hash: "9608a6cb558f1a4aa9659257f7c0b401f94343d10ec6e964fc4a452b4f91bea4".to_string(),
url: format!(
"https://downloads.openwrt.org/releases/22.03.2/targets/ipq40xx/generic/{FILE_NAME_00}"
),
}];
const TEST_PATH_00: &str = "../backhand-cli/test-assets/test_openwrt_netgear_ex6100v2";
dl_test_files_backoff(&asset_defs, TEST_PATH_00, true, Duration::from_secs(1)).unwrap();
let og_path = format!("{TEST_PATH_00}/{FILE_NAME_00}");
group.bench_function("netgear_ax6100v2", |b| {
b.iter(|| {
let file = File::open(&og_path).unwrap();
read(file, 0x2c0080)
})
});
const FILE_NAME_01: &str = "img-1571203182_vol-ubi_rootfs.ubifs";
let asset_defs = [TestAssetDef {
filename: FILE_NAME_01.to_string(),
hash: "e6adbea10615a8ed9f88e403e2478010696f421f4d69a790d37d97fe8921aa81".to_string(),
url: format!("https://wcampbell.dev/squashfs/testing/test_tplink1800/{FILE_NAME_01}"),
}];
const TEST_PATH_01: &str = "test-assets/test_tplink_ax1800";
dl_test_files_backoff(&asset_defs, TEST_PATH_01, true, Duration::from_secs(1)).unwrap();
let og_path = format!("{TEST_PATH_01}/{FILE_NAME_01}");
group.bench_function("tplink_ax1800", |b| {
b.iter(|| {
let file = File::open(&og_path).unwrap();
read(file, 0)
})
});
group.finish();
}
pub fn bench_unsquashfs_extract(c: &mut Criterion) {
let mut group = c.benchmark_group("unsquashfs");
const FILE_NAME: &str = "openwrt-22.03.2-ipq40xx-generic-netgear_ex6100v2-squashfs-factory.img";
let asset_defs = &[TestAssetDef {
filename: FILE_NAME.to_string(),
hash: "9608a6cb558f1a4aa9659257f7c0b401f94343d10ec6e964fc4a452b4f91bea4".to_string(),
url: format!(
"https://downloads.openwrt.org/releases/22.03.2/targets/ipq40xx/generic/{FILE_NAME}"
),
}];
// Local, because we run unsquashfs
const TEST_PATH: &str = "test-assets/test_openwrt_netgear_ex6100v2";
dl_test_files_backoff(asset_defs, TEST_PATH, true, Duration::from_secs(1)).unwrap();
let path = format!("{TEST_PATH}/{FILE_NAME}");
let tmp_dir = tempdir().unwrap();
group.bench_function("full", |b| {
b.iter(|| {
let cmd = Command::new(assert_cmd::cargo::cargo_bin("unsquashfs-backhand"))
.env("RUST_LOG", "none")
.args([
"--auto-offset",
"--quiet",
"-d",
tmp_dir.path().join("squashfs-out").to_str().unwrap(),
&path,
])
.unwrap();
cmd.assert().code(&[0] as &[i32]);
})
});
// 38 nodes
group.bench_function("full-path-filter", |b| {
b.iter(|| {
let cmd = Command::new(assert_cmd::cargo::cargo_bin("unsquashfs-backhand"))
.env("RUST_LOG", "none")
.args([
"--auto-offset",
"--quiet",
"--path-filter",
"/usr/sbin/",
"-d",
tmp_dir.path().join("squashfs-out").to_str().unwrap(),
&path,
])
.unwrap();
cmd.assert().code(&[0] as &[i32]);
})
});
group.bench_function("list", |b| {
b.iter(|| {
let cmd = Command::new(assert_cmd::cargo::cargo_bin("unsquashfs-backhand"))
.env("RUST_LOG", "none")
.args(["--auto-offset", "-l", &path])
.unwrap();
cmd.assert().code(&[0] as &[i32]);
})
});
group.bench_function("list-path-filter", |b| {
b.iter(|| {
let cmd = Command::new(assert_cmd::cargo::cargo_bin("unsquashfs-backhand"))
.env("RUST_LOG", "none")
.args(["--auto-offset", "-l", "--path-filter", "/usr/sbin/", &path])
.unwrap();
cmd.assert().code(&[0] as &[i32]);
})
});
group.finish();
}
criterion_group!(benches, bench_read_write, bench_read, bench_unsquashfs_extract);
criterion_main!(benches);
07070100000038000081A40000000000000000000000016854DB95000000F0000000000000000000000000000000000000002600000000backhand-0.23.0/backhand/release.tomlpush=false
publish=false
pre-release-replacements = [
{file="src/lib.rs", search="backhand = .*", replace="{{crate_name}} = \"{{version}}\""},
{file="../README.md", search="backhand = .*", replace="{{crate_name}} = \"{{version}}\""},
]
07070100000039000041ED0000000000000000000000026854DB9500000000000000000000000000000000000000000000001D00000000backhand-0.23.0/backhand/src0707010000003A000081A40000000000000000000000016854DB95000039CA000000000000000000000000000000000000002B00000000backhand-0.23.0/backhand/src/compressor.rs//! Types of supported compression algorithms
use std::io::{Cursor, Read, Write};
use deku::prelude::*;
#[cfg(feature = "any-flate2")]
use flate2::read::ZlibEncoder;
#[cfg(feature = "any-flate2")]
use flate2::Compression;
#[cfg(feature = "xz")]
use liblzma::read::{XzDecoder, XzEncoder};
#[cfg(feature = "xz")]
use liblzma::stream::{Check, Filters, LzmaOptions, MtStreamBuilder};
use tracing::trace;
use crate::error::BackhandError;
use crate::filesystem::writer::{CompressionExtra, FilesystemCompressor};
use crate::kind::Kind;
use crate::metadata::MetadataWriter;
use crate::squashfs::Flags;
use crate::SuperBlock;
#[derive(Copy, Clone, Debug, PartialEq, Eq, DekuRead, DekuWrite, Default)]
#[deku(endian = "endian", ctx = "endian: deku::ctx::Endian")]
#[deku(id_type = "u16")]
#[repr(u16)]
#[rustfmt::skip]
pub enum Compressor {
None = 0,
Gzip = 1,
Lzma = 2,
Lzo = 3,
#[default]
Xz = 4,
Lz4 = 5,
Zstd = 6,
}
#[derive(Debug, DekuRead, DekuWrite, PartialEq, Eq, Clone, Copy)]
#[deku(endian = "endian", ctx = "endian: deku::ctx::Endian, compressor: Compressor")]
#[deku(id = "compressor")]
pub enum CompressionOptions {
#[deku(id = "Compressor::Gzip")]
Gzip(Gzip),
#[deku(id = "Compressor::Lzo")]
Lzo(Lzo),
#[deku(id = "Compressor::Xz")]
Xz(Xz),
#[deku(id = "Compressor::Lz4")]
Lz4(Lz4),
#[deku(id = "Compressor::Zstd")]
Zstd(Zstd),
#[deku(id = "Compressor::Lzma")]
Lzma,
}
#[derive(Debug, DekuRead, DekuWrite, PartialEq, Eq, Clone, Copy)]
#[deku(endian = "endian", ctx = "endian: deku::ctx::Endian")]
pub struct Gzip {
pub compression_level: u32,
pub window_size: u16,
// TODO: enum
pub strategies: u16,
}
#[derive(Debug, DekuRead, DekuWrite, PartialEq, Eq, Clone, Copy)]
#[deku(endian = "endian", ctx = "endian: deku::ctx::Endian")]
pub struct Lzo {
// TODO: enum
pub algorithm: u32,
pub compression_level: u32,
}
#[derive(Debug, DekuRead, DekuWrite, PartialEq, Eq, Clone, Copy)]
#[deku(endian = "endian", ctx = "endian: deku::ctx::Endian")]
pub struct Xz {
pub dictionary_size: u32,
pub filters: XzFilter,
// the rest of these fields are from OpenWRT. These are optional, as the kernel will ignore
// these fields when seen. We follow the same behaviour and don't attempt to parse if the bytes
// for these aren't found
// TODO: both are currently unused in this library
// TODO: in openwrt, git-hash:f97ad870e11ebe5f3dcf833dda6c83b9165b37cb shows that before
// official squashfs-tools had xz support they had the dictionary_size field as the last field
// in this struct. If we get test images, I guess we can support this in the future.
#[deku(cond = "!deku::reader.end()")]
pub bit_opts: Option<u16>,
#[deku(cond = "!deku::reader.end()")]
pub fb: Option<u16>,
}
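/// BCJ filter flags from the Xz compression options: one bit per target
/// architecture, decoded by the accessors below (bit 0 = x86, bit 1 = powerpc,
/// bit 2 = ia64, bit 3 = arm, bit 4 = armthumb, bit 5 = sparc).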
#[derive(Copy, Clone, Debug, PartialEq, Eq, DekuRead, DekuWrite)]
#[deku(endian = "endian", ctx = "endian: deku::ctx::Endian")]
pub struct XzFilter(u32);
impl XzFilter {
fn x86(&self) -> bool {
self.0 & 0x0001 == 0x0001
}
fn powerpc(&self) -> bool {
self.0 & 0x0002 == 0x0002
}
fn ia64(&self) -> bool {
self.0 & 0x0004 == 0x0004
}
fn arm(&self) -> bool {
self.0 & 0x0008 == 0x0008
}
fn armthumb(&self) -> bool {
self.0 & 0x0010 == 0x0010
}
fn sparc(&self) -> bool {
self.0 & 0x0020 == 0x0020
}
}
#[derive(Debug, DekuRead, DekuWrite, PartialEq, Eq, Clone, Copy)]
#[deku(endian = "endian", ctx = "endian: deku::ctx::Endian")]
pub struct Lz4 {
pub version: u32,
//TODO: enum
pub flags: u32,
}
#[derive(Debug, DekuRead, DekuWrite, PartialEq, Eq, Clone, Copy)]
#[deku(endian = "endian", ctx = "endian: deku::ctx::Endian")]
pub struct Zstd {
pub compression_level: u32,
}
/// Custom Compression support
///
/// For most instances, one should just use the [`DefaultCompressor`]. This will correctly
/// implement the Squashfs found within `squashfs-tools` and the Linux kernel.
///
/// However, the "wonderful world of vendor formats" has other ideas and has implemented their own
/// ideas of compression with custom tables and such! Thus, if the need arises you can implement
/// your own [`CompressionAction`] to override the compression and de-compression used in this
/// library by default.
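///
/// # Example
///
/// A minimal sketch (not compiled here) that defers everything to the
/// [`DefaultCompressor`], mirroring the pattern used by this crate's tests;
/// a real implementation would replace one of the bodies:
///
/// ```ignore
/// use backhand::compression::{CompressionAction, Compressor, DefaultCompressor};
/// use backhand::kind::Kind;
/// use backhand::{BackhandError, FilesystemCompressor, SuperBlock};
///
/// #[derive(Copy, Clone)]
/// struct PassthroughCompressor;
///
/// impl CompressionAction for PassthroughCompressor {
///     fn decompress(
///         &self,
///         bytes: &[u8],
///         out: &mut Vec<u8>,
///         compressor: Compressor,
///     ) -> Result<(), BackhandError> {
///         DefaultCompressor.decompress(bytes, out, compressor)
///     }
///
///     fn compress(
///         &self,
///         bytes: &[u8],
///         fc: FilesystemCompressor,
///         block_size: u32,
///     ) -> Result<Vec<u8>, BackhandError> {
///         DefaultCompressor.compress(bytes, fc, block_size)
///     }
///
///     fn compression_options(
///         &self,
///         superblock: &mut SuperBlock,
///         kind: &Kind,
///         fs_compressor: FilesystemCompressor,
///     ) -> Result<Vec<u8>, BackhandError> {
///         DefaultCompressor.compression_options(superblock, kind, fs_compressor)
///     }
/// }
/// ```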
pub trait CompressionAction {
/// Decompress function used for all decompression actions
///
/// # Arguments
///
/// * `bytes` - Input compressed bytes
/// * `out` - Output uncompressed bytes. You will need to call `out.resize(out.capacity(), 0)`
/// if your compressor relies on having a max sized buffer to write into.
/// * `compressor` - Compressor id from [SuperBlock]. This can be ignored if your custom
/// compressor doesn't follow the normal values of the Compressor Id.
///
/// [SuperBlock]: crate::SuperBlock
fn decompress(
&self,
bytes: &[u8],
out: &mut Vec<u8>,
compressor: Compressor,
) -> Result<(), BackhandError>;
/// Compression function used for all compression actions
///
/// # Arguments
/// * `bytes` - Input uncompressed bytes
/// * `fc` - Information from both the derived image and options added during compression
/// * `block_size` - Block size from [SuperBlock]
///
/// [SuperBlock]: crate::SuperBlock
fn compress(
&self,
bytes: &[u8],
fc: FilesystemCompressor,
block_size: u32,
) -> Result<Vec<u8>, BackhandError>;
/// Compression Options for non-default compression specific options
///
/// This function is called when calling [FilesystemWriter::write](crate::FilesystemWriter::write), and the returned bytes are the
/// section right after the SuperBlock.
///
/// # Arguments
/// * `superblock` - Mutable squashfs superblock info that will be written to disk after
/// this function is called. The fields `inode_count`, `block_size`,
/// `block_log` and `mod_time` *will* be set to `FilesystemWriter` options and can be trusted
/// in this function.
/// * `kind` - Kind information
/// * `fs_compressor` - Compression Options
fn compression_options(
&self,
superblock: &mut SuperBlock,
kind: &Kind,
fs_compressor: FilesystemCompressor,
) -> Result<Vec<u8>, BackhandError>;
}
/// Default compressor that handles the compression features that are enabled
#[derive(Copy, Clone)]
pub struct DefaultCompressor;
impl CompressionAction for DefaultCompressor {
/// Using the current compressor from the superblock, decompress bytes
fn decompress(
&self,
bytes: &[u8],
out: &mut Vec<u8>,
compressor: Compressor,
) -> Result<(), BackhandError> {
match compressor {
Compressor::None => out.extend_from_slice(bytes),
#[cfg(feature = "any-flate2")]
Compressor::Gzip => {
let mut decoder = flate2::read::ZlibDecoder::new(bytes);
decoder.read_to_end(out)?;
}
#[cfg(feature = "xz")]
Compressor::Xz => {
let mut decoder = XzDecoder::new(bytes);
decoder.read_to_end(out)?;
}
#[cfg(feature = "lzo")]
Compressor::Lzo => {
out.resize(out.capacity(), 0);
let (out_size, error) = rust_lzo::LZOContext::decompress_to_slice(bytes, out);
let out_size = out_size.len();
out.truncate(out_size);
if error != rust_lzo::LZOError::OK {
return Err(BackhandError::CorruptedOrInvalidSquashfs);
}
}
#[cfg(feature = "zstd")]
Compressor::Zstd => {
let mut decoder = zstd::bulk::Decompressor::new().unwrap();
decoder.decompress_to_buffer(bytes, out)?;
}
#[cfg(feature = "lz4")]
Compressor::Lz4 => {
out.resize(out.capacity(), 0u8);
let out_size = lz4_flex::decompress_into(bytes, out.as_mut_slice()).unwrap();
out.truncate(out_size);
}
_ => return Err(BackhandError::UnsupportedCompression(compressor)),
}
Ok(())
}
/// Using the current compressor from the superblock, compress bytes
fn compress(
&self,
bytes: &[u8],
fc: FilesystemCompressor,
block_size: u32,
) -> Result<Vec<u8>, BackhandError> {
match (fc.id, fc.options, fc.extra) {
(Compressor::None, None, _) => Ok(bytes.to_vec()),
#[cfg(feature = "xz")]
(Compressor::Xz, option @ (Some(CompressionOptions::Xz(_)) | None), extra) => {
let dict_size = match option {
None => block_size,
Some(CompressionOptions::Xz(option)) => option.dictionary_size,
Some(_) => unreachable!(),
};
let default_level = 6; // LZMA_DEFAULT
let level = match extra {
None => default_level,
Some(CompressionExtra::Xz(xz)) => {
if let Some(level) = xz.level {
level
} else {
default_level
}
}
};
let check = Check::Crc32;
let mut opts = LzmaOptions::new_preset(level).unwrap();
opts.dict_size(dict_size);
let mut filters = Filters::new();
if let Some(CompressionOptions::Xz(xz)) = option {
if xz.filters.x86() {
filters.x86();
}
if xz.filters.powerpc() {
filters.powerpc();
}
if xz.filters.ia64() {
filters.ia64();
}
if xz.filters.arm() {
filters.arm();
}
if xz.filters.armthumb() {
filters.arm_thumb();
}
if xz.filters.sparc() {
filters.sparc();
}
}
filters.lzma2(&opts);
let stream = MtStreamBuilder::new()
.threads(2)
.filters(filters)
.check(check)
.encoder()
.unwrap();
let mut encoder = XzEncoder::new_stream(Cursor::new(bytes), stream);
let mut buf = vec![];
encoder.read_to_end(&mut buf)?;
Ok(buf)
}
#[cfg(feature = "any-flate2")]
(Compressor::Gzip, option @ (Some(CompressionOptions::Gzip(_)) | None), _) => {
let compression_level = match option {
None => Compression::best(), // 9
Some(CompressionOptions::Gzip(option)) => {
Compression::new(option.compression_level)
}
Some(_) => unreachable!(),
};
// TODO(#8): Use window_size and strategies (current window size defaults to 15)
let mut encoder = ZlibEncoder::new(Cursor::new(bytes), compression_level);
let mut buf = vec![];
encoder.read_to_end(&mut buf)?;
Ok(buf)
}
#[cfg(feature = "lzo")]
(Compressor::Lzo, _, _) => {
let mut lzo = rust_lzo::LZOContext::new();
let mut buf = vec![0; rust_lzo::worst_compress(bytes.len())];
let error = lzo.compress(bytes, &mut buf);
if error != rust_lzo::LZOError::OK {
return Err(BackhandError::CorruptedOrInvalidSquashfs);
}
Ok(buf)
}
#[cfg(feature = "zstd")]
(Compressor::Zstd, option @ (Some(CompressionOptions::Zstd(_)) | None), _) => {
let compression_level = match option {
None => 3,
Some(CompressionOptions::Zstd(option)) => option.compression_level,
Some(_) => unreachable!(),
};
let mut encoder = zstd::bulk::Compressor::new(compression_level as i32)?;
let buffer_len = zstd_safe::compress_bound(bytes.len());
let mut buf = Vec::with_capacity(buffer_len);
encoder.compress_to_buffer(bytes, &mut buf)?;
Ok(buf)
}
#[cfg(feature = "lz4")]
(Compressor::Lz4, _option, _) => Ok(lz4_flex::compress(bytes)),
_ => Err(BackhandError::UnsupportedCompression(fc.id)),
}
}
/// Using the current compressor options, create compression options
fn compression_options(
&self,
superblock: &mut SuperBlock,
kind: &Kind,
fs_compressor: FilesystemCompressor,
) -> Result<Vec<u8>, BackhandError> {
let mut w = Cursor::new(vec![]);
// Write compression options, if any
if let Some(options) = &fs_compressor.options {
trace!("writing compression options");
superblock.flags |= Flags::CompressorOptionsArePresent as u16;
let mut compression_opt_buf_out = Cursor::new(vec![]);
let mut writer = Writer::new(&mut compression_opt_buf_out);
match options {
CompressionOptions::Gzip(gzip) => {
gzip.to_writer(&mut writer, kind.inner.type_endian)?
}
CompressionOptions::Lz4(lz4) => {
lz4.to_writer(&mut writer, kind.inner.type_endian)?
}
CompressionOptions::Zstd(zstd) => {
zstd.to_writer(&mut writer, kind.inner.type_endian)?
}
CompressionOptions::Xz(xz) => xz.to_writer(&mut writer, kind.inner.type_endian)?,
CompressionOptions::Lzo(lzo) => {
lzo.to_writer(&mut writer, kind.inner.type_endian)?
}
CompressionOptions::Lzma => {}
}
let mut metadata = MetadataWriter::new(
fs_compressor,
superblock.block_size,
Kind { inner: kind.inner.clone() },
);
metadata.write_all(compression_opt_buf_out.get_ref())?;
metadata.finalize(&mut w)?;
}
Ok(w.into_inner())
}
}
0707010000003B000081A40000000000000000000000016854DB950000308D000000000000000000000000000000000000002500000000backhand-0.23.0/backhand/src/data.rs//! File Data
use std::collections::HashMap;
use std::io::{Read, Seek, Write};
use deku::prelude::*;
use solana_nohash_hasher::IntMap;
use tracing::trace;
use xxhash_rust::xxh64::xxh64;
use crate::compressor::CompressionAction;
use crate::error::BackhandError;
use crate::filesystem::writer::FilesystemCompressor;
use crate::fragment::Fragment;
use crate::reader::WriteSeek;
#[cfg(not(feature = "parallel"))]
use crate::filesystem::reader_no_parallel::SquashfsRawData;
#[cfg(feature = "parallel")]
use crate::filesystem::reader_parallel::SquashfsRawData;
// bitflag for data size field in inode for signifying that the data is uncompressed
const DATA_STORED_UNCOMPRESSED: u32 = 1 << 24;
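// Illustrative example: an uncompressed 0x1000-byte block is encoded as
// 0x0100_1000 (bit 24 set); DataSize::size() masks off bit 24 to recover
// 0x1000, while DataSize::uncompressed() reports true.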
#[derive(Copy, Clone, Debug, PartialEq, Eq, DekuRead, DekuWrite)]
#[deku(endian = "endian", ctx = "endian: deku::ctx::Endian")]
pub struct DataSize(u32);
impl DataSize {
#[inline]
pub fn new(size: u32, uncompressed: bool) -> Self {
let mut value: u32 = size;
if value > DATA_STORED_UNCOMPRESSED {
panic!("value is too big");
}
if uncompressed {
value |= DATA_STORED_UNCOMPRESSED;
}
Self(value)
}
#[inline]
pub fn new_compressed(size: u32) -> Self {
Self::new(size, false)
}
#[inline]
pub fn new_uncompressed(size: u32) -> Self {
Self::new(size, true)
}
#[inline]
pub fn uncompressed(&self) -> bool {
self.0 & DATA_STORED_UNCOMPRESSED != 0
}
#[inline]
pub fn set_uncompressed(&mut self) {
self.0 |= DATA_STORED_UNCOMPRESSED
}
#[inline]
pub fn set_compressed(&mut self) {
self.0 &= !DATA_STORED_UNCOMPRESSED
}
#[inline]
pub fn size(&self) -> u32 {
self.0 & !DATA_STORED_UNCOMPRESSED
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Added {
// Only Data was added
Data { blocks_start: u32, block_sizes: Vec<DataSize> },
// Only Fragment was added
Fragment { frag_index: u32, block_offset: u32 },
}
struct DataWriterChunkReader<R: std::io::Read> {
chunk: Vec<u8>,
file_len: usize,
reader: R,
}
impl<R: std::io::Read> DataWriterChunkReader<R> {
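/// Fill `self.chunk` from the reader, retrying on `ErrorKind::Interrupted`,
/// until the chunk is full or EOF is hit; a chunk shorter than the buffer
/// signals that EOF was reached.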
pub fn read_chunk(&mut self) -> std::io::Result<&[u8]> {
use std::io::ErrorKind;
let mut buf: &mut [u8] = &mut self.chunk;
let mut read_len = 0;
while !buf.is_empty() {
match self.reader.read(buf) {
Ok(0) => break,
Ok(n) => {
read_len += n;
let tmp = buf;
buf = &mut tmp[n..];
}
Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
Err(e) => return Err(e),
}
}
self.file_len += read_len;
Ok(&self.chunk[..read_len])
}
}
pub(crate) struct DataWriter<'a> {
kind: &'a dyn CompressionAction,
block_size: u32,
fs_compressor: FilesystemCompressor,
/// If Some, cache of HashMap<file_len, HashMap<hash, (file_len, Added)>>
#[allow(clippy::type_complexity)]
dup_cache: Option<IntMap<u64, IntMap<u64, (usize, Added)>>>,
/// Un-written fragment_bytes
pub(crate) fragment_bytes: Vec<u8>,
pub(crate) fragment_table: Vec<Fragment>,
}
impl<'a> DataWriter<'a> {
pub fn new(
kind: &'a dyn CompressionAction,
fs_compressor: FilesystemCompressor,
block_size: u32,
no_duplicate_files: bool,
) -> Self {
Self {
kind,
block_size,
fs_compressor,
dup_cache: no_duplicate_files.then_some(HashMap::default()),
fragment_bytes: Vec::with_capacity(block_size as usize),
fragment_table: vec![],
}
}
/// Add to data writer, either a pre-compressed Data or Fragment
// TODO: support tail-end fragments (off by default in squashfs-tools/mksquashfs)
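// Fast path used when copying a file out of an existing image: full data
// blocks are copied verbatim without re-compression; fragments are
// decompressed and re-added (as a new fragment, or re-compressed as a block
// per the TODO above).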
pub(crate) fn just_copy_it<W: WriteSeek>(
&mut self,
mut reader: SquashfsRawData,
mut writer: W,
) -> Result<(usize, Added), BackhandError> {
// just clone it; the block sizes were never modified, so copy as-is
let mut block_sizes = reader.file.file.block_sizes().to_vec();
let mut read_buf = vec![];
let mut decompress_buf = vec![];
// if the first block is not full (fragment), store only a fragment;
// otherwise proceed to store blocks
let blocks_start = writer.stream_position()? as u32;
let first_block = match reader.next_block(&mut read_buf) {
Some(Ok(first_block)) => first_block,
Some(Err(x)) => return Err(x),
None => return Ok((0, Added::Data { blocks_start, block_sizes })),
};
// write and early return if fragment
if first_block.fragment {
reader.decompress(first_block, &mut read_buf, &mut decompress_buf)?;
// if this doesn't fit in the current fragment bytes
// compress the current fragment bytes and add to data_bytes
if (decompress_buf.len() + self.fragment_bytes.len()) > self.block_size as usize {
self.finalize(writer)?;
}
// add to fragment bytes
let frag_index = self.fragment_table.len() as u32;
let block_offset = self.fragment_bytes.len() as u32;
self.fragment_bytes.write_all(&decompress_buf)?;
return Ok((decompress_buf.len(), Added::Fragment { frag_index, block_offset }));
}
// if it is a block, just copy it
writer.write_all(&read_buf)?;
while let Some(block) = reader.next_block(&mut read_buf) {
let block = block?;
if block.fragment {
reader.decompress(block, &mut read_buf, &mut decompress_buf)?;
// TODO: support tail-end fragments, for now just treat it like a block
let cb =
self.kind.compress(&decompress_buf, self.fs_compressor, self.block_size)?;
// compression didn't reduce size
if cb.len() > decompress_buf.len() {
// store uncompressed
block_sizes.push(DataSize::new_uncompressed(decompress_buf.len() as u32));
writer.write_all(&decompress_buf)?;
} else {
// store compressed
block_sizes.push(DataSize::new_compressed(cb.len() as u32));
writer.write_all(&cb)?;
}
} else {
// if it is a block, just copy it
writer.write_all(&read_buf)?;
}
}
let file_size = reader.file.file.file_len();
Ok((file_size, Added::Data { blocks_start, block_sizes }))
}
/// Add to data writer, either a Data or Fragment
///
/// If `self.dup_cache` is enabled, return the already-added `(usize, Added)` if a
/// duplicate is found
// TODO: support tail-end fragments (off by default in squashfs-tools/mksquashfs)
pub(crate) fn add_bytes<W: WriteSeek>(
&mut self,
reader: impl Read,
mut writer: W,
) -> Result<(usize, Added), BackhandError> {
let mut chunk_reader = DataWriterChunkReader {
chunk: vec![0u8; self.block_size as usize],
file_len: 0,
reader,
};
// read the first chunk (up to one full block) of the file
let mut chunk = chunk_reader.read_chunk()?;
// chunk is smaller than a full block: store the whole file as a fragment
if chunk.len() != self.block_size as usize {
// if this doesn't fit in the current fragment bytes
// compress the current fragment bytes and add to data_bytes
if (chunk.len() + self.fragment_bytes.len()) > self.block_size as usize {
self.finalize(writer)?;
}
// add to fragment bytes
let frag_index = self.fragment_table.len() as u32;
let block_offset = self.fragment_bytes.len() as u32;
self.fragment_bytes.write_all(chunk)?;
return Ok((chunk_reader.file_len, Added::Fragment { frag_index, block_offset }));
}
// Add to data bytes
let blocks_start = writer.stream_position()? as u32;
let mut block_sizes = vec![];
// If duplicate file checking is enabled and this data hashes the same as a previous file, reuse that file's position
if let Some(dup_cache) = &self.dup_cache {
if let Some(c) = dup_cache.get(&(chunk.len() as u64)) {
let hash = xxh64(chunk, 0);
if let Some(res) = c.get(&hash) {
trace!("duplicate file data found");
return Ok(res.clone());
}
}
}
// Save information needed to add to duplicate_cache later
let chunk_len = chunk.len();
let hash = xxh64(chunk, 0);
while !chunk.is_empty() {
let cb = self.kind.compress(chunk, self.fs_compressor, self.block_size)?;
// compression didn't reduce size
if cb.len() > chunk.len() {
// store uncompressed
block_sizes.push(DataSize::new_uncompressed(chunk.len() as u32));
writer.write_all(chunk)?;
} else {
// store compressed
block_sizes.push(DataSize::new_compressed(cb.len() as u32));
writer.write_all(&cb)?;
}
chunk = chunk_reader.read_chunk()?;
}
// Add to duplicate information cache
let added = (chunk_reader.file_len, Added::Data { blocks_start, block_sizes });
// If duplicate file checking is enabled, then record this file in its cache
if let Some(dup_cache) = &mut self.dup_cache {
if let Some(entry) = dup_cache.get_mut(&(chunk_len as u64)) {
entry.insert(hash, added.clone());
} else {
let mut hashmap = IntMap::default();
hashmap.insert(hash, added.clone());
dup_cache.insert(chunk_len as u64, hashmap);
}
}
Ok(added)
}
/// Compress the accumulated under-length fragment bytes, write them to data, add an
/// entry to the fragment table, and clear the current fragment_bytes
pub fn finalize<W: Write + Seek>(&mut self, mut writer: W) -> Result<(), BackhandError> {
let start = writer.stream_position()?;
let cb = self.kind.compress(&self.fragment_bytes, self.fs_compressor, self.block_size)?;
// compression didn't reduce size
let size = if cb.len() > self.fragment_bytes.len() {
// store uncompressed
writer.write_all(&self.fragment_bytes)?;
DataSize::new_uncompressed(self.fragment_bytes.len() as u32)
} else {
// store compressed
writer.write_all(&cb)?;
DataSize::new_compressed(cb.len() as u32)
};
self.fragment_table.push(Fragment::new(start, size, 0));
self.fragment_bytes.clear();
Ok(())
}
}
#[cfg(test)]
mod tests {
use std::io::Cursor;
use super::*;
use crate::{
compression::{Compressor, DefaultCompressor},
DEFAULT_BLOCK_SIZE,
};
#[test]
#[cfg(feature = "gzip")]
fn test_duplicate_check() {
let mut data_writer = DataWriter::new(
&DefaultCompressor,
FilesystemCompressor::new(Compressor::Gzip, None).unwrap(),
DEFAULT_BLOCK_SIZE,
true,
);
let bytes = [0xff_u8; DEFAULT_BLOCK_SIZE as usize * 2];
let mut writer = Cursor::new(vec![]);
let added_1 = data_writer.add_bytes(&bytes[..], &mut writer).unwrap();
let added_2 = data_writer.add_bytes(&bytes[..], &mut writer).unwrap();
assert_eq!(added_1, added_2);
}
#[test]
#[cfg(feature = "gzip")]
fn test_no_duplicate_check() {
let mut data_writer = DataWriter::new(
&DefaultCompressor,
FilesystemCompressor::new(Compressor::Gzip, None).unwrap(),
DEFAULT_BLOCK_SIZE,
false,
);
let bytes = [0xff_u8; DEFAULT_BLOCK_SIZE as usize * 2];
let mut writer = Cursor::new(vec![]);
let added_1 = data_writer.add_bytes(&bytes[..], &mut writer).unwrap();
let added_2 = data_writer.add_bytes(&bytes[..], &mut writer).unwrap();
assert_ne!(added_1, added_2);
}
}
0707010000003C000081A40000000000000000000000016854DB95000010AA000000000000000000000000000000000000002400000000backhand-0.23.0/backhand/src/dir.rs//! Storage of directories with references to inodes
//!
//! For each directory inode, the directory table stores a linear list of all entries,
//! with references back to the inodes that describe those entries.
use std::ffi::OsStr;
use std::path::{Component, Path};
use deku::prelude::*;
use crate::inode::InodeId;
use crate::unix_string::OsStrExt;
use crate::BackhandError;
#[derive(Debug, DekuRead, DekuWrite, Clone, PartialEq, Eq)]
#[deku(ctx = "type_endian: deku::ctx::Endian")]
#[deku(endian = "type_endian")]
pub struct Dir {
/// One less than the number of entries following the header.
///
/// A header must be followed by AT MOST 256 entries. If there are more entries, a new header MUST be emitted.
#[deku(assert = "*count <= 256")]
pub(crate) count: u32,
/// The location of the metadata block in the inode table where the inodes are stored.
/// This is relative to the inode table start from the super block.
pub(crate) start: u32,
/// An arbitrary inode number.
/// The entries that follow store their inode number as a difference to this.
pub(crate) inode_num: u32,
#[deku(count = "*count + 1")]
pub(crate) dir_entries: Vec<DirEntry>,
}
impl Dir {
pub fn new(lowest_inode: u32) -> Self {
Self {
count: u32::default(),
start: u32::default(),
inode_num: lowest_inode,
dir_entries: vec![],
}
}
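/// Append an entry and refresh `count`, which is stored as (entries - 1) to
/// match the `*count + 1` deku attribute on `dir_entries`.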
pub fn push(&mut self, entry: DirEntry) {
self.dir_entries.push(entry);
self.count = (self.dir_entries.len() - 1) as u32;
}
}
#[derive(Debug, DekuRead, DekuWrite, Clone, PartialEq, Eq)]
#[deku(endian = "endian", ctx = "endian: deku::ctx::Endian")]
pub struct DirEntry {
/// An offset into the uncompressed inode metadata block.
pub(crate) offset: u16,
/// The difference of this inode’s number to the reference stored in the header.
pub(crate) inode_offset: i16,
/// The inode type. For extended inodes, the basic type is stored here instead.
pub(crate) t: InodeId,
/// One less than the size of the entry name.
pub(crate) name_size: u16,
// TODO: CString
/// The file name of the entry without a trailing null byte. Has name size + 1 bytes.
#[deku(count = "*name_size + 1")]
pub(crate) name: Vec<u8>,
}
impl DirEntry {
pub fn name(&self) -> Result<&Path, BackhandError> {
// allow root and nothing else
if self.name == Component::RootDir.as_os_str().as_bytes() {
return Ok(Path::new(Component::RootDir.as_os_str()));
}
let path = Path::new(OsStr::from_bytes(&self.name));
// if not a simple filename, return an error
let filename = path.file_name().map(OsStrExt::as_bytes);
if filename != Some(&self.name) {
return Err(BackhandError::InvalidFilePath);
}
Ok(path)
}
}
#[derive(Debug, DekuRead, DekuWrite, Clone, PartialEq, Eq)]
#[deku(endian = "endian", ctx = "endian: deku::ctx::Endian")]
pub struct DirectoryIndex {
/// This stores a byte offset from the first directory header to the current header,
/// as if the uncompressed directory metadata blocks were laid out in memory consecutively.
pub(crate) index: u32,
/// Start offset of a directory table metadata block, relative to the directory table start.
pub(crate) start: u32,
#[deku(assert = "*name_size < 256")]
pub(crate) name_size: u32,
#[deku(count = "*name_size + 1")]
pub(crate) name: Vec<u8>,
}
impl DirectoryIndex {
pub fn name(&self) -> String {
std::str::from_utf8(&self.name).unwrap().to_string()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn no_invalid_dir_entry() {
// just root
let dir = DirEntry {
offset: 0x300,
inode_offset: 0x0,
t: InodeId::BasicDirectory,
name_size: 0x1,
name: b"/".to_vec(),
};
assert_eq!(Path::new("/"), dir.name().unwrap());
// InvalidFilePath
let dir = DirEntry {
offset: 0x300,
inode_offset: 0x0,
t: InodeId::BasicDirectory,
name_size: 0x1,
name: b"/nice/".to_vec(),
};
assert!(dir.name().is_err());
}
}
0707010000003D000081A40000000000000000000000016854DB9500003A1C000000000000000000000000000000000000002600000000backhand-0.23.0/backhand/src/entry.rsuse std::ffi::OsStr;
use std::fmt;
use crate::data::Added;
use crate::dir::{Dir, DirEntry};
use crate::inode::{
BasicDeviceSpecialFile, BasicDirectory, BasicFile, BasicSymlink, ExtendedDirectory, IPCNode,
Inode, InodeHeader, InodeId, InodeInner,
};
use crate::kinds::Kind;
use crate::metadata::MetadataWriter;
use crate::squashfs::SuperBlock;
use crate::unix_string::OsStrExt;
use crate::{Id, NodeHeader, SquashfsBlockDevice, SquashfsCharacterDevice, SquashfsSymlink};
#[derive(Clone)]
pub(crate) struct Entry<'a> {
pub start: u32,
pub offset: u16,
pub inode: u32,
pub t: InodeId,
pub name_size: u16,
pub name: &'a [u8],
}
impl<'a> Entry<'a> {
pub fn name(&self) -> String {
std::str::from_utf8(self.name).unwrap().to_string()
}
/// Write data and metadata for path node (Basic Directory or ExtendedDirectory)
#[allow(clippy::too_many_arguments)]
pub fn path(
name: &'a OsStr,
header: NodeHeader,
inode: u32,
children_num: usize,
parent_inode: u32,
inode_writer: &mut MetadataWriter,
file_size: usize,
block_offset: u16,
block_index: u32,
superblock: &SuperBlock,
kind: &Kind,
id_table: &[Id],
) -> Self {
let uid = id_table.iter().position(|a| a.num == header.uid).unwrap() as u16;
let gid = id_table.iter().position(|a| a.num == header.gid).unwrap() as u16;
let header = InodeHeader {
inode_number: inode,
uid,
gid,
permissions: header.permissions,
mtime: header.mtime,
};
// if file_size won't fit in a basic directory inode's u16, create an extended directory
let dir_inode = if file_size > u16::MAX as usize {
Inode::new(
InodeId::ExtendedDirectory,
header,
InodeInner::ExtendedDirectory(ExtendedDirectory {
link_count: 2 + u32::try_from(children_num).unwrap(),
file_size: file_size.try_into().unwrap(), // u32
block_index,
parent_inode,
// TODO: Support Directory Index
index_count: 0,
block_offset,
// TODO(#32): Support xattr
xattr_index: 0xffff_ffff,
// TODO: Support Directory Index
dir_index: vec![],
}),
)
} else {
Inode::new(
InodeId::BasicDirectory,
header,
InodeInner::BasicDirectory(BasicDirectory {
block_index,
link_count: 2 + u32::try_from(children_num).unwrap(),
file_size: file_size.try_into().unwrap(), // u16
block_offset,
parent_inode,
}),
)
};
dir_inode.to_bytes(name.as_bytes(), inode_writer, superblock, kind)
}
/// Write data and metadata for file node
#[allow(clippy::too_many_arguments)]
pub fn file(
node_path: &'a OsStr,
header: NodeHeader,
inode: u32,
inode_writer: &mut MetadataWriter,
file_size: usize,
added: &Added,
superblock: &SuperBlock,
kind: &Kind,
id_table: &[Id],
) -> Self {
let uid = id_table.iter().position(|a| a.num == header.uid).unwrap() as u16;
let gid = id_table.iter().position(|a| a.num == header.gid).unwrap() as u16;
let header = InodeHeader {
inode_number: inode,
uid,
gid,
permissions: header.permissions,
mtime: header.mtime,
};
let basic_file = match added {
Added::Data { blocks_start, block_sizes } => {
BasicFile {
blocks_start: *blocks_start,
frag_index: 0xffffffff, // <- no fragment
block_offset: 0x0, // <- no fragment
file_size: file_size.try_into().unwrap(),
block_sizes: block_sizes.to_vec(),
}
}
Added::Fragment { frag_index, block_offset } => BasicFile {
blocks_start: 0,
frag_index: *frag_index,
block_offset: *block_offset,
file_size: file_size.try_into().unwrap(),
block_sizes: vec![],
},
};
let file_inode = Inode::new(InodeId::BasicFile, header, InodeInner::BasicFile(basic_file));
file_inode.to_bytes(node_path.as_bytes(), inode_writer, superblock, kind)
}
/// Write data and metadata for symlink node
#[allow(clippy::too_many_arguments)]
pub fn symlink(
node_path: &'a OsStr,
header: NodeHeader,
symlink: &SquashfsSymlink,
inode: u32,
inode_writer: &mut MetadataWriter,
superblock: &SuperBlock,
kind: &Kind,
id_table: &[Id],
) -> Self {
let uid = id_table.iter().position(|a| a.num == header.uid).unwrap() as u16;
let gid = id_table.iter().position(|a| a.num == header.gid).unwrap() as u16;
let header = InodeHeader {
inode_number: inode,
uid,
gid,
permissions: header.permissions,
mtime: header.mtime,
};
let link = symlink.link.as_os_str().as_bytes();
let sym_inode = Inode::new(
InodeId::BasicSymlink,
header,
InodeInner::BasicSymlink(BasicSymlink {
link_count: 0x1,
target_size: link.len().try_into().unwrap(),
target_path: link.to_vec(),
}),
);
sym_inode.to_bytes(node_path.as_bytes(), inode_writer, superblock, kind)
}
/// Write data and metadata for char device node
#[allow(clippy::too_many_arguments)]
pub fn char(
node_path: &'a OsStr,
header: NodeHeader,
char_device: &SquashfsCharacterDevice,
inode: u32,
inode_writer: &mut MetadataWriter,
superblock: &SuperBlock,
kind: &Kind,
id_table: &[Id],
) -> Self {
let uid = id_table.iter().position(|a| a.num == header.uid).unwrap() as u16;
let gid = id_table.iter().position(|a| a.num == header.gid).unwrap() as u16;
let header = InodeHeader {
inode_number: inode,
uid,
gid,
permissions: header.permissions,
mtime: header.mtime,
};
let char_inode = Inode::new(
InodeId::BasicCharacterDevice,
header,
InodeInner::BasicCharacterDevice(BasicDeviceSpecialFile {
link_count: 0x1,
device_number: char_device.device_number,
}),
);
char_inode.to_bytes(node_path.as_bytes(), inode_writer, superblock, kind)
}
/// Write data and metadata for block device node
#[allow(clippy::too_many_arguments)]
pub fn block_device(
node_path: &'a OsStr,
header: NodeHeader,
block_device: &SquashfsBlockDevice,
inode: u32,
inode_writer: &mut MetadataWriter,
superblock: &SuperBlock,
kind: &Kind,
id_table: &[Id],
) -> Self {
let uid = id_table.iter().position(|a| a.num == header.uid).unwrap() as u16;
let gid = id_table.iter().position(|a| a.num == header.gid).unwrap() as u16;
let header = InodeHeader {
inode_number: inode,
uid,
gid,
permissions: header.permissions,
mtime: header.mtime,
};
let block_inode = Inode::new(
InodeId::BasicBlockDevice,
header,
InodeInner::BasicBlockDevice(BasicDeviceSpecialFile {
link_count: 0x1,
device_number: block_device.device_number,
}),
);
block_inode.to_bytes(node_path.as_bytes(), inode_writer, superblock, kind)
}
/// Write data and metadata for named pipe node
#[allow(clippy::too_many_arguments)]
pub fn named_pipe(
node_path: &'a OsStr,
header: NodeHeader,
inode: u32,
inode_writer: &mut MetadataWriter,
superblock: &SuperBlock,
kind: &Kind,
id_table: &[Id],
) -> Self {
let uid = id_table.iter().position(|a| a.num == header.uid).unwrap() as u16;
let gid = id_table.iter().position(|a| a.num == header.gid).unwrap() as u16;
let header = InodeHeader {
inode_number: inode,
uid,
gid,
permissions: header.permissions,
mtime: header.mtime,
};
let char_inode = Inode::new(
InodeId::BasicNamedPipe,
header,
InodeInner::BasicNamedPipe(IPCNode { link_count: 0x1 }),
);
char_inode.to_bytes(node_path.as_bytes(), inode_writer, superblock, kind)
}
/// Write data and metadata for socket
#[allow(clippy::too_many_arguments)]
pub fn socket(
node_path: &'a OsStr,
header: NodeHeader,
inode: u32,
inode_writer: &mut MetadataWriter,
superblock: &SuperBlock,
kind: &Kind,
id_table: &[Id],
) -> Self {
let uid = id_table.iter().position(|a| a.num == header.uid).unwrap() as u16;
let gid = id_table.iter().position(|a| a.num == header.gid).unwrap() as u16;
let header = InodeHeader {
inode_number: inode,
uid,
gid,
permissions: header.permissions,
mtime: header.mtime,
};
let char_inode = Inode::new(
InodeId::BasicSocket,
header,
InodeInner::BasicSocket(IPCNode { link_count: 0x1 }),
);
char_inode.to_bytes(node_path.as_bytes(), inode_writer, superblock, kind)
}
}
impl fmt::Debug for Entry<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Entry")
.field("start", &self.start)
.field("offset", &self.offset)
.field("inode", &self.inode)
.field("t", &self.t)
.field("name_size", &self.name_size)
.field("name", &self.name())
.finish()
}
}
impl Entry<'_> {
fn create_dir(creating_dir: &Vec<&Self>, start: u32, lowest_inode: u32) -> Dir {
let mut dir = Dir::new(lowest_inode);
dir.count = creating_dir.len().try_into().unwrap();
if dir.count >= 256 {
panic!("dir.count({}) >= 256:", dir.count);
}
dir.start = start;
for e in creating_dir {
let inode = e.inode;
let new_entry = DirEntry {
offset: e.offset,
inode_offset: (inode - lowest_inode).try_into().unwrap(),
t: e.t.into_base_type(),
name_size: e.name_size,
name: e.name.to_vec(),
};
dir.push(new_entry);
}
dir
}
/// Create directories from entries; the input needs to be alphabetically sorted
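///
/// A new `Dir` is started whenever the metadata `start` changes, the current
/// `Dir` reaches its entry cap (255 here), or an inode delta would overflow
/// the i16 `inode_offset`.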
pub(crate) fn into_dir(entries: Vec<Self>) -> Vec<Dir> {
let mut dirs = vec![];
let mut creating_dir = vec![];
let mut lowest_inode = u32::MAX;
let mut iter = entries.iter().peekable();
let mut creating_start = if let Some(entry) = iter.peek() {
entry.start
} else {
return vec![];
};
while let Some(e) = iter.next() {
if e.inode < lowest_inode {
lowest_inode = e.inode;
}
creating_dir.push(e);
// if there is a following entry, decide whether the current Dir must be closed
if let Some(next) = &iter.peek() {
// would the next entry's inode delta from lowest_inode overflow the i16 inode_offset?
let max_inode = (next.inode as u64).abs_diff(lowest_inode as u64) > i16::MAX as u64;
// close the current Dir if the metadata start changes, the entry cap is
// reached, or the inode delta would overflow
if next.start != creating_start || creating_dir.len() >= 255 || max_inode {
let dir = Self::create_dir(&creating_dir, creating_start, lowest_inode);
dirs.push(dir);
creating_dir = vec![];
creating_start = next.start;
lowest_inode = u32::MAX;
}
}
// last entry
if iter.peek().is_none() {
let dir = Self::create_dir(&creating_dir, creating_start, lowest_inode);
dirs.push(dir);
}
}
dirs
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_entry() {
let entries = vec![
Entry {
start: 0,
offset: 0x100,
inode: 1,
t: InodeId::BasicDirectory,
name_size: 0x01,
name: b"aa",
},
Entry {
start: 1,
offset: 0x300,
inode: 5,
t: InodeId::BasicDirectory,
name_size: 0x01,
name: b"bb",
},
Entry {
start: 1,
offset: 0x200,
inode: 6,
t: InodeId::BasicDirectory,
name_size: 0x01,
name: b"zz",
},
];
let dir = Entry::into_dir(entries);
assert_eq!(
vec![
Dir {
count: 0x0,
start: 0x0,
inode_num: 0x1,
dir_entries: vec![DirEntry {
offset: 0x100,
inode_offset: 0x0,
t: InodeId::BasicDirectory,
name_size: 0x1,
name: b"aa".to_vec(),
},],
},
Dir {
count: 0x1,
start: 0x1,
inode_num: 0x5,
dir_entries: vec![
DirEntry {
offset: 0x300,
inode_offset: 0x0,
t: InodeId::BasicDirectory,
name_size: 0x1,
name: b"bb".to_vec(),
},
DirEntry {
offset: 0x200,
inode_offset: 0x1,
t: InodeId::BasicDirectory,
name_size: 0x1,
name: b"zz".to_vec(),
},
],
},
],
dir
);
}
}
0707010000003E000081A40000000000000000000000016854DB950000094B000000000000000000000000000000000000002600000000backhand-0.23.0/backhand/src/error.rs//! Errors
use std::collections::TryReserveError;
use std::{io, string};
use thiserror::Error;
use crate::compressor::Compressor;
use crate::inode::InodeInner;
/// Errors generated from library
#[derive(Error, Debug)]
pub enum BackhandError {
#[error("std io error: {0}")]
StdIo(#[from] io::Error),
#[error("deku error: {0:?}")]
Deku(#[from] deku::DekuError),
#[error("string error: {0:?}")]
StringUtf8(#[from] string::FromUtf8Error),
#[error("string error: {0:?}")]
StrUtf8(#[from] std::str::Utf8Error),
#[error("unsupported compression: {0:?}")]
UnsupportedCompression(Compressor),
#[error("file not found")]
FileNotFound,
#[error("branch was thought to be unreachable")]
Unreachable,
#[error("inode {0:?} was unexpected in this position")]
UnexpectedInode(InodeInner),
#[error("unsupported inode: {0:?}, please fill github issue to add support")]
UnsupportedInode(InodeInner),
#[error("corrupted or invalid squashfs image")]
CorruptedOrInvalidSquashfs,
#[error("invalid squashfs compression options")]
InvalidCompressionOption,
#[error("Invalid file path in the squashfs image")]
InvalidFilePath,
#[error("file inside squashfs image have no name")]
UndefineFileName,
#[error("file duplicated in squashfs image")]
DuplicatedFileName,
#[error("allocator try_reserve error")]
TryReserveError(#[from] TryReserveError),
#[error("invalid id_table for node")]
InvalidIdTable,
}
impl From<BackhandError> for io::Error {
fn from(value: BackhandError) -> Self {
use BackhandError::*;
match value {
StdIo(io) => io,
StringUtf8(_) => Self::from(io::ErrorKind::InvalidData),
StrUtf8(_) => Self::from(io::ErrorKind::InvalidData),
UnsupportedCompression(_) => Self::from(io::ErrorKind::Unsupported),
FileNotFound => Self::from(io::ErrorKind::NotFound),
Unreachable
| Deku(_)
| UnexpectedInode(_)
| UnsupportedInode(_)
| CorruptedOrInvalidSquashfs
| InvalidCompressionOption
| InvalidFilePath
| UndefineFileName
| DuplicatedFileName
| InvalidIdTable
| TryReserveError(_) => Self::from(io::ErrorKind::InvalidData),
}
}
}
0707010000003F000081A40000000000000000000000016854DB95000000E0000000000000000000000000000000000000002700000000backhand-0.23.0/backhand/src/export.rsuse deku::prelude::*;
/// NFS export support
#[derive(Debug, Copy, Clone, DekuRead, DekuWrite, PartialEq, Eq)]
#[deku(endian = "type_endian", ctx = "type_endian: deku::ctx::Endian")]
pub struct Export {
pub num: u64,
}
07070100000040000041ED0000000000000000000000026854DB9500000000000000000000000000000000000000000000002800000000backhand-0.23.0/backhand/src/filesystem07070100000041000081A40000000000000000000000016854DB9500000439000000000000000000000000000000000000002F00000000backhand-0.23.0/backhand/src/filesystem/mod.rs//! In-memory representation of SquashFS filesystem tree used for writing to image
#[cfg(not(feature = "parallel"))]
pub mod reader_no_parallel;
#[cfg(feature = "parallel")]
pub mod reader_parallel;
pub mod node;
pub mod reader;
pub mod writer;
use std::path::{Component, Path, PathBuf};
use crate::BackhandError;
// Normalize the path: always start from root, resolve relative components, and
// reject prefixes (Windows-style "C:/")
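// Illustrative example: "foo/../bar/./baz" normalizes to "/bar/baz", while a
// Windows prefix component such as "C:/x" yields BackhandError::InvalidFilePath.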
pub fn normalize_squashfs_path(src: &Path) -> Result<PathBuf, BackhandError> {
//always starts with root "/"
let mut ret = PathBuf::from(Component::RootDir.as_os_str());
for component in src.components() {
match component {
Component::Prefix(..) => return Err(BackhandError::InvalidFilePath),
//ignore, root, always added on creation
Component::RootDir => {}
Component::CurDir => {}
Component::ParentDir => {
ret.pop();
}
Component::Normal(c) => {
ret.push(c);
}
}
}
Ok(ret)
}
07070100000042000081A40000000000000000000000016854DB9500001CAC000000000000000000000000000000000000003000000000backhand-0.23.0/backhand/src/filesystem/node.rsuse core::fmt;
use std::io::Read;
use std::num::NonZeroUsize;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use super::normalize_squashfs_path;
use crate::data::Added;
use crate::inode::{BasicFile, ExtendedFile, InodeHeader};
use crate::{BackhandError, DataSize, FilesystemReaderFile, Id};
/// File information for Node
#[derive(Debug, PartialEq, Eq, Default, Clone, Copy)]
pub struct NodeHeader {
pub permissions: u16,
/// actual value
pub uid: u32,
/// actual value
pub gid: u32,
pub mtime: u32,
}
impl NodeHeader {
pub fn new(permissions: u16, uid: u32, gid: u32, mtime: u32) -> Self {
Self { permissions, uid, gid, mtime }
}
}
impl NodeHeader {
pub fn from_inode(inode_header: InodeHeader, id_table: &[Id]) -> Result<Self, BackhandError> {
let uid = id_table.get(inode_header.uid as usize).ok_or(BackhandError::InvalidIdTable)?;
let gid = id_table.get(inode_header.gid as usize).ok_or(BackhandError::InvalidIdTable)?;
Ok(Self {
permissions: inode_header.permissions,
uid: uid.num,
gid: gid.num,
mtime: inode_header.mtime,
})
}
}
/// Filesystem Node
#[derive(Clone, Debug)]
pub struct Node<T> {
pub fullpath: PathBuf,
pub header: NodeHeader,
pub inner: InnerNode<T>,
}
impl<T> PartialEq for Node<T> {
fn eq(&self, other: &Self) -> bool {
self.fullpath.eq(&other.fullpath)
}
}
impl<T> Eq for Node<T> {}
impl<T> PartialOrd for Node<T> {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl<T> Ord for Node<T> {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.fullpath.cmp(&other.fullpath)
}
}
impl<T> Node<T> {
pub(crate) fn new(fullpath: PathBuf, header: NodeHeader, inner: InnerNode<T>) -> Self {
Self { fullpath, header, inner }
}
pub fn new_root(header: NodeHeader) -> Self {
let fullpath = PathBuf::from("/");
let inner = InnerNode::Dir(SquashfsDir::default());
Self { fullpath, header, inner }
}
}
/// Filesystem node
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum InnerNode<T> {
/// Either [`SquashfsFileReader`] or [`SquashfsFileWriter`]
File(T),
Symlink(SquashfsSymlink),
Dir(SquashfsDir),
CharacterDevice(SquashfsCharacterDevice),
BlockDevice(SquashfsBlockDevice),
NamedPipe,
Socket,
}
/// Unread file for filesystem
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum SquashfsFileReader {
Basic(BasicFile),
Extended(ExtendedFile),
}
impl SquashfsFileReader {
pub fn file_len(&self) -> usize {
match self {
SquashfsFileReader::Basic(basic) => basic.file_size as usize,
SquashfsFileReader::Extended(extended) => extended.file_size as usize,
}
}
pub fn frag_index(&self) -> usize {
match self {
SquashfsFileReader::Basic(basic) => basic.frag_index as usize,
SquashfsFileReader::Extended(extended) => extended.frag_index as usize,
}
}
pub fn block_sizes(&self) -> &[DataSize] {
match self {
SquashfsFileReader::Basic(basic) => &basic.block_sizes,
SquashfsFileReader::Extended(extended) => &extended.block_sizes,
}
}
pub fn blocks_start(&self) -> u64 {
match self {
SquashfsFileReader::Basic(basic) => basic.blocks_start as u64,
SquashfsFileReader::Extended(extended) => extended.blocks_start,
}
}
pub fn block_offset(&self) -> u32 {
match self {
SquashfsFileReader::Basic(basic) => basic.block_offset,
SquashfsFileReader::Extended(extended) => extended.block_offset,
}
}
}
/// Read file from another SquashfsFile or a user-provided file
pub enum SquashfsFileWriter<'a, 'b, 'c> {
UserDefined(Arc<Mutex<dyn Read + 'c>>),
SquashfsFile(FilesystemReaderFile<'a, 'b>),
Consumed(usize, Added),
}
impl fmt::Debug for SquashfsFileWriter<'_, '_, '_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("FileWriter").finish()
}
}
/// Symlink for filesystem
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct SquashfsSymlink {
pub link: PathBuf,
}
/// Directory for filesystem
#[derive(Debug, PartialEq, Eq, Clone, Copy, Default)]
pub struct SquashfsDir {}
/// Character Device for filesystem
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub struct SquashfsCharacterDevice {
pub device_number: u32,
}
/// Block Device for filesystem
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub struct SquashfsBlockDevice {
pub device_number: u32,
}
#[derive(Debug, Clone)]
pub struct Nodes<T> {
pub nodes: Vec<Node<T>>,
}
impl<T> Nodes<T> {
pub fn new_root(header: NodeHeader) -> Self {
Self { nodes: vec![Node::new_root(header)] }
}
pub fn root(&self) -> &Node<T> {
&self.nodes[0]
}
pub fn root_mut(&mut self) -> &mut Node<T> {
&mut self.nodes[0]
}
pub fn node_mut<S: AsRef<Path>>(&mut self, path: S) -> Option<&mut Node<T>> {
// normalize the search path (the root prefix is optional) so it matches
// the stored fullpaths
let find_path = normalize_squashfs_path(path.as_ref()).ok()?;
self.nodes
.binary_search_by(|node| node.fullpath.cmp(&find_path))
.ok()
.map(|found| &mut self.nodes[found])
}
pub fn insert(&mut self, node: Node<T>) -> Result<(), BackhandError> {
let path = &node.fullpath;
let parent = node.fullpath.parent().ok_or(BackhandError::InvalidFilePath)?;
//check if the parent exists and is a dir
let parent = self.node_mut(parent).ok_or(BackhandError::InvalidFilePath)?;
match &parent.inner {
InnerNode::Dir(_) => {}
_ => return Err(BackhandError::InvalidFilePath),
}
match self.nodes.binary_search_by(|node| node.fullpath.as_path().cmp(path)) {
//file with this fullpath already exists
Ok(_index) => Err(BackhandError::DuplicatedFileName),
// file doesn't exist, insert it at this location
Err(index) => {
self.nodes.insert(index, node);
Ok(())
}
}
}
fn inner_children_of(&self, node_index: usize) -> Option<&[Node<T>]> {
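// `nodes` is kept sorted by fullpath (see `insert`), so the nodes under a
// directory form a contiguous run immediately after it; scan forward until a
// path no longer starts with the parent's fullpath.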
let parent = &self.nodes[node_index];
let children_start = node_index + 1;
let unbounded_children = self.nodes.get(children_start..)?;
let children_len = unbounded_children
.iter()
.enumerate()
.find(|(_, node)| !node.fullpath.starts_with(&parent.fullpath))
.map(|(index, _)| index)
.unwrap_or(unbounded_children.len());
Some(&unbounded_children[..children_len])
}
pub fn node(&self, node_index: NonZeroUsize) -> Option<&Node<T>> {
self.nodes.get(node_index.get() - 1)
}
pub fn children_of(
&self,
node_index: NonZeroUsize,
) -> impl Iterator<Item = (NonZeroUsize, &Node<T>)> {
self.inner_children_of(node_index.get() - 1).unwrap_or(&[]).iter().enumerate().map(
move |(index, node)| (NonZeroUsize::new(node_index.get() + index + 1).unwrap(), node),
)
}
}
07070100000043000081A40000000000000000000000016854DB95000020D9000000000000000000000000000000000000003200000000backhand-0.23.0/backhand/src/filesystem/reader.rsuse std::sync::{Mutex, RwLock};
use super::node::Nodes;
use crate::compressor::{CompressionOptions, Compressor};
use crate::data::DataSize;
use crate::error::BackhandError;
use crate::fragment::Fragment;
use crate::id::Id;
use crate::kinds::Kind;
use crate::reader::BufReadSeek;
use crate::squashfs::Cache;
use crate::{Node, Squashfs, SquashfsFileReader};
#[cfg(not(feature = "parallel"))]
use crate::filesystem::reader_no_parallel::{SquashfsRawData, SquashfsReadFile};
#[cfg(feature = "parallel")]
use crate::filesystem::reader_parallel::{SquashfsRawData, SquashfsReadFile};
/// Representation of SquashFS filesystem after read from image
/// - Use [`Self::from_reader`] to read into `Self` from a `reader`
///
/// # Read direct into [`Self`]
/// Usual workflow: reading from an image into a default squashfs [`Self`]. See [InnerNode] for more
/// details on `.nodes`.
/// ```rust,no_run
/// # use std::fs::File;
/// # use std::io::BufReader;
/// # use backhand::{
/// # FilesystemReader, InnerNode, Squashfs, SquashfsBlockDevice, SquashfsCharacterDevice,
/// # SquashfsDir, SquashfsSymlink,
/// # };
/// // Read into filesystem
/// let file = BufReader::new(File::open("image.squashfs").unwrap());
/// let filesystem = FilesystemReader::from_reader(file).unwrap();
///
/// // Iterate through nodes
/// // (See src/bin/unsquashfs.rs for more examples on extraction)
/// for node in filesystem.files() {
/// // extract
/// match &node.inner {
/// InnerNode::File(_) => (),
/// InnerNode::Symlink(_) => (),
/// InnerNode::Dir(_) => (),
/// InnerNode::CharacterDevice(_) => (),
/// InnerNode::BlockDevice(_) => (),
/// InnerNode::NamedPipe => (),
/// InnerNode::Socket => (),
/// }
/// }
/// ```
///
/// # Read from [`Squashfs`]
/// Performance-wise, you may want to read into a [`Squashfs`] first if, for instance, you are
/// not extracting and only listing some Superblock fields.
/// ```rust,no_run
/// # use std::fs::File;
/// # use std::io::BufReader;
/// # use backhand::{
/// # FilesystemReader, InnerNode, Squashfs, SquashfsBlockDevice, SquashfsCharacterDevice,
/// # SquashfsDir, SquashfsSymlink,
/// # };
/// // Read into Squashfs
/// let file = BufReader::new(File::open("image.squashfs").unwrap());
/// let squashfs = Squashfs::from_reader_with_offset(file, 0).unwrap();
///
/// // Display the Superblock info
/// let superblock = squashfs.superblock;
/// println!("{superblock:#08x?}");
///
/// // Now read into filesystem
/// let filesystem = squashfs.into_filesystem_reader().unwrap();
/// ```
/// [InnerNode]: crate::InnerNode
pub struct FilesystemReader<'b> {
pub kind: Kind,
/// The size of a data block in bytes. Must be a power of two between 4096 (4k) and 1048576 (1 MiB).
pub block_size: u32,
/// The log2 of the block size. If the two fields do not agree, the archive is considered corrupted.
pub block_log: u16,
/// Compressor used for data
pub compressor: Compressor,
/// Optional compression options for the compressor used on data stored in the image
pub compression_options: Option<CompressionOptions>,
/// Last modification time of the archive. Count seconds since 00:00, Jan 1st 1970 UTC (not counting leap seconds).
/// This is unsigned, so it expires in the year 2106 (as opposed to 2038).
pub mod_time: u32,
/// IDs stored for gid(s) and uid(s)
pub id_table: Vec<Id>,
/// Fragments Lookup Table
pub fragments: Option<Vec<Fragment>>,
/// All files and directories in filesystem
pub root: Nodes<SquashfsFileReader>,
/// File reader
pub(crate) reader: Mutex<Box<dyn BufReadSeek + 'b>>,
/// Cache used in the decompression
pub(crate) cache: RwLock<Cache>,
/// Superblock flag for duplicate file removal
pub(crate) no_duplicate_files: bool,
}
impl<'b> FilesystemReader<'b> {
/// Call [`Squashfs::from_reader`], then [`Squashfs::into_filesystem_reader`]
///
/// With default kind: [`crate::kind::LE_V4_0`] and offset `0`.
pub fn from_reader<R>(reader: R) -> Result<Self, BackhandError>
where
R: BufReadSeek + 'b,
{
let squashfs = Squashfs::from_reader_with_offset(reader, 0)?;
squashfs.into_filesystem_reader()
}
/// Same as [`Self::from_reader`], but seek'ing to `offset` in `reader` before reading
pub fn from_reader_with_offset<R>(reader: R, offset: u64) -> Result<Self, BackhandError>
where
R: BufReadSeek + 'b,
{
let squashfs = Squashfs::from_reader_with_offset(reader, offset)?;
squashfs.into_filesystem_reader()
}
/// Same as [`Self::from_reader_with_offset`], but setting custom `kind`
pub fn from_reader_with_offset_and_kind<R>(
reader: R,
offset: u64,
kind: Kind,
) -> Result<Self, BackhandError>
where
R: BufReadSeek + 'b,
{
let squashfs = Squashfs::from_reader_with_offset_and_kind(reader, offset, kind)?;
squashfs.into_filesystem_reader()
}
/// Return a file handler for this file
pub fn file<'a>(&'a self, file: &'a SquashfsFileReader) -> FilesystemReaderFile<'a, 'b> {
FilesystemReaderFile::new(self, file)
}
/// Iterator of all files, including the root
///
/// # Example
/// Used when extracting a file from the image, for example using [`FilesystemReaderFile`]:
/// ```rust,no_run
/// # use std::fs::File;
/// # use std::io::BufReader;
/// # use backhand::{
/// # FilesystemReader, InnerNode, Squashfs, SquashfsBlockDevice, SquashfsCharacterDevice,
/// # SquashfsDir, SquashfsSymlink,
/// # };
/// # let file = BufReader::new(File::open("image.squashfs").unwrap());
/// # let filesystem = FilesystemReader::from_reader(file).unwrap();
/// // [snip: creating FilesystemReader]
///
/// for node in filesystem.files() {
/// // extract
/// match &node.inner {
/// InnerNode::File(file) => {
/// let mut reader = filesystem
/// .file(&file)
/// .reader();
/// // Then, do something with the reader
/// },
/// _ => (),
/// }
/// }
/// ```
pub fn files(&self) -> impl Iterator<Item = &Node<SquashfsFileReader>> {
self.root.nodes.iter()
}
}
/// Filesystem handle for file
#[derive(Copy, Clone)]
pub struct FilesystemReaderFile<'a, 'b> {
pub(crate) system: &'a FilesystemReader<'b>,
pub(crate) file: &'a SquashfsFileReader,
}
impl<'a, 'b> FilesystemReaderFile<'a, 'b> {
pub fn new(system: &'a FilesystemReader<'b>, file: &'a SquashfsFileReader) -> Self {
Self { system, file }
}
/// Create [`SquashfsReadFile`] that impls [`std::io::Read`] from [`FilesystemReaderFile`].
/// This can be used to then call functions from [`std::io::Read`]
/// to de-compress and read the data from this file.
///
/// [Read::read]: std::io::Read::read
/// [Vec::clear]: Vec::clear
pub fn reader(&self) -> SquashfsReadFile<'a, 'b> {
self.raw_data_reader().into_reader()
}
pub fn fragment(&self) -> Option<&'a Fragment> {
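// in the squashfs format, a frag_index of 0xffffffff means "no fragment"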
if self.file.frag_index() == 0xffffffff {
None
} else {
self.system.fragments.as_ref().map(|fragments| &fragments[self.file.frag_index()])
}
}
pub(crate) fn raw_data_reader(&self) -> SquashfsRawData<'a, 'b> {
SquashfsRawData::new(Self { system: self.system, file: self.file })
}
}
impl<'a> IntoIterator for FilesystemReaderFile<'a, '_> {
type IntoIter = BlockIterator<'a>;
type Item = <BlockIterator<'a> as Iterator>::Item;
fn into_iter(self) -> Self::IntoIter {
BlockIterator { blocks: self.file.block_sizes(), fragment: self.fragment() }
}
}
pub enum BlockFragment<'a> {
Block(&'a DataSize),
Fragment(&'a Fragment),
}
pub struct BlockIterator<'a> {
pub blocks: &'a [DataSize],
pub fragment: Option<&'a Fragment>,
}
impl<'a> Iterator for BlockIterator<'a> {
type Item = BlockFragment<'a>;
fn next(&mut self) -> Option<Self::Item> {
self.blocks
.split_first()
.map(|(first, rest)| {
self.blocks = rest;
BlockFragment::Block(first)
})
.or_else(|| self.fragment.take().map(BlockFragment::Fragment))
}
}
07070100000044000081A40000000000000000000000016854DB9500002224000000000000000000000000000000000000003E00000000backhand-0.23.0/backhand/src/filesystem/reader_no_parallel.rsuse std::collections::{HashMap, VecDeque};
use std::io::{Read, SeekFrom};
use std::sync::{Arc, Mutex};
use super::node::Nodes;
use crate::compressor::{CompressionOptions, Compressor};
use crate::data::DataSize;
use crate::error::BackhandError;
use crate::filesystem::reader::{BlockFragment, BlockIterator, FilesystemReaderFile};
use crate::fragment::Fragment;
use crate::id::Id;
use crate::kinds::Kind;
use crate::reader::BufReadSeek;
use crate::squashfs::Cache;
use crate::{Node, Squashfs, SquashfsFileReader};
#[derive(Clone, Copy)]
pub(crate) struct RawDataBlock {
pub(crate) fragment: bool,
pub(crate) uncompressed: bool,
}
pub(crate) struct SquashfsRawData<'a, 'b> {
pub(crate) file: FilesystemReaderFile<'a, 'b>,
current_block: BlockIterator<'a>,
pub(crate) pos: u64,
}
impl<'a, 'b> SquashfsRawData<'a, 'b> {
pub fn new(file: FilesystemReaderFile<'a, 'b>) -> Self {
let pos = file.file.blocks_start();
let current_block = file.into_iter();
Self { file, current_block, pos }
}
fn read_raw_data(
&mut self,
data: &mut Vec<u8>,
block: &BlockFragment<'a>,
) -> Result<RawDataBlock, BackhandError> {
match block {
BlockFragment::Block(block) => {
let block_size = block.size() as usize;
// sparse file: don't read from the reader, just fill with block_size zeroes
if block_size == 0 {
*data = vec![0; self.file.system.block_size as usize];
return Ok(RawDataBlock { fragment: false, uncompressed: true });
}
data.resize(block_size, 0);
//NOTE: storing/restoring the file-pos is not required at the
//moment of writing, but it may be in the future.
{
let mut reader = self.file.system.reader.lock().unwrap();
reader.seek(SeekFrom::Start(self.pos))?;
reader.read_exact(data)?;
self.pos = reader.stream_position()?;
}
Ok(RawDataBlock { fragment: false, uncompressed: block.uncompressed() })
}
BlockFragment::Fragment(fragment) => {
// if in the cache, just read from the cache bytes and return the fragment bytes
{
let cache = self.file.system.cache.read().unwrap();
if let Some(cache_bytes) = cache.fragment_cache.get(&fragment.start) {
//if in cache, just return the cache, don't read it
let range = self.fragment_range();
tracing::trace!("fragment in cache: {:02x}:{range:02x?}", fragment.start);
data.resize(range.end - range.start, 0);
data.copy_from_slice(&cache_bytes[range]);
//cache is stored uncompressed
return Ok(RawDataBlock { fragment: true, uncompressed: true });
}
}
// if not in the cache, read the entire fragment so it can be stored in
// the cache. Once that is done, if the fragment is uncompressed, return
// only the bytes that belong to this file
tracing::trace!("fragment: reading from data");
let frag_size = fragment.size.size() as usize;
data.resize(frag_size, 0);
{
let mut reader = self.file.system.reader.lock().unwrap();
reader.seek(SeekFrom::Start(fragment.start))?;
reader.read_exact(data)?;
}
// if stored uncompressed, cache it now
if fragment.size.uncompressed() {
self.file
.system
.cache
.write()
.unwrap()
.fragment_cache
.insert(self.file.fragment().unwrap().start, data.clone());
//apply the fragment offset
let range = self.fragment_range();
data.drain(range.end..);
data.drain(..range.start);
}
Ok(RawDataBlock { fragment: true, uncompressed: fragment.size.uncompressed() })
}
}
}
#[inline]
pub fn next_block(&mut self, buf: &mut Vec<u8>) -> Option<Result<RawDataBlock, BackhandError>> {
self.current_block.next().map(|next| self.read_raw_data(buf, &next))
}
#[inline]
fn fragment_range(&self) -> std::ops::Range<usize> {
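// The fragment holds the file's tail: everything beyond the full blocks.
// `block_offset` locates this file's bytes inside the (shared) fragment block.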
let block_len = self.file.system.block_size as usize;
let block_num = self.file.file.block_sizes().len();
let file_size = self.file.file.file_len();
let frag_len = file_size - (block_num * block_len);
let frag_start = self.file.file.block_offset() as usize;
let frag_end = frag_start + frag_len;
frag_start..frag_end
}
pub fn decompress(
&self,
data: RawDataBlock,
input_buf: &mut Vec<u8>,
output_buf: &mut Vec<u8>,
) -> Result<(), BackhandError> {
// appending to the output_buf is not allowed, it needs to be empty
assert!(output_buf.is_empty());
// input is already decompressed, so just swap the input/output so the
// output_buf contains the final data.
if data.uncompressed {
std::mem::swap(input_buf, output_buf);
} else {
output_buf.reserve(self.file.system.block_size as usize);
self.file.system.kind.inner.compressor.decompress(
input_buf,
output_buf,
self.file.system.compressor,
)?;
// store the cache, so decompression is not duplicated
if data.fragment {
self.file
.system
.cache
.write()
.unwrap()
.fragment_cache
.insert(self.file.fragment().unwrap().start, output_buf.clone());
//apply the fragment offset
let range = self.fragment_range();
output_buf.drain(range.end..);
output_buf.drain(..range.start);
}
}
Ok(())
}
#[inline]
pub fn into_reader(self) -> SquashfsReadFile<'a, 'b> {
let block_size = self.file.system.block_size as usize;
let bytes_available = self.file.file.file_len();
SquashfsReadFile::new(block_size, self, 0, bytes_available)
}
}
pub struct SquashfsReadFile<'a, 'b> {
raw_data: SquashfsRawData<'a, 'b>,
buf_read: Vec<u8>,
buf_decompress: Vec<u8>,
//offset into buf_decompress at which to start reading
last_read: usize,
bytes_available: usize,
}
impl<'a, 'b> SquashfsReadFile<'a, 'b> {
fn new(
block_size: usize,
raw_data: SquashfsRawData<'a, 'b>,
last_read: usize,
bytes_available: usize,
) -> Self {
Self {
raw_data,
buf_read: Vec::with_capacity(block_size),
buf_decompress: vec![],
last_read,
bytes_available,
}
}
#[inline]
fn available(&self) -> &[u8] {
&self.buf_decompress[self.last_read..]
}
#[inline]
fn read_available(&mut self, buf: &mut [u8]) -> usize {
let available = self.available();
let read_len = buf.len().min(available.len()).min(self.bytes_available);
buf[..read_len].copy_from_slice(&available[..read_len]);
self.bytes_available -= read_len;
self.last_read += read_len;
read_len
}
#[inline]
fn read_next_block(&mut self) -> Result<(), BackhandError> {
let block = match self.raw_data.next_block(&mut self.buf_read) {
Some(block) => block?,
None => return Ok(()),
};
self.buf_decompress.clear();
self.raw_data.decompress(block, &mut self.buf_read, &mut self.buf_decompress)?;
self.last_read = 0;
Ok(())
}
}
impl Read for SquashfsReadFile<'_, '_> {
#[inline]
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
// file was fully consumed
if self.bytes_available == 0 {
self.buf_read.clear();
self.buf_decompress.clear();
return Ok(0);
}
//no data available, read the next block
if self.available().is_empty() {
self.read_next_block()?;
}
//return data from the read block/fragment
Ok(self.read_available(buf))
}
}
07070100000045000081A40000000000000000000000016854DB95000031F7000000000000000000000000000000000000003B00000000backhand-0.23.0/backhand/src/filesystem/reader_parallel.rsuse rayon::prelude::*;
use std::collections::VecDeque;
use std::io::{Read, SeekFrom};
use std::sync::{Arc, Mutex};
use crate::error::BackhandError;
use crate::filesystem::reader::{BlockFragment, BlockIterator, FilesystemReaderFile};
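/// How many blocks each refill of the decompressed-block queue reads ahead
/// before decompressing them in parallel with rayon
/// (see `SquashfsReadFile::fill_decompressed_queue`).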
const PREFETCH_COUNT: usize = 8;
#[derive(Clone, Copy)]
pub(crate) struct RawDataBlock {
pub(crate) fragment: bool,
pub(crate) uncompressed: bool,
}
pub(crate) struct SquashfsRawData<'a, 'b> {
pub(crate) file: FilesystemReaderFile<'a, 'b>,
current_block: BlockIterator<'a>,
pub(crate) pos: u64,
/// Buffer pool for reusing memory across threads
buffer_pool: Arc<Mutex<Vec<Vec<u8>>>>,
/// Queue of blocks ready to be processed
prefetched_blocks: VecDeque<(Vec<u8>, RawDataBlock)>,
num_prefetch: usize,
}
impl<'a, 'b> SquashfsRawData<'a, 'b> {
pub fn new(file: FilesystemReaderFile<'a, 'b>) -> Self {
let pos = file.file.blocks_start();
let current_block = file.into_iter();
Self {
file,
current_block,
pos,
buffer_pool: Arc::new(Mutex::new(Vec::new())),
prefetched_blocks: VecDeque::new(),
num_prefetch: rayon::current_num_threads() / 2,
}
}
/// Prefetch multiple blocks to feed parallel decompression
fn prefetch_blocks(&mut self) -> Result<(), BackhandError> {
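// Reads stay serial here (the underlying reader sits behind a Mutex); the
// parallelism comes later, when the prefetched blocks are decompressed with
// rayon in `fill_decompressed_queue`.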
for _ in 0..self.num_prefetch {
match self.current_block.next() {
Some(block_fragment) => {
let mut data = self.buffer_pool.lock().unwrap().pop().unwrap_or_default();
let block_info = self.read_raw_data(&mut data, &block_fragment)?;
self.prefetched_blocks.push_back((data, block_info));
}
None => break, // No more blocks
}
}
Ok(())
}
fn read_raw_data(
&mut self,
data: &mut Vec<u8>,
block: &BlockFragment<'a>,
) -> Result<RawDataBlock, BackhandError> {
match block {
BlockFragment::Block(block) => {
let block_size = block.size() as usize;
// sparse file: don't read from the reader, just fill with block_size zeroes
if block_size == 0 {
*data = vec![0; self.file.system.block_size as usize];
return Ok(RawDataBlock { fragment: false, uncompressed: true });
}
data.resize(block_size, 0);
//NOTE: storing/restoring the file-pos is not required at the
//moment of writing, but it may be in the future.
{
let mut reader = self.file.system.reader.lock().unwrap();
reader.seek(SeekFrom::Start(self.pos))?;
reader.read_exact(data)?;
self.pos = reader.stream_position()?;
}
Ok(RawDataBlock { fragment: false, uncompressed: block.uncompressed() })
}
BlockFragment::Fragment(fragment) => {
// if in the cache, just read from the cache bytes and return the fragment bytes
{
let cache = self.file.system.cache.read().unwrap();
if let Some(cache_bytes) = cache.fragment_cache.get(&fragment.start) {
//if in cache, just return the cache, don't read it
let range = self.fragment_range();
tracing::trace!("fragment in cache: {:02x}:{range:02x?}", fragment.start);
data.resize(range.end - range.start, 0);
data.copy_from_slice(&cache_bytes[range]);
//cache is stored uncompressed
return Ok(RawDataBlock { fragment: true, uncompressed: true });
}
}
// if not in the cache, read the entire fragment so it can be stored in
// the cache. Once that is done, if the fragment is uncompressed, return
// only the bytes that belong to this file
tracing::trace!("fragment: reading from data");
let frag_size = fragment.size.size() as usize;
data.resize(frag_size, 0);
{
let mut reader = self.file.system.reader.lock().unwrap();
reader.seek(SeekFrom::Start(fragment.start))?;
reader.read_exact(data)?;
}
// if stored uncompressed, cache it now
if fragment.size.uncompressed() {
self.file
.system
.cache
.write()
.unwrap()
.fragment_cache
.insert(self.file.fragment().unwrap().start, data.clone());
//apply the fragment offset
let range = self.fragment_range();
data.drain(range.end..);
data.drain(..range.start);
}
Ok(RawDataBlock { fragment: true, uncompressed: fragment.size.uncompressed() })
}
}
}
#[inline]
pub fn next_block(&mut self, buf: &mut Vec<u8>) -> Option<Result<RawDataBlock, BackhandError>> {
// If no prefetched blocks are available, try to prefetch
if self.prefetched_blocks.is_empty() {
if let Err(e) = self.prefetch_blocks() {
return Some(Err(e));
}
}
// Return a prefetched block if available
if let Some((mut data, block_info)) = self.prefetched_blocks.pop_front() {
std::mem::swap(buf, &mut data);
// return buffer to our pool
self.buffer_pool.lock().unwrap().push(data);
Some(Ok(block_info))
} else {
// No more blocks
None
}
}
#[inline]
fn fragment_range(&self) -> std::ops::Range<usize> {
let block_len = self.file.system.block_size as usize;
let block_num = self.file.file.block_sizes().len();
let file_size = self.file.file.file_len();
let frag_len = file_size - (block_num * block_len);
let frag_start = self.file.file.block_offset() as usize;
let frag_end = frag_start + frag_len;
frag_start..frag_end
}
/// Decompress function that can be run in parallel
pub fn decompress(
&self,
data: RawDataBlock,
input_buf: &mut Vec<u8>,
output_buf: &mut Vec<u8>,
) -> Result<(), BackhandError> {
// appending to the output_buf is not allowed, it needs to be empty
assert!(output_buf.is_empty());
// input is already decompressed, so just swap the input/output so the
// output_buf contains the final data.
if data.uncompressed {
std::mem::swap(input_buf, output_buf);
} else {
output_buf.reserve(self.file.system.block_size as usize);
self.file.system.kind.inner.compressor.decompress(
input_buf,
output_buf,
self.file.system.compressor,
)?;
// store the cache, so decompression is not duplicated
if data.fragment {
self.file
.system
.cache
.write()
.unwrap()
.fragment_cache
.insert(self.file.fragment().unwrap().start, output_buf.clone());
//apply the fragment offset
let range = self.fragment_range();
output_buf.drain(range.end..);
output_buf.drain(..range.start);
}
}
Ok(())
}
#[inline]
pub fn into_reader(self) -> SquashfsReadFile<'a, 'b> {
let bytes_available = self.file.file.file_len();
SquashfsReadFile::new(self, 0, bytes_available)
}
}
pub struct SquashfsReadFile<'a, 'b> {
raw_data: SquashfsRawData<'a, 'b>,
buffer_pool: Arc<Mutex<Vec<Vec<u8>>>>,
decompressed_blocks: VecDeque<Vec<u8>>,
current_block_position: usize,
bytes_available: usize,
prefetch_count: usize,
}
impl<'a, 'b> SquashfsReadFile<'a, 'b> {
fn new(raw_data: SquashfsRawData<'a, 'b>, last_read: usize, bytes_available: usize) -> Self {
let buffer_pool = Arc::new(Mutex::new(Vec::new()));
Self {
raw_data,
buffer_pool,
decompressed_blocks: VecDeque::new(),
current_block_position: last_read,
bytes_available,
prefetch_count: PREFETCH_COUNT,
}
}
/// Fill the decompressed blocks queue with data
fn fill_decompressed_queue(&mut self) -> Result<(), BackhandError> {
// If we already have data, no need to fill
if !self.decompressed_blocks.is_empty()
&& self.current_block_position < self.decompressed_blocks.front().unwrap().len()
{
return Ok(());
}
// If we're in the middle of a block, advance to the next one
if !self.decompressed_blocks.is_empty() {
self.decompressed_blocks.pop_front();
self.current_block_position = 0;
// If we still have data, no need to fill
if !self.decompressed_blocks.is_empty() {
return Ok(());
}
}
// We need to decompress more blocks
// Collect blocks to decompress
let mut read_blocks = Vec::new();
let mut buf_pool = self.buffer_pool.lock().unwrap();
for _ in 0..self.prefetch_count {
let mut input_buf = buf_pool.pop().unwrap_or_default();
if let Some(block_result) = self.raw_data.next_block(&mut input_buf) {
match block_result {
Ok(block_info) => read_blocks.push((input_buf, block_info)),
Err(e) => return Err(e),
}
} else {
// Return unused buffer to the pool
buf_pool.push(input_buf);
break;
}
}
// Release lock before parallel processing
drop(buf_pool);
if read_blocks.is_empty() {
return Ok(());
}
// Use Rayon to decompress blocks in parallel
let raw_data = &self.raw_data;
let buffer_pool = &self.buffer_pool;
let decompressed_results: Vec<Result<Vec<u8>, BackhandError>> = read_blocks
.into_par_iter()
.map(|(mut input_buf, block_info)| {
let mut output_buf = Vec::new();
let result = raw_data.decompress(block_info, &mut input_buf, &mut output_buf);
// Return input buffer to the pool
buffer_pool.lock().unwrap().push(input_buf);
result.map(|_| output_buf)
})
.collect();
// Process results
for result in decompressed_results {
match result {
Ok(output_buf) => self.decompressed_blocks.push_back(output_buf),
Err(e) => return Err(e),
}
}
self.current_block_position = 0;
Ok(())
}
/// Available bytes in the current block
#[inline]
fn available_in_current_block(&self) -> &[u8] {
if self.decompressed_blocks.is_empty() {
&[]
} else {
&self.decompressed_blocks.front().unwrap()[self.current_block_position..]
}
}
/// Read available bytes from the current block
#[inline]
fn read_available(&mut self, buf: &mut [u8]) -> usize {
let available = self.available_in_current_block();
let read_len = buf.len().min(available.len()).min(self.bytes_available);
if read_len > 0 {
buf[..read_len].copy_from_slice(&available[..read_len]);
self.bytes_available -= read_len;
self.current_block_position += read_len;
}
read_len
}
}
impl Read for SquashfsReadFile<'_, '_> {
#[inline]
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
// Check if we're at the end of the file
if self.bytes_available == 0 {
return Ok(0);
}
// Ensure we have data to read
if self.fill_decompressed_queue().is_err() {
return Err(std::io::Error::other("Failed to decompress data"));
}
// If we have no more blocks, we're done
if self.decompressed_blocks.is_empty() {
return Ok(0);
}
// Read available data
Ok(self.read_available(buf))
}
}
07070100000046000081A40000000000000000000000016854DB9500008D08000000000000000000000000000000000000003200000000backhand-0.23.0/backhand/src/filesystem/writer.rsuse std::ffi::OsStr;
use std::io::{Cursor, Read, Seek, SeekFrom, Write};
use std::num::NonZeroUsize;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::sync::Mutex;
use std::time::{SystemTime, UNIX_EPOCH};
use deku::prelude::*;
use tracing::{error, info, trace};
use super::node::{InnerNode, Nodes};
use super::normalize_squashfs_path;
use crate::compressor::{CompressionOptions, Compressor};
use crate::data::DataWriter;
use crate::entry::Entry;
use crate::error::BackhandError;
use crate::filesystem::node::SquashfsSymlink;
use crate::id::Id;
use crate::kind::Kind;
use crate::kinds::LE_V4_0;
use crate::metadata::{self, MetadataWriter, METADATA_MAXSIZE};
use crate::reader::WriteSeek;
use crate::squashfs::SuperBlock;
use crate::{
fragment, FilesystemReader, Flags, Node, NodeHeader, SquashfsBlockDevice,
SquashfsCharacterDevice, SquashfsDir, SquashfsFileWriter, DEFAULT_BLOCK_SIZE, DEFAULT_PAD_LEN,
MAX_BLOCK_SIZE, MIN_BLOCK_SIZE,
};
/// Representation of SquashFS filesystem to be written back to an image
/// - Use [`Self::from_fs_reader`] to write with the data from a previous SquashFS image
/// - Use [`Self::default`] to create an empty SquashFS image without an original image. For example:
/// ```rust
/// # use std::time::SystemTime;
/// # use backhand::{NodeHeader, Id, FilesystemCompressor, FilesystemWriter, SquashfsDir, compression::Compressor, kind, DEFAULT_BLOCK_SIZE, ExtraXz, CompressionExtra, kind::Kind};
/// // Add empty default FilesystemWriter
/// let mut fs = FilesystemWriter::default();
/// fs.set_current_time();
/// fs.set_block_size(DEFAULT_BLOCK_SIZE);
/// fs.set_only_root_id();
/// fs.set_kind(Kind::from_const(kind::LE_V4_0).unwrap());
///
/// // set root image permissions
/// let header = NodeHeader {
/// permissions: 0o755,
/// ..NodeHeader::default()
/// };
/// fs.set_root_mode(0o777);
///
/// // set extra compression options
/// let mut xz_extra = ExtraXz::default();
/// xz_extra.level(9).unwrap();
/// let extra = CompressionExtra::Xz(xz_extra);
/// let mut compressor = FilesystemCompressor::new(Compressor::Xz, None).unwrap();
/// compressor.extra(extra).unwrap();
/// fs.set_compressor(compressor);
///
/// // push some dirs and a file
/// fs.push_dir("usr", header);
/// fs.push_dir("usr/bin", header);
/// fs.push_file(std::io::Cursor::new(vec![0x00, 0x01]), "usr/bin/file", header);
/// ```
#[derive(Debug)]
pub struct FilesystemWriter<'a, 'b, 'c> {
pub(crate) kind: Kind,
/// The size of a data block in bytes. Must be a power of two between 4096 (4k) and 1048576 (1 MiB).
pub(crate) block_size: u32,
/// Last modification time of the archive. Count seconds since 00:00, Jan 1st 1970 UTC (not counting leap seconds).
/// This is unsigned, so it expires in the year 2106 (as opposed to 2038).
pub(crate) mod_time: u32,
/// 32 bit user and group IDs
pub(crate) id_table: Vec<Id>,
/// Compressor used when writing
pub(crate) fs_compressor: FilesystemCompressor,
/// All files and directories in filesystem, including root
pub(crate) root: Nodes<SquashfsFileWriter<'a, 'b, 'c>>,
/// The log2 of the block size. If the two fields do not agree, the archive is considered corrupted.
pub(crate) block_log: u16,
pub(crate) pad_len: u32,
/// Superblock flag for removing duplicate file data (deduplication)
pub(crate) no_duplicate_files: bool,
pub(crate) emit_compression_options: bool,
}
impl Default for FilesystemWriter<'_, '_, '_> {
/// Create default FilesystemWriter
///
/// block_size: [`DEFAULT_BLOCK_SIZE`], compressor: default XZ compression, no nodes,
/// kind: [`LE_V4_0`], and mod_time: `0`.
fn default() -> Self {
let block_size = DEFAULT_BLOCK_SIZE;
Self {
block_size,
mod_time: 0,
id_table: Id::root(),
fs_compressor: FilesystemCompressor::default(),
kind: Kind { inner: Arc::new(LE_V4_0) },
root: Nodes::new_root(NodeHeader::default()),
block_log: (block_size as f32).log2() as u16,
pad_len: DEFAULT_PAD_LEN,
no_duplicate_files: true,
emit_compression_options: true,
}
}
}
impl<'a, 'b, 'c> FilesystemWriter<'a, 'b, 'c> {
/// Set block size
///
/// # Panics
/// Panics if `block_size` is not within [`MIN_BLOCK_SIZE`]`..=`[`MAX_BLOCK_SIZE`]
pub fn set_block_size(&mut self, block_size: u32) {
if !(MIN_BLOCK_SIZE..=MAX_BLOCK_SIZE).contains(&block_size) {
panic!("invalid block_size");
}
self.block_size = block_size;
self.block_log = (block_size as f32).log2() as u16;
}
/// Set time of image as `mod_time`
///
/// # Example: Set to `Wed Oct 19 01:26:15 2022`
/// ```rust
/// # use backhand::{FilesystemWriter, kind};
/// let mut fs = FilesystemWriter::default();
/// fs.set_time(0x634f_5237);
/// ```
pub fn set_time(&mut self, mod_time: u32) {
self.mod_time = mod_time;
}
/// Set time of image as current time
pub fn set_current_time(&mut self) {
self.mod_time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs() as u32;
}
/// Set kind as `kind`
///
/// # Example: Set kind to default V4.0
/// ```rust
/// # use backhand::{FilesystemWriter, kind::Kind, kind};
/// let mut fs = FilesystemWriter::default();
/// fs.set_kind(Kind::from_const(kind::LE_V4_0).unwrap());
/// ```
pub fn set_kind(&mut self, kind: Kind) {
self.kind = kind;
}
/// Set root mode as `mode`
///
/// # Example
///```rust
/// # use backhand::FilesystemWriter;
/// let mut fs = FilesystemWriter::default();
/// fs.set_root_mode(0o777);
/// ```
pub fn set_root_mode(&mut self, mode: u16) {
self.root.root_mut().header.permissions = mode;
}
/// Set root uid as `uid`
pub fn set_root_uid(&mut self, uid: u32) {
self.root.root_mut().header.uid = uid;
}
/// Set root gid as `gid`
pub fn set_root_gid(&mut self, gid: u32) {
self.root.root_mut().header.gid = gid;
}
/// Set compressor as `compressor`
///
///```rust
/// # use backhand::{FilesystemWriter, FilesystemCompressor, compression::Compressor};
/// let mut fs = FilesystemWriter::default();
/// let compressor = FilesystemCompressor::new(Compressor::Xz, None).unwrap();
/// fs.set_compressor(compressor);
/// ```
pub fn set_compressor(&mut self, compressor: FilesystemCompressor) {
self.fs_compressor = compressor;
}
/// Set id_table to [`Id::root`], removing old entries
pub fn set_only_root_id(&mut self) {
self.id_table = Id::root();
}
/// Set padding (zero bytes) added to the end of the image after calling [`write`].
///
/// For example, if given a `pad_kib` of 8, an 8 KiB padding will be added to the end of the image.
///
/// Default: [`DEFAULT_PAD_LEN`]
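///
/// # Example
/// A minimal sketch: pad the image out with 8 KiB of zeroes.
/// ```rust
/// # use backhand::FilesystemWriter;
/// let mut fs = FilesystemWriter::default();
/// fs.set_kib_padding(8);
/// ```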
pub fn set_kib_padding(&mut self, pad_kib: u32) {
self.pad_len = pad_kib * 1024;
}
/// Set *no* padding (zero bytes) to be added to the end of the image after calling [`write`].
pub fn set_no_padding(&mut self) {
self.pad_len = 0;
}
/// Set whether duplicate file checking is performed; on by default
pub fn set_no_duplicate_files(&mut self, value: bool) {
self.no_duplicate_files = value;
}
/// Set if compression options are written
pub fn set_emit_compression_options(&mut self, value: bool) {
self.emit_compression_options = value;
}
/// Inherit filesystem structure and properties from `reader`
pub fn from_fs_reader(reader: &'a FilesystemReader<'b>) -> Result<Self, BackhandError> {
let mut root: Vec<Node<_>> = reader
.root
.nodes
.iter()
.map(|node| {
let inner = match &node.inner {
InnerNode::File(file) => {
let reader = reader.file(file);
InnerNode::File(SquashfsFileWriter::SquashfsFile(reader))
}
InnerNode::Symlink(x) => InnerNode::Symlink(x.clone()),
InnerNode::Dir(x) => InnerNode::Dir(*x),
InnerNode::CharacterDevice(x) => InnerNode::CharacterDevice(*x),
InnerNode::BlockDevice(x) => InnerNode::BlockDevice(*x),
InnerNode::NamedPipe => InnerNode::NamedPipe,
InnerNode::Socket => InnerNode::Socket,
};
Node { fullpath: node.fullpath.clone(), header: node.header, inner }
})
.collect();
root.sort();
Ok(Self {
kind: Kind { inner: reader.kind.inner.clone() },
block_size: reader.block_size,
block_log: reader.block_log,
fs_compressor: FilesystemCompressor::new(
reader.compressor,
reader.compression_options,
)?,
mod_time: reader.mod_time,
id_table: reader.id_table.clone(),
root: Nodes { nodes: root },
pad_len: DEFAULT_PAD_LEN,
no_duplicate_files: reader.no_duplicate_files,
emit_compression_options: true,
})
}
//find the node relative to this path and return a mutable reference
fn mut_node<S>(&mut self, find_path: S) -> Option<&mut Node<SquashfsFileWriter<'a, 'b, 'c>>>
where
S: AsRef<Path>,
{
//the search path root prefix is optional, so remove it if present to
//not affect the search
let find_path = normalize_squashfs_path(find_path.as_ref()).ok()?;
self.root.node_mut(find_path)
}
fn insert_node<P>(
&mut self,
path: P,
header: NodeHeader,
node: InnerNode<SquashfsFileWriter<'a, 'b, 'c>>,
) -> Result<(), BackhandError>
where
P: AsRef<Path>,
{
// create gid id
self.lookup_add_id(header.gid);
// create uid id
self.lookup_add_id(header.uid);
let path = normalize_squashfs_path(path.as_ref())?;
let node = Node::new(path, header, node);
self.root.insert(node)
}
/// Insert `reader` into filesystem with `path` and metadata `header`.
///
/// The `uid` and `gid` in `header` are added to the FilesystemWriter's id table
pub fn push_file<P>(
&mut self,
reader: impl Read + 'c,
path: P,
header: NodeHeader,
) -> Result<(), BackhandError>
where
P: AsRef<Path>,
{
let reader = Arc::new(Mutex::new(reader));
let new_file = InnerNode::File(SquashfsFileWriter::UserDefined(reader));
self.insert_node(path, header, new_file)?;
Ok(())
}
/// Take a mutable reference to existing file at `find_path`
pub fn mut_file<S>(&mut self, find_path: S) -> Option<&mut SquashfsFileWriter<'a, 'b, 'c>>
where
S: AsRef<Path>,
{
self.mut_node(find_path).and_then(|node| {
if let InnerNode::File(file) = &mut node.inner {
Some(file)
} else {
None
}
})
}
/// Replace an existing file
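///
/// # Example
/// A minimal sketch, assuming an in-memory image with one file already pushed:
/// ```rust
/// # use std::io::Cursor;
/// # use backhand::{FilesystemWriter, NodeHeader};
/// let mut fs = FilesystemWriter::default();
/// fs.push_file(Cursor::new(vec![0x00, 0x01]), "file", NodeHeader::default()).unwrap();
/// fs.replace_file("file", Cursor::new(vec![0x02, 0x03])).unwrap();
/// ```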
pub fn replace_file<S>(
&mut self,
find_path: S,
reader: impl Read + 'c,
) -> Result<(), BackhandError>
where
S: AsRef<Path>,
{
let file = self.mut_file(find_path).ok_or(BackhandError::FileNotFound)?;
let reader = Arc::new(Mutex::new(reader));
*file = SquashfsFileWriter::UserDefined(reader);
Ok(())
}
/// Insert symlink `path` -> `link`
///
/// The `uid` and `gid` in `header` are added to the FilesystemWriter's id table
pub fn push_symlink<P, S>(
&mut self,
link: S,
path: P,
header: NodeHeader,
) -> Result<(), BackhandError>
where
P: AsRef<Path>,
S: Into<PathBuf>,
{
let new_symlink = InnerNode::Symlink(SquashfsSymlink { link: link.into() });
self.insert_node(path, header, new_symlink)?;
Ok(())
}
/// Insert empty `dir` at `path`
///
/// The `uid` and `gid` in `header` are added to the FilesystemWriter's id table
pub fn push_dir<P>(&mut self, path: P, header: NodeHeader) -> Result<(), BackhandError>
where
P: AsRef<Path>,
{
let new_dir = InnerNode::Dir(SquashfsDir::default());
self.insert_node(path, header, new_dir)?;
Ok(())
}
/// Recursively create an empty directory and all of its parent components
/// if they are missing.
///
/// The `uid` and `gid` in `header` are added to the FilesystemWriter's id table
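///
/// # Example
/// A minimal sketch: create nested directories in one call.
/// ```rust
/// # use backhand::{FilesystemWriter, NodeHeader};
/// let mut fs = FilesystemWriter::default();
/// fs.push_dir_all("usr/share/man", NodeHeader::default()).unwrap();
/// ```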
pub fn push_dir_all<P>(&mut self, path: P, header: NodeHeader) -> Result<(), BackhandError>
where
P: AsRef<Path>,
{
//the search path root prefix is optional, so remove it if present to
//not affect the search
let path = normalize_squashfs_path(path.as_ref())?;
//TODO this is not elegant, find a better solution
let ancestors: Vec<&Path> = path.ancestors().collect();
for file in ancestors.iter().rev() {
match self.root.nodes.binary_search_by(|node| node.fullpath.as_path().cmp(file)) {
Ok(index) => {
//if exists, but is not a directory, return an error
let node = &self.root.nodes[index];
if !matches!(&node.inner, InnerNode::Dir(_)) {
return Err(BackhandError::InvalidFilePath);
}
}
//if the dir doesn't exist, create it
Err(_index) => self.push_dir(file, header)?,
}
}
Ok(())
}
/// Insert character device with `device_number` at `path`
///
/// The `uid` and `gid` in `header` are added to the FilesystemWriter's id table
pub fn push_char_device<P>(
&mut self,
device_number: u32,
path: P,
header: NodeHeader,
) -> Result<(), BackhandError>
where
P: AsRef<Path>,
{
let new_device = InnerNode::CharacterDevice(SquashfsCharacterDevice { device_number });
self.insert_node(path, header, new_device)?;
Ok(())
}
/// Insert block device with `device_number` at `path`
///
/// The `uid` and `gid` in `header` are added to the FilesystemWriter's id table
pub fn push_block_device<P>(
&mut self,
device_number: u32,
path: P,
header: NodeHeader,
) -> Result<(), BackhandError>
where
P: AsRef<Path>,
{
let new_device = InnerNode::BlockDevice(SquashfsBlockDevice { device_number });
self.insert_node(path, header, new_device)?;
Ok(())
}
/// Insert FIFO (named pipe)
///
/// The `uid` and `gid` in `header` are added to the FilesystemWriter's id table
pub fn push_fifo<P>(&mut self, path: P, header: NodeHeader) -> Result<(), BackhandError>
where
P: AsRef<Path>,
{
let new_device = InnerNode::NamedPipe;
self.insert_node(path, header, new_device)?;
Ok(())
}
/// Insert Socket (UNIX domain socket)
///
/// The `uid` and `gid` in `header` are added to the FilesystemWriter's id table
pub fn push_socket<P>(&mut self, path: P, header: NodeHeader) -> Result<(), BackhandError>
where
P: AsRef<Path>,
{
let new_device = InnerNode::Socket;
self.insert_node(path, header, new_device)?;
Ok(())
}
/// Same as [`Self::write`], but seeks to `offset` in `w` before writing. This offset
/// is treated as the base image offset.
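///
/// # Example
/// A minimal sketch, assuming the image should start 1 MiB into the output file:
/// ```rust,no_run
/// # use std::fs::File;
/// # use backhand::FilesystemWriter;
/// let mut fs = FilesystemWriter::default();
/// let mut out = File::create("image.squashfs").unwrap();
/// fs.write_with_offset(&mut out, 1024 * 1024).unwrap();
/// ```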
pub fn write_with_offset<W>(
&mut self,
w: W,
offset: u64,
) -> Result<(SuperBlock, u64), BackhandError>
where
W: Write + Seek,
{
let mut writer = WriterWithOffset::new(w, offset)?;
self.write(&mut writer)
}
fn write_data<W>(
&mut self,
compressor: FilesystemCompressor,
block_size: u32,
mut writer: W,
data_writer: &mut DataWriter<'b>,
) -> Result<(), BackhandError>
where
W: WriteSeek,
{
let files = self.root.nodes.iter_mut().filter_map(|node| match &mut node.inner {
InnerNode::File(file) => Some(file),
_ => None,
});
for file in files {
let (filesize, added) = match file {
SquashfsFileWriter::UserDefined(file) => {
let file_ptr = Arc::clone(file);
let mut file_lock = file_ptr.lock().unwrap();
data_writer.add_bytes(&mut *file_lock, &mut writer)?
}
SquashfsFileWriter::SquashfsFile(file) => {
// if the source file and the destination files are both
// squashfs files and use the same compressor and block_size
// just copy the data, don't decompress->compress
if file.system.compressor == compressor.id
&& file.system.compression_options == compressor.options
&& file.system.block_size == block_size
{
data_writer.just_copy_it(file.raw_data_reader(), &mut writer)?
} else {
data_writer.add_bytes(file.reader(), &mut writer)?
}
}
SquashfsFileWriter::Consumed(_, _) => unreachable!(),
};
*file = SquashfsFileWriter::Consumed(filesize, added);
}
Ok(())
}
/// Create SquashFS file system from each node of Tree
///
/// This works by recursively creating Inodes and Dirs for each node in the tree. It also
/// keeps track of parent directories by calling this function on every direct child of a
/// dir, recursing into any child that is itself a dir.
#[allow(clippy::too_many_arguments)]
fn write_inode_dir<'slf>(
&'slf self,
inode_writer: &'_ mut MetadataWriter,
dir_writer: &'_ mut MetadataWriter,
parent_node_id: u32,
node_id: NonZeroUsize,
superblock: &SuperBlock,
kind: &Kind,
id_table: &Vec<Id>,
) -> Result<Entry<'slf>, BackhandError> {
let node = &self.root.node(node_id).unwrap();
let filename = node.fullpath.file_name().unwrap_or(OsStr::new("/"));
//if not a dir, return the entry
match &node.inner {
InnerNode::File(SquashfsFileWriter::Consumed(filesize, added)) => {
return Ok(Entry::file(
filename,
node.header,
node_id.get().try_into().unwrap(),
inode_writer,
*filesize,
added,
superblock,
kind,
id_table,
))
}
InnerNode::File(_) => unreachable!(),
InnerNode::Symlink(symlink) => {
return Ok(Entry::symlink(
filename,
node.header,
symlink,
node_id.get().try_into().unwrap(),
inode_writer,
superblock,
kind,
id_table,
))
}
InnerNode::CharacterDevice(char) => {
return Ok(Entry::char(
filename,
node.header,
char,
node_id.get().try_into().unwrap(),
inode_writer,
superblock,
kind,
id_table,
))
}
InnerNode::BlockDevice(block) => {
return Ok(Entry::block_device(
filename,
node.header,
block,
node_id.get().try_into().unwrap(),
inode_writer,
superblock,
kind,
id_table,
))
}
InnerNode::NamedPipe => {
return Ok(Entry::named_pipe(
filename,
node.header,
node_id.get().try_into().unwrap(),
inode_writer,
superblock,
kind,
id_table,
))
}
InnerNode::Socket => {
return Ok(Entry::socket(
filename,
node.header,
node_id.get().try_into().unwrap(),
inode_writer,
superblock,
kind,
id_table,
))
}
// if dir, fall through
InnerNode::Dir(_) => (),
};
// ladies and gentlemen, we have a directory
let entries: Vec<_> = self
.root
.children_of(node_id)
//only direct children
.filter(|(_child_id, child)| {
child.fullpath.parent().map(|child| child == node.fullpath).unwrap_or(false)
})
.map(|(child_id, _child)| {
self.write_inode_dir(
inode_writer,
dir_writer,
node_id.get().try_into().unwrap(),
child_id,
superblock,
kind,
id_table,
)
})
.collect::<Result<_, _>>()?;
let children_num = entries.len();
// write dir
let block_index = dir_writer.metadata_start;
let block_offset = dir_writer.uncompressed_bytes.len() as u16;
trace!("WRITING DIR: {block_offset:#02x?}");
let mut total_size: usize = 3;
for dir in Entry::into_dir(entries) {
let mut bytes = Cursor::new(vec![]);
let mut writer = Writer::new(&mut bytes);
dir.to_writer(&mut writer, kind.inner.type_endian)?;
total_size += bytes.get_ref().len();
dir_writer.write_all(bytes.get_ref())?;
}
let entry = Entry::path(
filename,
node.header,
node_id.get().try_into().unwrap(),
children_num,
parent_node_id,
inode_writer,
total_size,
block_offset,
block_index,
superblock,
kind,
id_table,
);
trace!("[{:?}] entries: {:#02x?}", filename, &entry);
Ok(entry)
}
/// Generate and write the resulting squashfs image to `w`
///
/// # Returns
/// (written populated [`SuperBlock`], total amount of bytes written including padding)
pub fn write<W: Write + Seek>(&mut self, mut w: W) -> Result<(SuperBlock, u64), BackhandError> {
let mut superblock =
SuperBlock::new(self.fs_compressor.id, Kind { inner: self.kind.inner.clone() });
if self.no_duplicate_files {
superblock.flags |= Flags::DataHasBeenDeduplicated as u16;
}
trace!("{:#02x?}", self.root);
// Empty Squashfs Superblock
w.write_all(&[0x00; 96])?;
if self.emit_compression_options {
trace!("writing compression options, if exists");
let options = self.kind.inner.compressor.compression_options(
&mut superblock,
&self.kind,
self.fs_compressor,
)?;
w.write_all(&options)?;
}
let mut data_writer = DataWriter::new(
self.kind.inner.compressor,
self.fs_compressor,
self.block_size,
self.no_duplicate_files,
);
let mut inode_writer = MetadataWriter::new(
self.fs_compressor,
self.block_size,
Kind { inner: self.kind.inner.clone() },
);
let mut dir_writer = MetadataWriter::new(
self.fs_compressor,
self.block_size,
Kind { inner: self.kind.inner.clone() },
);
info!("Creating Inodes and Dirs");
//trace!("TREE: {:#02x?}", &self.root);
info!("Writing Data");
self.write_data(self.fs_compressor, self.block_size, &mut w, &mut data_writer)?;
info!("Writing Data Fragments");
// Compress fragments and write
data_writer.finalize(&mut w)?;
info!("Writing Other stuff");
let root = self.write_inode_dir(
&mut inode_writer,
&mut dir_writer,
0,
1.try_into().unwrap(),
&superblock,
&self.kind,
&self.id_table,
)?;
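// root_inode is a packed metadata reference: the metadata block's start in the
// upper bits, the offset within the uncompressed block in the lower 16 bits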
superblock.root_inode = ((root.start as u64) << 16) | ((root.offset as u64) & 0xffff);
superblock.inode_count = self.root.nodes.len().try_into().unwrap();
superblock.block_size = self.block_size;
superblock.block_log = self.block_log;
superblock.mod_time = self.mod_time;
info!("Writing Inodes");
superblock.inode_table = w.stream_position()?;
inode_writer.finalize(&mut w)?;
info!("Writing Dirs");
superblock.dir_table = w.stream_position()?;
dir_writer.finalize(&mut w)?;
info!("Writing Frag Lookup Table");
let (table_position, count) =
self.write_lookup_table(&mut w, &data_writer.fragment_table, fragment::SIZE)?;
superblock.frag_table = table_position;
superblock.frag_count = count;
info!("Writing Id Lookup Table");
let (table_position, count) = self.write_lookup_table(&mut w, &self.id_table, Id::SIZE)?;
superblock.id_table = table_position;
superblock.id_count = count.try_into().unwrap();
info!("Finalize Superblock and End Bytes");
let bytes_written = self.finalize(w, &mut superblock)?;
info!("Success");
Ok((superblock, bytes_written))
}
fn finalize<W>(&self, mut w: W, superblock: &mut SuperBlock) -> Result<u64, BackhandError>
where
W: Write + Seek,
{
superblock.bytes_used = w.stream_position()?;
// pad bytes if required
let mut pad_len = 0;
if self.pad_len != 0 {
// Pad out to the next multiple of pad_len
info!("Writing Padding");
let blocks_used: u32 = u32::try_from(superblock.bytes_used).unwrap() / self.pad_len;
let total_pad_len = (blocks_used + 1) * self.pad_len;
pad_len = total_pad_len - u32::try_from(superblock.bytes_used).unwrap();
// Write 1K at a time
let mut total_written = 0;
while w.stream_position()? < (superblock.bytes_used + u64::from(pad_len)) {
let arr = &[0x00; 1024];
// check if last block to write
let len = if (pad_len - total_written) < 1024 {
(pad_len - total_written) % 1024
} else {
// else, full 1K
1024
};
w.write_all(&arr[..len.try_into().unwrap()])?;
total_written += len;
}
}
// Seek back to the beginning and write the superblock
info!("Writing Superblock");
w.rewind()?;
let mut writer = Writer::new(&mut w);
superblock.to_writer(
&mut writer,
(
self.kind.inner.magic,
self.kind.inner.version_major,
self.kind.inner.version_minor,
self.kind.inner.type_endian,
),
)?;
info!("Writing Finished");
//clean any cache, make sure the output is on disk
w.flush()?;
Ok(superblock.bytes_used + u64::from(pad_len))
}
/// For example, writing a fragment table:
/// ```text
/// ┌──────────────────────────────┐
/// │Metadata                      │◄───┐
/// │┌────────────────────────────┐│    │
/// ││pointer to fragment block   ││    │
/// │├────────────────────────────┤│    │
/// ││pointer to fragment block   ││    │
/// │└────────────────────────────┘│    │
/// └──────────────────────────────┘    │
/// ┌──────────────────────────────┐    │
/// │Metadata                      │◄─┐ │
/// │┌────────────────────────────┐│  │ │
/// ││pointer to fragment block   ││  │ │
/// │├────────────────────────────┤│  │ │
/// ││pointer to fragment block   ││  │ │
/// │└────────────────────────────┘│  │ │
/// └──────────────────────────────┘  │ │
/// ┌──────────────────────────────┐──│─│───►superblock.frag_table
/// │Frag Table                    │  │ │
/// │┌────────────────────────────┐│  │ │
/// ││fragment0(u64)         ─────││──│─┘
/// │├────────────────────────────┤│  │
/// ││fragment1(u64)         ─────││──┘
/// │└────────────────────────────┘│
/// └──────────────────────────────┘
/// ```
fn write_lookup_table<D, W>(
&self,
mut w: W,
table: &[D],
element_size: usize,
) -> Result<(u64, u32), BackhandError>
where
D: DekuWriter<deku::ctx::Endian>,
W: Write + Seek,
{
let mut ptrs: Vec<u64> = vec![];
let mut table_bytes = Cursor::new(Vec::with_capacity(table.len() * element_size));
let mut iter = table.iter().peekable();
while let Some(t) = iter.next() {
// convert fragment ptr to bytes
let mut table_writer = Writer::new(&mut table_bytes);
t.to_writer(&mut table_writer, self.kind.inner.type_endian)?;
// once table_bytes + next is over the maximum size of a metadata block, write
if ((table_bytes.get_ref().len() + element_size) > METADATA_MAXSIZE)
|| iter.peek().is_none()
{
ptrs.push(w.stream_position()?);
// write metadata len
let len = metadata::set_if_uncompressed(table_bytes.get_ref().len() as u16);
let mut writer = Writer::new(&mut w);
len.to_writer(&mut writer, self.kind.inner.data_endian)?;
// write metadata bytes
w.write_all(table_bytes.get_ref())?;
table_bytes.get_mut().clear();
table_bytes.rewind()?;
}
}
let table_position = w.stream_position()?;
let count = table.len() as u32;
// write ptr
for ptr in ptrs {
let mut writer = Writer::new(&mut w);
ptr.to_writer(&mut writer, self.kind.inner.type_endian)?;
}
Ok((table_position, count))
}
/// Return index of id, adding if required
fn lookup_add_id(&mut self, id: u32) -> u32 {
let found = self.id_table.iter().position(|a| a.num == id);
match found {
Some(found) => found as u32,
None => {
self.id_table.push(Id::new(id));
self.id_table.len() as u32 - 1
}
}
}
}
struct WriterWithOffset<W: WriteSeek> {
w: W,
offset: u64,
}
impl<W: WriteSeek> WriterWithOffset<W> {
pub fn new(mut w: W, offset: u64) -> std::io::Result<Self> {
w.seek(SeekFrom::Start(offset))?;
Ok(Self { w, offset })
}
}
impl<W> Write for WriterWithOffset<W>
where
W: WriteSeek,
{
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
self.w.write(buf)
}
fn flush(&mut self) -> std::io::Result<()> {
self.w.flush()
}
}
impl<W> Seek for WriterWithOffset<W>
where
W: Write + Seek,
{
fn seek(&mut self, pos: SeekFrom) -> std::io::Result<u64> {
let seek = match pos {
SeekFrom::Start(start) => SeekFrom::Start(self.offset + start),
seek => seek,
};
self.w.seek(seek).map(|x| x - self.offset)
}
}
/// All compression options for [`FilesystemWriter`]
#[derive(Debug, Copy, Clone, Default)]
pub struct FilesystemCompressor {
pub(crate) id: Compressor,
pub(crate) options: Option<CompressionOptions>,
pub(crate) extra: Option<CompressionExtra>,
}
impl FilesystemCompressor {
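/// Create a new [`FilesystemCompressor`], validating that `options` (if present)
/// matches `id`. Lz4 always requires options.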
pub fn new(id: Compressor, options: Option<CompressionOptions>) -> Result<Self, BackhandError> {
match (id, options) {
// lz4 always requires options
(Compressor::Lz4, None) => {
error!("Lz4 compression options missing");
return Err(BackhandError::InvalidCompressionOption);
}
//others having no options is always valid
(_, None) => {}
//only the corresponding option are valid
(Compressor::Gzip, Some(CompressionOptions::Gzip(_)))
| (Compressor::Lzma, Some(CompressionOptions::Lzma))
| (Compressor::Lzo, Some(CompressionOptions::Lzo(_)))
| (Compressor::Xz, Some(CompressionOptions::Xz(_)))
| (Compressor::Lz4, Some(CompressionOptions::Lz4(_)))
| (Compressor::Zstd, Some(CompressionOptions::Zstd(_))) => {}
//other combinations are invalid
_ => {
error!("invalid compression settings");
return Err(BackhandError::InvalidCompressionOption);
}
}
Ok(Self { id, options, extra: None })
}
/// Set options that were originally derived from the image, if created from a [`FilesystemReader`].
/// These options will be written to the image when
/// <https://github.com/wcampbell0x2a/backhand/issues/53> is fixed.
pub fn options(&mut self, options: CompressionOptions) -> Result<(), BackhandError> {
self.options = Some(options);
Ok(())
}
/// Extra options that are *only* used during compression and are *not* stored in the
/// resulting image
pub fn extra(&mut self, extra: CompressionExtra) -> Result<(), BackhandError> {
if matches!(extra, CompressionExtra::Xz(_)) && matches!(self.id, Compressor::Xz) {
self.extra = Some(extra);
return Ok(());
}
error!("invalid extra compression settings");
Err(BackhandError::InvalidCompressionOption)
}
}
/// Compression options only for [`FilesystemWriter`]
#[derive(Debug, Copy, Clone)]
pub enum CompressionExtra {
Xz(ExtraXz),
}
/// Xz compression option for [`FilesystemWriter`]
#[derive(Debug, Copy, Clone, Default)]
pub struct ExtraXz {
pub(crate) level: Option<u32>,
}
impl ExtraXz {
/// Set compress preset level. Must be in range `0..=9`
pub fn level(&mut self, level: u32) -> Result<(), BackhandError> {
if level > 9 {
return Err(BackhandError::InvalidCompressionOption);
}
self.level = Some(level);
Ok(())
}
}
07070100000047000081A40000000000000000000000016854DB950000022B000000000000000000000000000000000000002900000000backhand-0.23.0/backhand/src/fragment.rs//! Data Fragment support
use deku::prelude::*;
use crate::data::DataSize;
pub(crate) const SIZE: usize =
std::mem::size_of::<u64>() + std::mem::size_of::<u32>() + std::mem::size_of::<u32>();
#[derive(Copy, Clone, Debug, PartialEq, Eq, DekuRead, DekuWrite)]
#[deku(endian = "type_endian", ctx = "type_endian: deku::ctx::Endian")]
pub struct Fragment {
pub start: u64,
pub size: DataSize,
pub unused: u32,
}
impl Fragment {
pub fn new(start: u64, size: DataSize, unused: u32) -> Self {
Self { start, size, unused }
}
}
07070100000048000081A40000000000000000000000016854DB95000001A3000000000000000000000000000000000000002300000000backhand-0.23.0/backhand/src/id.rsuse deku::prelude::*;
/// 32 bit user and group IDs
#[derive(Debug, Copy, Clone, DekuRead, DekuWrite, PartialEq, Eq)]
#[deku(endian = "type_endian", ctx = "type_endian: deku::ctx::Endian")]
pub struct Id {
pub num: u32,
}
impl Id {
pub const SIZE: usize = (u32::BITS / 8) as usize;
pub fn new(num: u32) -> Id {
Id { num }
}
pub fn root() -> Vec<Id> {
vec![Id { num: 0 }]
}
}
07070100000049000081A40000000000000000000000016854DB9500001C7C000000000000000000000000000000000000002600000000backhand-0.23.0/backhand/src/inode.rs//! Index Node for file or directory
use core::fmt;
use std::io::{Cursor, Write};
use deku::prelude::*;
use crate::data::DataSize;
use crate::dir::DirectoryIndex;
use crate::entry::Entry;
use crate::kind::Kind;
use crate::metadata::MetadataWriter;
use crate::squashfs::SuperBlock;
#[derive(Debug, DekuRead, DekuWrite, Clone, PartialEq, Eq)]
#[deku(ctx = "bytes_used: u64, block_size: u32, block_log: u16, type_endian: deku::ctx::Endian")]
#[deku(endian = "type_endian")]
pub struct Inode {
pub id: InodeId,
pub header: InodeHeader,
#[deku(ctx = "*id, bytes_used, block_size, block_log")]
pub inner: InodeInner,
}
impl Inode {
pub fn new(id: InodeId, header: InodeHeader, inner: InodeInner) -> Self {
Inode { id, header, inner }
}
/// Write to `m_writer`, creating Entry
pub(crate) fn to_bytes<'a>(
&self,
name: &'a [u8],
m_writer: &mut MetadataWriter,
superblock: &SuperBlock,
kind: &Kind,
) -> Entry<'a> {
let mut inode_bytes = Cursor::new(vec![]);
let mut writer = Writer::new(&mut inode_bytes);
self.to_writer(
&mut writer,
(
0xffff_ffff_ffff_ffff, // bytes_used is unused for ctx. set to max
superblock.block_size,
superblock.block_log,
kind.inner.type_endian,
),
)
.unwrap();
let start = m_writer.metadata_start;
let offset = m_writer.uncompressed_bytes.len() as u16;
m_writer.write_all(inode_bytes.get_ref()).unwrap();
Entry {
start,
offset,
inode: self.header.inode_number,
t: self.id,
name_size: name.len() as u16 - 1,
name,
}
}
}
#[derive(Debug, DekuRead, DekuWrite, Clone, Copy, PartialEq, Eq)]
#[deku(id_type = "u16")]
#[deku(endian = "endian", ctx = "endian: deku::ctx::Endian")]
#[repr(u16)]
#[rustfmt::skip]
pub enum InodeId {
BasicDirectory = 1,
BasicFile = 2,
BasicSymlink = 3,
BasicBlockDevice = 4,
BasicCharacterDevice = 5,
BasicNamedPipe = 6, // aka FIFO
BasicSocket = 7,
ExtendedDirectory = 8,
ExtendedFile = 9,
// TODO:
// Extended Symlink = 10
// Extended Block Device = 11
// Extended Character Device = 12
// Extended Named Pipe (FIFO) = 13
// Extended Socket = 14
}
impl InodeId {
pub(crate) fn into_base_type(self) -> Self {
match self {
Self::ExtendedDirectory => InodeId::BasicDirectory,
Self::ExtendedFile => InodeId::BasicFile,
_ => self,
}
}
}
#[derive(Debug, DekuRead, DekuWrite, Clone, PartialEq, Eq)]
#[deku(
ctx = "endian: deku::ctx::Endian, id: InodeId, bytes_used: u64, block_size: u32, block_log: u16"
)]
#[deku(endian = "endian")]
#[deku(id = "id")]
pub enum InodeInner {
#[deku(id = "InodeId::BasicDirectory")]
BasicDirectory(BasicDirectory),
#[deku(id = "InodeId::BasicFile")]
BasicFile(#[deku(ctx = "block_size, block_log")] BasicFile),
#[deku(id = "InodeId::BasicSymlink")]
BasicSymlink(BasicSymlink),
#[deku(id = "InodeId::BasicBlockDevice")]
BasicBlockDevice(BasicDeviceSpecialFile),
#[deku(id = "InodeId::BasicCharacterDevice")]
BasicCharacterDevice(BasicDeviceSpecialFile),
#[deku(id = "InodeId::BasicNamedPipe")]
BasicNamedPipe(IPCNode),
#[deku(id = "InodeId::BasicSocket")]
BasicSocket(IPCNode),
#[deku(id = "InodeId::ExtendedDirectory")]
ExtendedDirectory(ExtendedDirectory),
#[deku(id = "InodeId::ExtendedFile")]
ExtendedFile(#[deku(ctx = "bytes_used, block_size, block_log")] ExtendedFile),
}
#[derive(Debug, DekuRead, DekuWrite, Clone, Copy, PartialEq, Eq, Default)]
#[deku(endian = "endian", ctx = "endian: deku::ctx::Endian")]
pub struct InodeHeader {
pub permissions: u16,
/// index into id table
pub uid: u16,
/// index into id table
pub gid: u16,
pub mtime: u32,
pub inode_number: u32,
}
#[derive(Debug, DekuRead, DekuWrite, Clone, PartialEq, Eq)]
#[deku(endian = "endian", ctx = "endian: deku::ctx::Endian")]
pub struct BasicDirectory {
pub block_index: u32,
pub link_count: u32,
pub file_size: u16,
pub block_offset: u16,
pub parent_inode: u32,
}
#[derive(Debug, DekuRead, DekuWrite, Clone, PartialEq, Eq)]
#[deku(endian = "endian", ctx = "endian: deku::ctx::Endian")]
pub struct ExtendedDirectory {
pub link_count: u32,
pub file_size: u32,
pub block_index: u32,
pub parent_inode: u32,
pub index_count: u16,
pub block_offset: u16,
pub xattr_index: u32,
#[deku(count = "*index_count")]
pub dir_index: Vec<DirectoryIndex>,
}
#[allow(non_upper_case_globals)]
const TiB2: u128 = 0x200_0000_0000;
#[derive(Debug, DekuRead, DekuWrite, Clone, PartialEq, Eq)]
#[deku(endian = "endian", ctx = "endian: deku::ctx::Endian, block_size: u32, block_log: u16")]
pub struct BasicFile {
pub blocks_start: u32,
pub frag_index: u32,
pub block_offset: u32,
#[deku(assert = "((*file_size as u128) < TiB2)")]
pub file_size: u32,
#[deku(count = "block_count(block_size, block_log, *frag_index, *file_size as u64)")]
pub block_sizes: Vec<DataSize>,
}
#[derive(Debug, DekuRead, DekuWrite, Clone, PartialEq, Eq)]
#[deku(
endian = "endian",
ctx = "endian: deku::ctx::Endian, bytes_used: u64, block_size: u32, block_log: u16"
)]
pub struct ExtendedFile {
pub blocks_start: u64,
#[deku(assert = "((*file_size as u128) < TiB2)")]
pub file_size: u64,
pub sparse: u64,
pub link_count: u32,
pub frag_index: u32,
pub block_offset: u32,
pub xattr_index: u32,
#[deku(count = "block_count(block_size, block_log, *frag_index, *file_size)")]
pub block_sizes: Vec<DataSize>,
}
fn block_count(block_size: u32, block_log: u16, fragment: u32, file_size: u64) -> u64 {
const NO_FRAGMENT: u32 = 0xffffffff;
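// without a fragment the tail occupies a (possibly partial) final block, so round
// up; with a fragment the tail is stored out-of-line, so round down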
if fragment == NO_FRAGMENT {
(file_size + u64::from(block_size) - 1) >> block_log
} else {
file_size >> block_log
}
}
#[derive(DekuRead, DekuWrite, Clone, PartialEq, Eq)]
#[deku(endian = "endian", ctx = "endian: deku::ctx::Endian")]
pub struct BasicSymlink {
pub link_count: u32,
#[deku(assert = "*target_size < 256")]
pub target_size: u32,
#[deku(count = "target_size")]
pub target_path: Vec<u8>,
}
impl fmt::Debug for BasicSymlink {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("BasicSymlink")
.field("link_count", &self.link_count)
.field("target_size", &self.target_size)
.field("target_path", &self.target())
.finish()
}
}
impl BasicSymlink {
pub fn target(&self) -> String {
std::str::from_utf8(&self.target_path).unwrap().to_string()
}
}
#[derive(Debug, DekuRead, DekuWrite, Clone, PartialEq, Eq)]
#[deku(endian = "endian", ctx = "endian: deku::ctx::Endian")]
pub struct BasicDeviceSpecialFile {
pub link_count: u32,
pub device_number: u32,
}
#[derive(Debug, DekuRead, DekuWrite, Clone, PartialEq, Eq)]
#[deku(endian = "endian", ctx = "endian: deku::ctx::Endian")]
pub struct IPCNode {
pub link_count: u32,
}
0707010000004A000081A40000000000000000000000016854DB95000022F2000000000000000000000000000000000000002600000000backhand-0.23.0/backhand/src/kinds.rs//! Types of image formats
use core::fmt;
use std::sync::Arc;
use crate::compressor::{CompressionAction, DefaultCompressor};
/// Kind Magic - First 4 bytes of image
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[non_exhaustive]
pub enum Magic {
/// Little Endian `b"hsqs"`
Little,
/// Big Endian `b"sqsh"`
Big,
}
impl Magic {
fn magic(self) -> [u8; 4] {
match self {
Self::Little => *b"hsqs",
Self::Big => *b"sqsh",
}
}
}
/// Kind Endian
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Endian {
Little,
Big,
}
pub struct InnerKind<C: CompressionAction + ?Sized + 'static + Send + Sync> {
/// Magic at the beginning of the image
pub(crate) magic: [u8; 4],
/// Endian used for all data types
pub(crate) type_endian: deku::ctx::Endian,
/// Endian used for Metadata Lengths
pub(crate) data_endian: deku::ctx::Endian,
/// Major version
pub(crate) version_major: u16,
/// Minor version
pub(crate) version_minor: u16,
/// Compression impl
pub(crate) compressor: &'static C,
}
/// Version of SquashFS, also supporting custom changes to SquashFS seen in 3rd-party firmware
///
/// See [Kind Constants](`crate::kind#constants`) for a list of custom Kinds
pub struct Kind {
/// "Easier for the eyes" type for the real Kind
pub(crate) inner: Arc<InnerKind<dyn CompressionAction + Send + Sync>>,
}
impl fmt::Debug for Kind {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Kind")
.field("magic", &self.inner.magic)
.field("type_endian", &self.inner.type_endian)
.field("data_endian", &self.inner.data_endian)
.field("version_major", &self.inner.version_major)
.field("version_minor", &self.inner.version_minor)
.finish()
}
}
impl Kind {
/// Create [`LE_V4_0`] with custom `compressor`.
///
/// Use other [`Kind`] functions such as [`Kind::with_magic`] to change settings other
/// than `compressor`.
///
/// Use [`Kind::new_with_const`] when using a custom compressor with something other than
/// [`LE_V4_0`].
///
/// # Example
/// ```rust
/// # use backhand::{compression::Compressor, kind, FilesystemCompressor, kind::Kind, compression::CompressionAction, compression::DefaultCompressor, BackhandError};
/// # use backhand::SuperBlock;
/// # use std::io::Write;
/// #[derive(Copy, Clone)]
/// pub struct CustomCompressor;
///
/// // Special decompress that only has support for the Rust version of gzip: zune-inflate for
/// // decompression.
/// impl CompressionAction for CustomCompressor {
/// fn decompress(
/// &self,
/// bytes: &[u8],
/// out: &mut Vec<u8>,
/// compressor: Compressor,
/// ) -> Result<(), BackhandError> {
/// if let Compressor::Gzip = compressor {
/// out.resize(out.capacity(), 0);
/// let mut decompressor = libdeflater::Decompressor::new();
/// let amt = decompressor.zlib_decompress(&bytes, out).unwrap();
/// out.truncate(amt);
/// } else {
/// unimplemented!();
/// }
///
/// Ok(())
/// }
///
/// // Just pass to default compressor
/// fn compress(
/// &self,
/// bytes: &[u8],
/// fc: FilesystemCompressor,
/// block_size: u32,
/// ) -> Result<Vec<u8>, BackhandError> {
/// DefaultCompressor.compress(bytes, fc, block_size)
/// }
///
/// // pass the default options
/// fn compression_options(
/// &self,
/// _superblock: &mut SuperBlock,
/// _kind: &Kind,
/// _fs_compressor: FilesystemCompressor,
/// ) -> Result<Vec<u8>, BackhandError> {
/// DefaultCompressor.compression_options(_superblock, _kind, _fs_compressor)
/// }
/// }
///
/// let kind = Kind::new(&CustomCompressor);
/// ```
pub fn new<C: CompressionAction + Send + Sync>(compressor: &'static C) -> Self {
Self { inner: Arc::new(InnerKind { compressor, ..LE_V4_0 }) }
}
pub fn new_with_const<C: CompressionAction + Send + Sync>(
compressor: &'static C,
c: InnerKind<dyn CompressionAction + Send + Sync>,
) -> Self {
Self { inner: Arc::new(InnerKind { compressor, ..c }) }
}
/// From a string, return a kind
///
/// # Example
/// Get a default [`Kind`]
/// ```rust
/// # use backhand::{kind, kind::Kind};
/// let kind = Kind::from_target("le_v4_0").unwrap();
/// ```
/// # Returns
/// - `"le_v4_0"`: [`LE_V4_0`]
/// - `"be_v4_0"`: [`BE_V4_0`]
/// - `"avm_be_v4_0"`: [`AVM_BE_V4_0`]
pub fn from_target(s: &str) -> Result<Kind, String> {
let kind = match s {
"avm_be_v4_0" => AVM_BE_V4_0,
"be_v4_0" => BE_V4_0,
"le_v4_0" => LE_V4_0,
_ => return Err("not a valid kind".to_string()),
};
Ok(Kind { inner: Arc::new(kind) })
}
/// From a known Squashfs image Kind, return a [`Kind`]
///
/// # Example
/// Get a default [`Kind`]
///
/// ```rust
/// # use backhand::{kind, kind::Kind};
/// let kind = Kind::from_const(kind::LE_V4_0).unwrap();
/// ```
pub fn from_const(
inner: InnerKind<dyn CompressionAction + Send + Sync>,
) -> Result<Kind, String> {
Ok(Kind { inner: Arc::new(inner) })
}
/// From an existing [`Kind`], return a [`Kind`] sharing the same inner settings
///
/// # Example
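/// A minimal sketch: reuse the settings of an already-created kind.
/// ```rust
/// # use backhand::{kind, kind::Kind};
/// let kind = Kind::from_const(kind::LE_V4_0).unwrap();
/// let copy = Kind::from_kind(&kind);
/// ```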
pub fn from_kind(kind: &Kind) -> Kind {
Self { inner: kind.inner.clone() }
}
/// Set magic type at the beginning of the image
///
/// # Example
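/// A minimal sketch: start from the default LE v4.0 kind and swap the magic bytes.
/// ```rust
/// # use backhand::kind::{Kind, Magic};
/// let kind = Kind::from_target("le_v4_0").unwrap().with_magic(Magic::Big);
/// assert_eq!(kind.magic(), *b"sqsh");
/// ```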
pub fn with_magic(mut self, magic: Magic) -> Self {
Arc::get_mut(&mut self.inner).unwrap().magic = magic.magic();
self
}
pub fn magic(&self) -> [u8; 4] {
self.inner.magic
}
/// Set endian used for data types
// TODO: example
pub fn with_type_endian(mut self, endian: Endian) -> Self {
match endian {
Endian::Little => {
Arc::get_mut(&mut self.inner).unwrap().type_endian = deku::ctx::Endian::Little;
}
Endian::Big => {
Arc::get_mut(&mut self.inner).unwrap().type_endian = deku::ctx::Endian::Big;
}
}
self
}
/// Set endian used for Metadata lengths
// TODO: example
pub fn with_data_endian(mut self, endian: Endian) -> Self {
match endian {
Endian::Little => {
Arc::get_mut(&mut self.inner).unwrap().data_endian = deku::ctx::Endian::Little;
}
Endian::Big => {
Arc::get_mut(&mut self.inner).unwrap().data_endian = deku::ctx::Endian::Big;
}
}
self
}
/// Set both type and data endian
///
/// # Example
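/// A minimal sketch: force both the type and data endianness to big-endian.
/// ```rust
/// # use backhand::kind::{Endian, Kind};
/// let kind = Kind::from_target("le_v4_0").unwrap().with_all_endian(Endian::Big);
/// ```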
pub fn with_all_endian(mut self, endian: Endian) -> Self {
match endian {
Endian::Little => {
Arc::get_mut(&mut self.inner).unwrap().type_endian = deku::ctx::Endian::Little;
Arc::get_mut(&mut self.inner).unwrap().data_endian = deku::ctx::Endian::Little;
}
Endian::Big => {
Arc::get_mut(&mut self.inner).unwrap().type_endian = deku::ctx::Endian::Big;
Arc::get_mut(&mut self.inner).unwrap().data_endian = deku::ctx::Endian::Big;
}
}
self
}
/// Set major and minor version
///
/// # Example
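/// A minimal sketch: explicitly declare a v4.0 image.
/// ```rust
/// # use backhand::kind::Kind;
/// let kind = Kind::from_target("le_v4_0").unwrap().with_version(4, 0);
/// ```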
pub fn with_version(mut self, major: u16, minor: u16) -> Self {
Arc::get_mut(&mut self.inner).unwrap().version_major = major;
Arc::get_mut(&mut self.inner).unwrap().version_minor = minor;
self
}
}
/// Default `Kind` for linux kernel and squashfs-tools/mksquashfs. Little-Endian v4.0
pub const LE_V4_0: InnerKind<dyn CompressionAction + Send + Sync> = InnerKind {
magic: *b"hsqs",
type_endian: deku::ctx::Endian::Little,
data_endian: deku::ctx::Endian::Little,
version_major: 4,
version_minor: 0,
compressor: &DefaultCompressor,
};
/// Big-Endian Superblock v4.0
pub const BE_V4_0: InnerKind<dyn CompressionAction + Send + Sync> = InnerKind {
magic: *b"sqsh",
type_endian: deku::ctx::Endian::Big,
data_endian: deku::ctx::Endian::Big,
version_major: 4,
version_minor: 0,
compressor: &DefaultCompressor,
};
/// AVM Fritz!OS firmware support. Tested with: <https://github.com/dnicolodi/squashfs-avm-tools>
pub const AVM_BE_V4_0: InnerKind<dyn CompressionAction + Send + Sync> = InnerKind {
magic: *b"sqsh",
type_endian: deku::ctx::Endian::Big,
data_endian: deku::ctx::Endian::Little,
version_major: 4,
version_minor: 0,
compressor: &DefaultCompressor,
};
0707010000004B000081A40000000000000000000000016854DB9500000D86000000000000000000000000000000000000002400000000backhand-0.23.0/backhand/src/lib.rs//! Library and binaries for the reading, creating, and modification
//! of [SquashFS](https://en.wikipedia.org/wiki/SquashFS) file systems.
//!
//! ## Library
//! Add the following to your `Cargo.toml` file:
//! ```toml
//! [dependencies]
//! backhand = "0.23.0"
//! ```
//!
//! ### Reading
//! For reading an image and extracting its details and contents, use
//! [`FilesystemReader::from_reader`].
//!
//! ### Writing
//! For creating a modified or new image, use [`FilesystemWriter::from_fs_reader`].
//! [`FilesystemWriter`] can also be created from scratch, without a previous image to base itself
//! on.
//!
//!### Example
//!```rust,no_run
//! # use std::fs::File;
//! # use std::io::{Cursor, BufReader};
//! # use backhand::{FilesystemReader, FilesystemWriter, NodeHeader};
//! // read
//! let file = BufReader::new(File::open("file.squashfs").unwrap());
//! let read_filesystem = FilesystemReader::from_reader(file).unwrap();
//!
//! // convert to writer
//! let mut write_filesystem = FilesystemWriter::from_fs_reader(&read_filesystem).unwrap();
//!
//! // add file with data from slice
//! let d = NodeHeader::default();
//! let bytes = Cursor::new(b"Fear is the mind-killer.");
//! write_filesystem.push_file(bytes, "a/d/e/new_file", d);
//!
//! // add file with data from file
//! let new_file = File::open("dune").unwrap();
//! write_filesystem.push_file(new_file, "/root/dune", d);
//!
//! // replace an existing file
//! let bytes = Cursor::new(b"The sleeper must awaken.\n");
//! write_filesystem
//! .replace_file("/a/b/c/d/e/first_file", bytes)
//! .unwrap();
//!
//! // write into a new file
//! let mut output = File::create("modified.squashfs").unwrap();
//! write_filesystem.write(&mut output).unwrap();
//! ```
//!
//! # Features
#![cfg_attr(feature = "document-features", doc = document_features::document_features!())]
#![cfg_attr(docsrs, feature(doc_cfg))]
#[cfg(doctest)]
#[doc = include_str!("../../README.md")]
type _ReadmeTest = ();
mod compressor;
mod data;
mod dir;
mod entry;
mod error;
mod export;
mod filesystem;
mod fragment;
mod id;
mod inode;
mod kinds;
mod metadata;
mod reader;
mod squashfs;
mod unix_string;
pub use crate::data::DataSize;
pub use crate::error::BackhandError;
pub use crate::export::Export;
pub use crate::filesystem::node::{
InnerNode, Node, NodeHeader, SquashfsBlockDevice, SquashfsCharacterDevice, SquashfsDir,
SquashfsFileReader, SquashfsFileWriter, SquashfsSymlink,
};
pub use crate::filesystem::reader::{FilesystemReader, FilesystemReaderFile};
#[cfg(not(feature = "parallel"))]
pub use crate::filesystem::reader_no_parallel::SquashfsReadFile;
#[cfg(feature = "parallel")]
pub use crate::filesystem::reader_parallel::SquashfsReadFile;
pub use crate::filesystem::writer::{
CompressionExtra, ExtraXz, FilesystemCompressor, FilesystemWriter,
};
pub use crate::fragment::Fragment;
pub use crate::id::Id;
pub use crate::inode::{BasicFile, Inode};
pub use crate::reader::BufReadSeek;
pub use crate::squashfs::{
Flags, Squashfs, SuperBlock, DEFAULT_BLOCK_SIZE, DEFAULT_PAD_LEN, MAX_BLOCK_SIZE,
MIN_BLOCK_SIZE,
};
/// Support the wonderful world of vendor formats
pub mod kind {
pub use crate::kinds::{Endian, Kind, Magic, AVM_BE_V4_0, BE_V4_0, LE_V4_0};
}
/// Compression Choice and Options
pub mod compression {
pub use crate::compressor::{
CompressionAction, CompressionOptions, Compressor, DefaultCompressor, Gzip, Lz4, Lzo, Xz,
Zstd,
};
}
0707010000004C000081A40000000000000000000000016854DB9500001379000000000000000000000000000000000000002900000000backhand-0.23.0/backhand/src/metadata.rsuse std::collections::VecDeque;
use std::io::{self, Read, Seek, Write};
use deku::prelude::*;
use tracing::trace;
use crate::error::BackhandError;
use crate::filesystem::writer::FilesystemCompressor;
use crate::kinds::Kind;
use crate::squashfs::SuperBlock;
pub const METADATA_MAXSIZE: usize = 0x2000;
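// bit 15 of a metadata block's u16 length field marks the block as stored uncompressed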
const METADATA_UNCOMPRESSED: u16 = 1 << 15;
pub(crate) struct MetadataWriter {
compressor: FilesystemCompressor,
block_size: u32,
/// Offset from the beginning of the metadata block last written
pub(crate) metadata_start: u32,
// All current bytes that are uncompressed
pub(crate) uncompressed_bytes: VecDeque<u8>,
// All current bytes that are compressed or uncompressed
pub(crate) final_bytes: Vec<(bool, Vec<u8>)>,
pub kind: Kind,
}
impl MetadataWriter {
pub fn new(compressor: FilesystemCompressor, block_size: u32, kind: Kind) -> Self {
Self {
compressor,
block_size,
metadata_start: 0,
uncompressed_bytes: VecDeque::new(),
final_bytes: vec![],
kind,
}
}
fn add_block(&mut self) -> io::Result<()> {
// take the uncompressed data that will make up the metablock
let uncompressed_len = self.uncompressed_bytes.len().min(METADATA_MAXSIZE);
if uncompressed_len == 0 {
// nothing to add
return Ok(());
}
if self.uncompressed_bytes.as_slices().0.len() < uncompressed_len {
self.uncompressed_bytes.make_contiguous();
}
let uncompressed = &self.uncompressed_bytes.as_slices().0[0..uncompressed_len];
trace!("time to compress");
// "Write" the to the saved metablock
let compressed =
self.kind.inner.compressor.compress(uncompressed, self.compressor, self.block_size)?;
// Remove the data consumed; if the uncompressed data is smaller, use it instead.
let (compressed, metadata) = if compressed.len() > uncompressed_len {
let uncompressed = self.uncompressed_bytes.drain(0..uncompressed_len).collect();
(false, uncompressed)
} else {
self.uncompressed_bytes.drain(0..uncompressed_len);
(true, compressed)
};
// Metadata len + bytes + last metadata_start
self.metadata_start += 2 + metadata.len() as u32;
trace!("new metadata start: {:#02x?}", self.metadata_start);
self.final_bytes.push((compressed, metadata));
trace!("LEN: {:02x?}", self.uncompressed_bytes.len());
Ok(())
}
pub fn finalize<W: Write + Seek>(&mut self, mut out: W) -> Result<(), BackhandError> {
//add any remaining data
while !self.uncompressed_bytes.is_empty() {
self.add_block()?;
}
// write all the metadata blocks
for (compressed, compressed_bytes) in &self.final_bytes {
trace!("len: {:02x?}", compressed_bytes.len());
// if uncompressed, set the highest bit of len
let len =
compressed_bytes.len() as u16 | if *compressed { 0 } else { 1 << (u16::BITS - 1) };
let mut writer = Writer::new(&mut out);
len.to_writer(&mut writer, self.kind.inner.data_endian)?;
out.write_all(compressed_bytes)?;
}
Ok(())
}
}
impl Write for MetadataWriter {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
// add all of buf into uncompressed
self.uncompressed_bytes.write_all(buf)?;
// if there is too much uncompressed data, create a new metadata block
while self.uncompressed_bytes.len() >= METADATA_MAXSIZE {
self.add_block()?;
}
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
pub fn read_block<R: Read + Seek>(
reader: &mut R,
superblock: &SuperBlock,
kind: &Kind,
) -> Result<Vec<u8>, BackhandError> {
let mut deku_reader = Reader::new(&mut *reader);
let metadata_len = u16::from_reader_with_ctx(&mut deku_reader, kind.inner.data_endian)?;
let byte_len = len(metadata_len);
tracing::trace!("len: 0x{:02x?}", byte_len);
let mut buf = vec![0u8; byte_len as usize];
reader.read_exact(&mut buf)?;
let bytes = if is_compressed(metadata_len) {
tracing::trace!("compressed");
let mut out = Vec::with_capacity(8 * 1024);
kind.inner.compressor.decompress(&buf, &mut out, superblock.compressor)?;
out
} else {
tracing::trace!("uncompressed");
buf
};
tracing::trace!("uncompressed size: 0x{:02x?}", bytes.len());
Ok(bytes)
}
/// Check is_compressed bit within raw `len`
pub fn is_compressed(len: u16) -> bool {
len & METDATA_UNCOMPRESSED == 0
}
/// Get the actual length of the following `data` by masking the compression bit out of the raw `len`
pub fn len(len: u16) -> u16 {
len & !(METADATA_UNCOMPRESSED)
}
pub fn set_if_uncompressed(len: u16) -> u16 {
len | METADATA_UNCOMPRESSED
}
0707010000004D000081A40000000000000000000000016854DB950000216F000000000000000000000000000000000000002700000000backhand-0.23.0/backhand/src/reader.rs//! Reader traits
use std::collections::HashMap;
use std::io::{BufRead, Cursor, Read, Seek, SeekFrom, Write};
use deku::prelude::*;
use solana_nohash_hasher::IntMap;
use tracing::{error, trace};
use crate::error::BackhandError;
use crate::export::Export;
use crate::fragment::Fragment;
use crate::id::Id;
use crate::inode::Inode;
use crate::kinds::Kind;
use crate::metadata::METADATA_MAXSIZE;
use crate::squashfs::{SuperBlock, NOT_SET};
use crate::{fragment, metadata};
/// Private struct containing logic to read the `Squashfs` section from a file
#[derive(Debug)]
pub(crate) struct SquashfsReaderWithOffset<R: BufReadSeek> {
io: R,
/// Offset from start of file to squashfs
offset: u64,
}
impl<R: BufReadSeek> SquashfsReaderWithOffset<R> {
pub fn new(mut io: R, offset: u64) -> std::io::Result<Self> {
io.seek(SeekFrom::Start(offset))?;
Ok(Self { io, offset })
}
}
impl<R> BufRead for SquashfsReaderWithOffset<R>
where
R: BufReadSeek,
{
fn fill_buf(&mut self) -> std::io::Result<&[u8]> {
self.io.fill_buf()
}
fn consume(&mut self, amt: usize) {
self.io.consume(amt)
}
}
impl<R> Read for SquashfsReaderWithOffset<R>
where
R: BufReadSeek,
{
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
self.io.read(buf)
}
}
impl<R> Seek for SquashfsReaderWithOffset<R>
where
R: BufReadSeek,
{
fn seek(&mut self, pos: SeekFrom) -> std::io::Result<u64> {
let seek = match pos {
SeekFrom::Start(start) => SeekFrom::Start(self.offset + start),
seek => seek,
};
self.io.seek(seek).map(|x| x - self.offset)
}
}
/// Pseudo-Trait for BufRead + Seek
pub trait BufReadSeek: BufRead + Seek + Send {}
impl<T: BufRead + Seek + Send> BufReadSeek for T {}
/// Pseudo-Trait for Write + Seek
pub trait WriteSeek: Write + Seek {}
impl<T: Write + Seek> WriteSeek for T {}
impl<T: BufReadSeek> SquashFsReader for T {}
/// Squashfs data extraction methods implemented over [`Read`] and [`Seek`]
pub trait SquashFsReader: BufReadSeek + Sized {
/// Cache Inode Table
/// # Returns
/// - `(RootInode, HashMap<inode_number, Inode>)`
fn inodes(
&mut self,
superblock: &SuperBlock,
kind: &Kind,
) -> Result<(Inode, IntMap<u32, Inode>), BackhandError> {
let (map, bytes) = self.uncompress_metadatas(
superblock.inode_table,
superblock,
superblock.dir_table,
kind,
)?;
let mut inodes = IntMap::default();
// Be nice to the allocator, and only reserve at most a u16::MAX count of Inodes
inodes.try_reserve(superblock.inode_count.min(u16::MAX as u32) as usize)?;
let byte_len = bytes.len();
let mut cursor = Cursor::new(bytes);
let mut reader = Reader::new(&mut cursor);
while reader.bits_read != byte_len * 8 {
let inode = Inode::from_reader_with_ctx(
&mut reader,
(
superblock.bytes_used,
superblock.block_size,
superblock.block_log,
kind.inner.type_endian,
),
)?;
inodes.insert(inode.header.inode_number, inode);
}
if inodes.len() != superblock.inode_count as usize {
error!("inodes {} != superblock.inode_count {}", inodes.len(), superblock.inode_count);
return Err(BackhandError::CorruptedOrInvalidSquashfs);
}
let root_inode_start = (superblock.root_inode >> 16) as usize;
let root_inode_offset = (superblock.root_inode & 0xffff) as usize;
let Some(root_offset) = map.get(&(root_inode_start as u64)) else {
return Err(BackhandError::CorruptedOrInvalidSquashfs);
};
let mut cursor = reader.into_inner();
cursor.seek(SeekFrom::Start(root_offset + root_inode_offset as u64))?;
let mut reader = Reader::new(&mut cursor);
let root_inode = Inode::from_reader_with_ctx(
&mut reader,
(
superblock.bytes_used,
superblock.block_size,
superblock.block_log,
kind.inner.type_endian,
),
)?;
Ok((root_inode, inodes))
}
/// Parse and uncompress the `Metadata` blocks required for `Dir`s
///
/// # Returns
/// - `(HashMap<offset_from_seek, offset_from_bytes>, Bytes)`
fn uncompress_metadatas(
&mut self,
seek: u64,
superblock: &SuperBlock,
end_ptr: u64,
kind: &Kind,
) -> Result<(IntMap<u64, u64>, Vec<u8>), BackhandError> {
self.seek(SeekFrom::Start(seek))?;
let mut map = HashMap::default();
let mut all_bytes = vec![];
while self.stream_position()? != end_ptr {
let metadata_start = self.stream_position()?;
let mut bytes = metadata::read_block(self, superblock, kind)?;
map.insert(metadata_start - seek, all_bytes.len() as u64);
all_bytes.append(&mut bytes);
}
Ok((map, all_bytes))
}
/// Parse and Cache Fragment Table
fn fragments(
&mut self,
superblock: &SuperBlock,
kind: &Kind,
) -> Result<Option<(u64, Vec<Fragment>)>, BackhandError> {
if superblock.frag_count == 0 || superblock.frag_table == NOT_SET {
return Ok(None);
}
let (ptr, table) = self.lookup_table::<Fragment>(
superblock,
superblock.frag_table,
u64::from(superblock.frag_count) * fragment::SIZE as u64,
kind,
)?;
Ok(Some((ptr, table)))
}
/// Parse Export Table
fn export(
&mut self,
superblock: &SuperBlock,
kind: &Kind,
) -> Result<Option<(u64, Vec<Export>)>, BackhandError> {
if superblock.nfs_export_table_exists() && superblock.export_table != NOT_SET {
let ptr = superblock.export_table;
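// Each export entry is a u64 inode reference, so one 8 KiB metadata block
// holds 1024 entries; round up to get the block count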
let count = (superblock.inode_count as f32 / 1024_f32).ceil() as u64;
let (ptr, table) = self.lookup_table::<Export>(superblock, ptr, count, kind)?;
Ok(Some((ptr, table)))
} else {
Ok(None)
}
}
/// Parse and Cache ID Table
fn id(
&mut self,
superblock: &SuperBlock,
kind: &Kind,
) -> Result<(u64, Vec<Id>), BackhandError> {
let ptr = superblock.id_table;
let count = superblock.id_count as u64;
let (ptr, table) = self.lookup_table::<Id>(superblock, ptr, count, kind)?;
Ok((ptr, table))
}
/// Parse Lookup Table
fn lookup_table<T>(
&mut self,
superblock: &SuperBlock,
seek: u64,
size: u64,
kind: &Kind,
) -> Result<(u64, Vec<T>), BackhandError>
where
T: for<'a> DekuReader<'a, deku::ctx::Endian>,
{
// find the pointer at the initial offset
trace!("seek: {:02x?}", seek);
self.seek(SeekFrom::Start(seek))?;
let buf: &mut [u8] = &mut [0u8; 8];
self.read_exact(buf)?;
trace!("{:02x?}", buf);
let mut cursor = Cursor::new(buf);
let mut deku_reader = Reader::new(&mut cursor);
let ptr = u64::from_reader_with_ctx(&mut deku_reader, kind.inner.type_endian)?;
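// Only the first block pointer is read; the remaining metadata blocks are
// assumed to follow it contiguously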
let block_count = (size as f32 / METADATA_MAXSIZE as f32).ceil() as u64;
trace!("ptr: {:02x?}", ptr);
let table = self.metadata_with_count::<T>(superblock, ptr, block_count, kind)?;
Ok((ptr, table))
}
/// Parse count of `Metadata` block at offset into `T`
fn metadata_with_count<T>(
&mut self,
superblock: &SuperBlock,
seek: u64,
count: u64,
kind: &Kind,
) -> Result<Vec<T>, BackhandError>
where
T: for<'a> DekuReader<'a, deku::ctx::Endian>,
{
trace!("seek: {:02x?}", seek);
self.seek(SeekFrom::Start(seek))?;
let mut all_bytes = vec![];
for _ in 0..count {
let mut bytes = metadata::read_block(self, superblock, kind)?;
all_bytes.append(&mut bytes);
}
let mut ret_vec = vec![];
// Read until we fail to turn bytes into `T`
let mut cursor = Cursor::new(all_bytes);
let mut container = Reader::new(&mut cursor);
while let Ok(t) = T::from_reader_with_ctx(&mut container, kind.inner.type_endian) {
ret_vec.push(t);
}
Ok(ret_vec)
}
}
0707010000004E000081A40000000000000000000000016854DB950000604F000000000000000000000000000000000000002900000000backhand-0.23.0/backhand/src/squashfs.rs//! Read from on-disk image
use std::ffi::OsString;
use std::io::{Cursor, Seek, SeekFrom};
use std::path::PathBuf;
use std::sync::Mutex;
use std::sync::{Arc, RwLock};
use deku::prelude::*;
use solana_nohash_hasher::IntMap;
use tracing::{error, info, trace};
use crate::compressor::{CompressionOptions, Compressor};
use crate::dir::Dir;
use crate::error::BackhandError;
use crate::filesystem::node::{InnerNode, Nodes};
use crate::fragment::Fragment;
use crate::inode::{Inode, InodeId, InodeInner};
use crate::kinds::{Kind, LE_V4_0};
use crate::reader::{BufReadSeek, SquashFsReader, SquashfsReaderWithOffset};
use crate::unix_string::OsStringExt;
use crate::{
metadata, Export, FilesystemReader, Id, Node, NodeHeader, SquashfsBlockDevice,
SquashfsCharacterDevice, SquashfsDir, SquashfsFileReader, SquashfsSymlink,
};
/// 128KiB
pub const DEFAULT_BLOCK_SIZE: u32 = 0x20000;
/// 4KiB
pub const DEFAULT_PAD_LEN: u32 = 0x1000;
/// log2 of 128KiB
const DEFAULT_BLOCK_LOG: u16 = 0x11;
/// 1MiB
pub const MAX_BLOCK_SIZE: u32 = 0x10_0000;
/// 4KiB
pub const MIN_BLOCK_SIZE: u32 = 0x1000;
/// Contains important information about the archive, including the locations of other sections
#[derive(Debug, Copy, Clone, DekuRead, DekuWrite, PartialEq, Eq)]
#[deku(
endian = "ctx_type_endian",
ctx = "ctx_magic: [u8; 4], ctx_version_major: u16, ctx_version_minor: u16, ctx_type_endian: deku::ctx::Endian"
)]
pub struct SuperBlock {
/// Must be set to 0x73717368 ("hsqs" on disk).
#[deku(assert_eq = "ctx_magic")]
pub magic: [u8; 4],
/// The number of inodes stored in the archive.
pub inode_count: u32,
/// Last modification time of the archive, counted in seconds since 00:00, Jan 1st 1970 UTC (not counting leap seconds).
/// This is unsigned, so it expires in the year 2106 (as opposed to 2038).
pub mod_time: u32,
/// The size of a data block in bytes. Must be a power of two between 4096 (4k) and 1048576 (1 MiB).
pub block_size: u32,
/// The number of entries in the fragment table.
pub frag_count: u32,
/// Compressor used for data
pub compressor: Compressor,
/// The log2 of the block size. If the two fields do not agree, the archive is considered corrupted.
pub block_log: u16,
/// Bit wise OR of the flag bits
pub flags: u16,
/// The number of entries in the ID lookup table.
pub id_count: u16,
#[deku(assert_eq = "ctx_version_major")]
/// Major version of the format. Must be set to 4.
pub version_major: u16,
#[deku(assert_eq = "ctx_version_minor")]
/// Minor version of the format. Must be set to 0.
pub version_minor: u16,
/// A reference to the inode of the root directory.
pub root_inode: u64,
/// The number of bytes used by the archive.
/// Because SquashFS archives must be padded to a multiple of the underlying device block size, this can be less than the actual file size.
pub bytes_used: u64,
pub id_table: u64,
//TODO: add read into Squashfs
pub xattr_table: u64,
pub inode_table: u64,
pub dir_table: u64,
pub frag_table: u64,
//TODO: add read into Squashfs
pub export_table: u64,
}
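/// Field value for an optional table (e.g. `frag_table`, `export_table`,
/// `xattr_table`) that is not present in the image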
pub const NOT_SET: u64 = 0xffff_ffff_ffff_ffff;
impl SuperBlock {
/// flag value
pub fn inodes_uncompressed(&self) -> bool {
self.flags & Flags::InodesStoredUncompressed as u16 != 0
}
/// flag value
pub fn data_block_stored_uncompressed(&self) -> bool {
self.flags & Flags::DataBlockStoredUncompressed as u16 != 0
}
/// flag value
pub fn fragments_stored_uncompressed(&self) -> bool {
self.flags & Flags::FragmentsStoredUncompressed as u16 != 0
}
/// flag value
pub fn fragments_are_not_used(&self) -> bool {
self.flags & Flags::FragmentsAreNotUsed as u16 != 0
}
/// flag value
pub fn fragments_are_always_generated(&self) -> bool {
self.flags & Flags::FragmentsAreAlwaysGenerated as u16 != 0
}
/// flag value
pub fn data_has_been_deduplicated(&self) -> bool {
self.flags & Flags::DataHasBeenDeduplicated as u16 != 0
}
/// flag value
pub fn nfs_export_table_exists(&self) -> bool {
self.flags & Flags::NFSExportTableExists as u16 != 0
}
/// flag value
pub fn xattrs_are_stored_uncompressed(&self) -> bool {
self.flags & Flags::XattrsAreStoredUncompressed as u16 != 0
}
/// flag value
pub fn no_xattrs_in_archive(&self) -> bool {
self.flags & Flags::NoXattrsInArchive as u16 != 0
}
/// flag value
pub fn compressor_options_are_present(&self) -> bool {
self.flags & Flags::CompressorOptionsArePresent as u16 != 0
}
}
impl SuperBlock {
pub fn new(compressor: Compressor, kind: Kind) -> Self {
Self {
magic: kind.inner.magic,
inode_count: 0,
mod_time: 0,
block_size: DEFAULT_BLOCK_SIZE,
frag_count: 0,
compressor,
block_log: DEFAULT_BLOCK_LOG,
flags: 0,
id_count: 0,
version_major: kind.inner.version_major,
version_minor: kind.inner.version_minor,
root_inode: 0,
bytes_used: 0,
id_table: 0,
xattr_table: NOT_SET,
inode_table: 0,
dir_table: 0,
frag_table: NOT_SET,
export_table: NOT_SET,
}
}
}
#[rustfmt::skip]
#[allow(dead_code)]
#[derive(Debug, Copy, Clone)]
pub enum Flags {
InodesStoredUncompressed = 0b0000_0000_0000_0001,
DataBlockStoredUncompressed = 0b0000_0000_0000_0010,
Unused = 0b0000_0000_0000_0100,
FragmentsStoredUncompressed = 0b0000_0000_0000_1000,
FragmentsAreNotUsed = 0b0000_0000_0001_0000,
FragmentsAreAlwaysGenerated = 0b0000_0000_0010_0000,
DataHasBeenDeduplicated = 0b0000_0000_0100_0000,
NFSExportTableExists = 0b0000_0000_1000_0000,
XattrsAreStoredUncompressed = 0b0000_0001_0000_0000,
NoXattrsInArchive = 0b0000_0010_0000_0000,
CompressorOptionsArePresent = 0b0000_0100_0000_0000,
}
#[derive(Default, Clone, Debug)]
pub(crate) struct Cache {
/// The first time a fragment's bytes are read, they are added to this map,
/// keyed by the fragment's start position
pub(crate) fragment_cache: IntMap<u64, Vec<u8>>,
}
/// Squashfs Image initial read information
///
/// See [`FilesystemReader`] for a representation with the data extracted and uncompressed.
pub struct Squashfs<'b> {
pub kind: Kind,
pub superblock: SuperBlock,
/// Compression options for the Compressor, stored directly after the SuperBlock
pub compression_options: Option<CompressionOptions>,
/// Inode Cache `<InodeNumber, Inode>`
pub inodes: IntMap<u32, Inode>,
/// Root Inode
pub root_inode: Inode,
/// Bytes containing Directory Table `(<OffsetFromDirTable, OffsetInData>, Data)`
pub dir_blocks: (IntMap<u64, u64>, Vec<u8>),
/// Fragments Lookup Table Cache
pub fragments: Option<Vec<Fragment>>,
/// Export Lookup Table Cache
pub export: Option<Vec<Export>>,
/// Id Lookup Table Cache
pub id: Vec<Id>,
//file reader
file: Box<dyn BufReadSeek + 'b>,
}
impl<'b> Squashfs<'b> {
/// Read Superblock and Compression Options at current `reader` offset without parsing inodes
/// and dirs
///
/// Used for unsquashfs (extraction and --stat)
pub fn superblock_and_compression_options(
reader: &mut Box<dyn BufReadSeek + 'b>,
kind: &Kind,
) -> Result<(SuperBlock, Option<CompressionOptions>), BackhandError> {
// Parse SuperBlock
let mut container = Reader::new(&mut *reader);
let superblock = SuperBlock::from_reader_with_ctx(
&mut container,
(
kind.inner.magic,
kind.inner.version_major,
kind.inner.version_minor,
kind.inner.type_endian,
),
)?;
let block_size = superblock.block_size;
let power_of_two = block_size != 0 && (block_size & (block_size - 1)) == 0;
if !(MIN_BLOCK_SIZE..=MAX_BLOCK_SIZE).contains(&block_size) || !power_of_two {
error!("block_size({:#02x}) invalid", superblock.block_size);
return Err(BackhandError::CorruptedOrInvalidSquashfs);
}
if (superblock.block_size as f32).log2() != superblock.block_log as f32 {
error!("block size.log2() != block_log");
return Err(BackhandError::CorruptedOrInvalidSquashfs);
}
// Parse Compression Options, if any
info!("Reading Compression options");
let compression_options = if superblock.compressor != Compressor::None
&& superblock.compressor_options_are_present()
{
let mut bytes = metadata::read_block(reader, &superblock, kind)?;
let mut cursor = Cursor::new(&mut bytes);
let mut reader = Reader::new(&mut cursor);
// data -> compression options
match CompressionOptions::from_reader_with_ctx(
&mut reader,
(kind.inner.type_endian, superblock.compressor),
) {
Ok(co) => {
if !reader.end() {
error!("invalid compression, not all bytes read");
None
} else {
Some(co)
}
}
Err(e) => {
error!("invalid compression options: {e:?}, not using");
None
}
}
} else {
None
};
info!("compression_options: {compression_options:02x?}");
Ok((superblock, compression_options))
}
/// Create a `Squashfs` from a `Read`er. The result holds all fields needed to
/// regenerate the original squashfs and to interact with the filesystem in
/// memory without reading from the `Read`er again. `reader` must start at the
/// beginning of the image.
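///
/// # Example
///
/// A minimal sketch of reading an image from disk (the path is hypothetical):
/// ```no_run
/// use std::fs::File;
/// use std::io::BufReader;
/// use backhand::Squashfs;
///
/// // BufReader<File> implements BufRead + Seek + Send, satisfying BufReadSeek
/// let file = BufReader::new(File::open("image.squashfs").unwrap());
/// let squashfs = Squashfs::from_reader(file).unwrap();
/// // Extract and uncompress into an in-memory filesystem representation
/// let filesystem = squashfs.into_filesystem_reader().unwrap();
/// ```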
pub fn from_reader(reader: impl BufReadSeek + 'b) -> Result<Self, BackhandError> {
Self::from_reader_with_offset(reader, 0)
}
/// Same as [`Self::from_reader`], but seeking to `offset` in `reader` before reading
///
/// Uses default [`Kind`]: [`LE_V4_0`]
pub fn from_reader_with_offset(
reader: impl BufReadSeek + 'b,
offset: u64,
) -> Result<Self, BackhandError> {
Self::from_reader_with_offset_and_kind(reader, offset, Kind { inner: Arc::new(LE_V4_0) })
}
/// Same as [`Self::from_reader_with_offset`], but including custom `kind`
pub fn from_reader_with_offset_and_kind(
reader: impl BufReadSeek + 'b,
offset: u64,
kind: Kind,
) -> Result<Self, BackhandError> {
let reader: Box<dyn BufReadSeek + 'b> = if offset == 0 {
Box::new(reader)
} else {
let reader = SquashfsReaderWithOffset::new(reader, offset)?;
Box::new(reader)
};
Self::inner_from_reader_with_offset_and_kind(reader, kind)
}
fn inner_from_reader_with_offset_and_kind(
mut reader: Box<dyn BufReadSeek + 'b>,
kind: Kind,
) -> Result<Self, BackhandError> {
let (superblock, compression_options) =
Self::superblock_and_compression_options(&mut reader, &kind)?;
// Check if legal image
let total_length = reader.seek(SeekFrom::End(0))?;
reader.rewind()?;
if superblock.bytes_used > total_length {
error!("corrupted or invalid bytes_used");
return Err(BackhandError::CorruptedOrInvalidSquashfs);
}
// check required fields
if superblock.id_table > total_length {
error!("corrupted or invalid xattr_table");
return Err(BackhandError::CorruptedOrInvalidSquashfs);
}
if superblock.inode_table > total_length {
error!("corrupted or invalid inode_table");
return Err(BackhandError::CorruptedOrInvalidSquashfs);
}
if superblock.dir_table > total_length {
error!("corrupted or invalid dir_table");
return Err(BackhandError::CorruptedOrInvalidSquashfs);
}
// check optional fields
if superblock.xattr_table != NOT_SET && superblock.xattr_table > total_length {
error!("corrupted or invalid frag_table");
return Err(BackhandError::CorruptedOrInvalidSquashfs);
}
if superblock.frag_table != NOT_SET && superblock.frag_table > total_length {
error!("corrupted or invalid frag_table");
return Err(BackhandError::CorruptedOrInvalidSquashfs);
}
if superblock.export_table != NOT_SET && superblock.export_table > total_length {
error!("corrupted or invalid export_table");
return Err(BackhandError::CorruptedOrInvalidSquashfs);
}
// Read all fields from filesystem to make a Squashfs
info!("Reading Inodes");
let (root_inode, inodes) = reader.inodes(&superblock, &kind)?;
info!("Reading Fragments");
let fragments = reader.fragments(&superblock, &kind)?;
let fragment_ptr = fragments.as_ref().map(|frag| frag.0);
let fragment_table = fragments.map(|a| a.1);
info!("Reading Exports");
let export = reader.export(&superblock, &kind)?;
let export_ptr = export.as_ref().map(|export| export.0);
let export_table = export.map(|a| a.1);
info!("Reading Ids");
let id = reader.id(&superblock, &kind)?;
let id_ptr = id.0;
let id_table = id.1;
let last_dir_position = if let Some(fragment_ptr) = fragment_ptr {
trace!("using fragment for end of dir");
fragment_ptr
} else if let Some(export_ptr) = export_ptr {
trace!("using export for end of dir");
export_ptr
} else {
trace!("using id for end of dir");
id_ptr
};
info!("Reading Dirs");
let dir_blocks = reader.uncompress_metadatas(
superblock.dir_table,
&superblock,
last_dir_position,
&kind,
)?;
let squashfs = Squashfs {
kind,
superblock,
compression_options,
inodes,
root_inode,
dir_blocks,
fragments: fragment_table,
export: export_table,
id: id_table,
file: reader,
};
// show info about flags
if superblock.inodes_uncompressed() {
info!("flag: inodes uncompressed");
}
if superblock.data_block_stored_uncompressed() {
info!("flag: data blocks stored uncompressed");
}
if superblock.fragments_stored_uncompressed() {
info!("flag: fragments stored uncompressed");
}
if superblock.fragments_are_not_used() {
info!("flag: fragments are not used");
}
if superblock.fragments_are_always_generated() {
info!("flag: fragments are always generated");
}
if superblock.data_has_been_deduplicated() {
info!("flag: data has been duplicated");
}
if superblock.nfs_export_table_exists() {
info!("flag: nfs export table exists");
}
if superblock.xattrs_are_stored_uncompressed() {
info!("flag: xattrs are stored uncompressed");
}
if superblock.compressor_options_are_present() {
info!("flag: compressor options are present");
}
info!("Successful Read");
Ok(squashfs)
}
/// # Returns
/// - `Ok(Some(Vec<Dir>))` when found dir
/// - `Ok(None)` when empty dir
pub(crate) fn dir_from_index(
&self,
block_index: u64,
file_size: u32,
block_offset: usize,
) -> Result<Option<Vec<Dir>>, BackhandError> {
trace!("- block index : {:02x?}", block_index);
trace!("- file_size : {:02x?}", file_size);
trace!("- block offset: {:02x?}", block_offset);
if file_size < 4 {
return Ok(None);
}
let Some(offset) = self.dir_blocks.0.get(&block_index) else {
return Err(BackhandError::CorruptedOrInvalidSquashfs);
};
let Some(block) = &self.dir_blocks.1.get(*offset as usize..) else {
return Err(BackhandError::CorruptedOrInvalidSquashfs);
};
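// A directory inode's file_size is 3 bytes larger than the stored listing,
// accounting for the "." and ".." entries that are never written to disk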
if (block.len() as u32) < (block_offset as u32 + file_size - 3) {
return Err(BackhandError::CorruptedOrInvalidSquashfs);
}
let bytes = &block[block_offset..][..file_size as usize - 3];
let mut dirs = vec![];
// Read until we fail to turn bytes into `T`
let mut cursor = Cursor::new(bytes);
let mut container = Reader::new(&mut cursor);
while let Ok(t) = Dir::from_reader_with_ctx(&mut container, self.kind.inner.type_endian) {
dirs.push(t);
}
trace!("finish");
Ok(Some(dirs))
}
fn extract_dir(
&self,
fullpath: &mut PathBuf,
root: &mut Nodes<SquashfsFileReader>,
dir_inode: &Inode,
id_table: &[Id],
) -> Result<(), BackhandError> {
let dirs = match &dir_inode.inner {
InodeInner::BasicDirectory(basic_dir) => {
trace!("BASIC_DIR inodes: {:02x?}", basic_dir);
self.dir_from_index(
u64::from(basic_dir.block_index),
u32::from(basic_dir.file_size),
basic_dir.block_offset as usize,
)?
}
InodeInner::ExtendedDirectory(ext_dir) => {
trace!("EXT_DIR: {:#02x?}", ext_dir);
self.dir_from_index(
u64::from(ext_dir.block_index),
ext_dir.file_size,
ext_dir.block_offset as usize,
)?
}
_ => return Err(BackhandError::UnexpectedInode(dir_inode.inner.clone())),
};
if let Some(dirs) = dirs {
for d in &dirs {
trace!("extracting entry: {:#?}", d.dir_entries);
for entry in &d.dir_entries {
let Ok(inode_key) = (d.inode_num as i32 + entry.inode_offset as i32).try_into()
else {
return Err(BackhandError::CorruptedOrInvalidSquashfs);
};
let Some(found_inode) = &self.inodes.get(&inode_key) else {
return Err(BackhandError::CorruptedOrInvalidSquashfs);
};
let header = found_inode.header;
fullpath.push(entry.name()?);
let inner: InnerNode<SquashfsFileReader> = match entry.t {
// BasicDirectory, ExtendedDirectory
InodeId::BasicDirectory | InodeId::ExtendedDirectory => {
// it's a dir, extract all child inodes
if *found_inode == dir_inode {
error!("self referential dir to already read inode");
return Err(BackhandError::UnexpectedInode(
dir_inode.inner.clone(),
));
}
self.extract_dir(fullpath, root, found_inode, &self.id)?;
InnerNode::Dir(SquashfsDir::default())
}
// BasicFile
InodeId::BasicFile => {
let inner = match &found_inode.inner {
InodeInner::BasicFile(file) => {
SquashfsFileReader::Basic(file.clone())
}
InodeInner::ExtendedFile(file) => {
SquashfsFileReader::Extended(file.clone())
}
_ => {
return Err(BackhandError::UnexpectedInode(
found_inode.inner.clone(),
))
}
};
InnerNode::File(inner)
}
// Basic Symlink
InodeId::BasicSymlink => {
let link = self.symlink_target_path(found_inode)?;
InnerNode::Symlink(SquashfsSymlink { link })
}
// Basic CharacterDevice
InodeId::BasicCharacterDevice => {
let device_number = Self::char_device_number(found_inode)?;
InnerNode::CharacterDevice(SquashfsCharacterDevice { device_number })
}
// Basic BlockDevice
InodeId::BasicBlockDevice => {
let device_number = Self::block_device_number(found_inode)?;
InnerNode::BlockDevice(SquashfsBlockDevice { device_number })
}
InodeId::BasicNamedPipe => InnerNode::NamedPipe,
InodeId::BasicSocket => InnerNode::Socket,
InodeId::ExtendedFile => {
return Err(BackhandError::UnsupportedInode(found_inode.inner.clone()))
}
};
let node = Node::new(
fullpath.clone(),
NodeHeader::from_inode(header, id_table)?,
inner,
);
root.nodes.push(node);
fullpath.pop();
}
}
}
//TODO: todo!("verify all the paths are valid");
Ok(())
}
/// Symlink target path
///
/// # Returns
/// `Ok(target_path)`
fn symlink_target_path(&self, inode: &Inode) -> Result<PathBuf, BackhandError> {
if let InodeInner::BasicSymlink(basic_sym) = &inode.inner {
let path = OsString::from_vec(basic_sym.target_path.clone());
return Ok(PathBuf::from(path));
}
error!("symlink not found");
Err(BackhandError::FileNotFound)
}
/// Char Device Number
///
/// # Returns
/// `Ok(dev_num)`
fn char_device_number(inode: &Inode) -> Result<u32, BackhandError> {
if let InodeInner::BasicCharacterDevice(spc_file) = &inode.inner {
return Ok(spc_file.device_number);
}
error!("char dev not found");
Err(BackhandError::FileNotFound)
}
/// Block Device Number
///
/// # Returns
/// `Ok(dev_num)`
fn block_device_number(inode: &Inode) -> Result<u32, BackhandError> {
if let InodeInner::BasicBlockDevice(spc_file) = &inode.inner {
return Ok(spc_file.device_number);
}
error!("block dev not found");
Err(BackhandError::FileNotFound)
}
/// Convert into [`FilesystemReader`] by extracting all file bytes and converting into a filesystem
/// like structure in-memory
pub fn into_filesystem_reader(self) -> Result<FilesystemReader<'b>, BackhandError> {
info!("creating fs tree");
let mut root = Nodes::new_root(NodeHeader::from_inode(self.root_inode.header, &self.id)?);
self.extract_dir(&mut PathBuf::from("/"), &mut root, &self.root_inode, &self.id)?;
root.nodes.sort();
info!("created fs tree");
let filesystem = FilesystemReader {
kind: self.kind,
block_size: self.superblock.block_size,
block_log: self.superblock.block_log,
compressor: self.superblock.compressor,
compression_options: self.compression_options,
mod_time: self.superblock.mod_time,
id_table: self.id,
fragments: self.fragments,
root,
reader: Mutex::new(Box::new(self.file)),
cache: RwLock::new(Cache::default()),
no_duplicate_files: self.superblock.data_has_been_deduplicated(),
};
Ok(filesystem)
}
}
0707010000004F000081A40000000000000000000000016854DB9500000446000000000000000000000000000000000000002C00000000backhand-0.23.0/backhand/src/unix_string.rsuse std::ffi::OsStr;
use std::ffi::OsString;
#[cfg(unix)]
use std::os::unix::ffi::OsStrExt as OsStrExtUnix;
#[cfg(unix)]
use std::os::unix::ffi::OsStringExt as OsStringExtUnix;
pub trait OsStrExt {
fn as_bytes(&self) -> &[u8];
fn from_bytes(slice: &[u8]) -> &Self;
}
#[cfg(unix)]
impl OsStrExt for OsStr {
fn as_bytes(&self) -> &[u8] {
OsStrExtUnix::as_bytes(self)
}
fn from_bytes(slice: &[u8]) -> &Self {
OsStrExtUnix::from_bytes(slice)
}
}
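// On Windows, names inside the archive are assumed to be valid UTF-8; these
// conversions panic on non-UTF-8 input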
#[cfg(windows)]
impl OsStrExt for OsStr {
fn as_bytes(&self) -> &[u8] {
self.to_str().unwrap().as_bytes()
}
fn from_bytes(slice: &[u8]) -> &Self {
let string = std::str::from_utf8(slice).unwrap();
OsStr::new(string)
}
}
pub trait OsStringExt {
fn from_vec(vec: Vec<u8>) -> Self;
}
#[cfg(unix)]
impl OsStringExt for OsString {
fn from_vec(vec: Vec<u8>) -> Self {
OsStringExtUnix::from_vec(vec)
}
}
#[cfg(windows)]
impl OsStringExt for OsString {
fn from_vec(vec: Vec<u8>) -> Self {
OsStr::from_bytes(vec.as_slice()).into()
}
}
07070100000050000081ED0000000000000000000000016854DB9500000FDF000000000000000000000000000000000000001B00000000backhand-0.23.0/bench.bash#!/bin/bash
set -ex
QUICK_MODE=false
LAST_RELEASE="v0.22.0"
BACKHAND_LAST_RELEASE="./last-release/unsquashfs-backhand"
BACKHAND_NATIVE_GNU="./native-gnu/dist/unsquashfs-backhand"
BACKHAND_NATIVE_MUSL="./native-musl/x86_64-unknown-linux-musl/dist/unsquashfs-backhand"
BACKHAND="./target/dist/unsquashfs-backhand"
BACKHAND_MUSL="./target/x86_64-unknown-linux-musl/dist/unsquashfs-backhand"
UNSQUASHFS="/usr/bin/unsquashfs"
# Using dynamic linked xz for perf reasons and matching unsquashfs in this testing
FLAGS="--bins --locked --profile=dist --no-default-features --features xz --features zstd --features gzip --features backhand-parallel"
bench () {
echo "You might want to make sudo last longer...."
sudo -v
if $QUICK_MODE; then
cargo +stable build -p backhand-cli $FLAGS
hyperfine --prepare 'sync; echo 3 | sudo tee /proc/sys/vm/drop_caches' --sort command --warmup 10 \
--command-name backhand-dist-gnu "$BACKHAND --quiet -f -d $(mktemp -d /tmp/BHXXX) -o $(($2)) $1" \
--command-name squashfs-tools "$UNSQUASHFS -quiet -no-progress -d $(mktemp -d /tmp/BHXXX) -f -o $(($2)) -ignore-errors $1" \
--export-markdown bench-results/$3.md -i
else
curl -sL https://github.com/wcampbell0x2a/backhand/releases/download/$LAST_RELEASE/backhand-$LAST_RELEASE-x86_64-unknown-linux-musl.tar.gz | tar xz -C last-release
cargo +stable build -p backhand-cli $FLAGS --target x86_64-unknown-linux-musl
cargo +stable build -p backhand-cli $FLAGS
RUSTFLAGS='-C target-cpu=native' cargo +stable build -p backhand-cli $FLAGS --target-dir native-gnu
RUSTFLAGS='-C target-cpu=native' cargo +stable build -p backhand-cli --target x86_64-unknown-linux-musl $FLAGS --target-dir native-musl
hyperfine --prepare 'sync; echo 3 | sudo tee /proc/sys/vm/drop_caches' --sort command --warmup 10 \
--command-name backhand-dist-${LAST_RELEASE}-musl "$BACKHAND_LAST_RELEASE --quiet -f -d $(mktemp -d /tmp/BHXXX) -o $(($2)) $1" \
--command-name backhand-dist-musl "$BACKHAND_MUSL --quiet -f -d $(mktemp -d /tmp/BHXXX) -o $(($2)) $1" \
--command-name backhand-dist-musl-native "$BACKHAND_NATIVE_MUSL --quiet -f -d $(mktemp -d /tmp/BHXXX) -o $(($2)) $1" \
--command-name backhand-dist-gnu "$BACKHAND --quiet -f -d $(mktemp -d /tmp/BHXXX) -o $(($2)) $1" \
--command-name backhand-dist-gnu-native "$BACKHAND_NATIVE_GNU --quiet -f -d $(mktemp -d /tmp/BHXXX) -o $(($2)) $1" \
--command-name squashfs-tools "$UNSQUASHFS -quiet -no-progress -d $(mktemp -d /tmp/BHXXX) -f -o $(($2)) -ignore-errors $1" \
--export-markdown bench-results/$3.md -i
fi
echo ""
file $1
(echo "### \`$(basename $1)\`"; cat bench-results/$3.md) > bench-results/$3_final.md
rm -rf /tmp/BH*
}
# Parse command line arguments
while [[ $# -gt 0 ]]; do
case $1 in
--quick)
QUICK_MODE=true
shift
;;
esac
done
rm -rf bench-results
rm -rf last-release
mkdir -p last-release
mkdir -p bench-results
# xz
bench "backhand-test/test-assets/test_openwrt_tplink_archera7v5/openwrt-22.03.2-ath79-generic-tplink_archer-a7-v5-squashfs-factory.bin" 0x225fd0 0_openwrt1
# xz
bench "backhand-test/test-assets/test_openwrt_netgear_ex6100v2/openwrt-22.03.2-ipq40xx-generic-netgear_ex6100v2-squashfs-factory.img" 0x2c0080 1_openwrt2
# xz
bench "backhand-test/test-assets/test_re815_xev160/870D97.squashfs" 0x0 2_re815
# xz
bench "backhand-test/test-assets/test_tplink_ax1800/img-1571203182_vol-ubi_rootfs.ubifs" 0x0 3_ax18000
# xz
bench "test-assets/test_archlinux_iso_rootfs/airootfs.sfs" 0x0
# xz
bench "backhand-test/test-assets/test_er605_v2_2/2611E3.squashfs" 0x0 4_er605
# gzip
bench "backhand-test/test-assets/test_appimage_plexamp/Plexamp-4.6.1.AppImage" 0x2dfe8 5_plexamp
# zstd
bench "backhand-test/test-assets/crates_io_zstd/crates-io.squashfs" 0x0 6_crates_zstd
cat bench-results/*_final.md > results.md
echo "Cool, now add results.md to BENCHMARK.md"
07070100000051000081ED0000000000000000000000016854DB950000057A000000000000000000000000000000000000002100000000backhand-0.23.0/create_test.bash#!/bin/bash
set -ex
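# Build a random directory tree under ./testing, then pack it into out.squashfs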
rm -rf testing
for (( a=0; a<1; a++ ))
do
empty=$(xxd -l 1 -c 1 -p < /dev/random)
mkdir -p testing/$empty
empty=$(xxd -l 1 -c 1 -p < /dev/random)
mkdir -p testing/$empty
parent=$(xxd -l 1 -c 1 -p < /dev/random)
for (( b=0; b<5; b++ ))
do
child1=$(xxd -l 1 -c 1 -p < /dev/random)
for (( c=0; c<1; c++ ))
do
child2=$(xxd -l 1 -c 1 -p < /dev/random)
mkdir -p testing/$parent/$child1/$child2
file=$(xxd -l 1 -c 1 -p < /dev/random)
dd if=/dev/random of=testing/$parent/$child1/$child2/$file bs=5 count=1
file=$(xxd -l 1 -c 1 -p < /dev/random)
dd if=/dev/random of=testing/$parent/$child1/$child2/$file bs=5 count=1
file=$(xxd -l 16 -c 16 -p < /dev/random)
dd if=/dev/random of=testing/$parent/$child1/$child2/$file bs=5 count=1
file=$(xxd -l 16 -c 16 -p < /dev/random)
dd if=/dev/random of=testing/$parent/$child1/$child2/$file bs=500 count=1
file=$(xxd -l 16 -c 16 -p < /dev/random)
dd if=/dev/random of=testing/$parent/$child1/$child2/$file bs=1000 count=1
file=$(xxd -l 16 -c 16 -p < /dev/random)
dd if=/dev/random of=testing/$parent/$child1/$child2/$file bs=131100 count=1
done
done
done
rm -rf out.squashfs
mksquashfs testing out.squashfs -comp xz
07070100000052000081A40000000000000000000000016854DB95000009A2000000000000000000000000000000000000001B00000000backhand-0.23.0/flake.lock{
"nodes": {
"crane": {
"locked": {
"lastModified": 1734541973,
"narHash": "sha256-1wIgLmhvtfxbJVnhFHUYhPqL3gpLn5JhiS4maaD9RRk=",
"owner": "ipetkov",
"repo": "crane",
"rev": "fdd502f921936105869eba53db6593fc2a424c16",
"type": "github"
},
"original": {
"owner": "ipetkov",
"repo": "crane",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1734424634,
"narHash": "sha256-cHar1vqHOOyC7f1+tVycPoWTfKIaqkoe1Q6TnKzuti4=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "d3c42f187194c26d9f0309a8ecc469d6c878ce33",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"crane": "crane",
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs",
"rust-overlay": "rust-overlay"
}
},
"rust-overlay": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1734661750,
"narHash": "sha256-BI58NBdimxu1lnpOrG9XxBz7Cwqy+qIf99zunWofX5w=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "7d3d910d5fd575e6e8c5600d83d54e5c47273bfe",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}
07070100000053000081A40000000000000000000000016854DB9500000673000000000000000000000000000000000000001A00000000backhand-0.23.0/flake.nix{
description = "Flake for backhand, a library and binaries for the reading, creating, and modification of SquashFS file systems";
inputs = {
nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";
flake-utils.url = "github:numtide/flake-utils";
rust-overlay = {
url = "github:oxalica/rust-overlay";
inputs.nixpkgs.follows = "nixpkgs";
};
crane.url = "github:ipetkov/crane";
};
outputs =
{
self,
nixpkgs,
flake-utils,
rust-overlay,
crane,
}:
flake-utils.lib.eachDefaultSystem (
system:
let
overlays = [ rust-overlay.overlays.default ];
pkgs = import nixpkgs { inherit system overlays; };
rust = pkgs.rust-bin.fromRustupToolchainFile ./rust-toolchain.toml;
craneLib = (crane.mkLib nixpkgs.legacyPackages.${system}).overrideToolchain rust;
commonArgs = {
pname = "backhand";
version = "0.19.0";
src = craneLib.cleanCargoSource self;
strictDeps = true;
nativeBuildInputs = with pkgs; [
cmake
];
};
cargoArtifacts = craneLib.buildDepsOnly commonArgs;
in
{
packages = rec {
backhand = craneLib.buildPackage (
commonArgs
// {
inherit cargoArtifacts;
doCheck = false;
}
);
default = backhand;
};
devShells.default = craneLib.devShell {
packages =
with pkgs;
[
git
]
++ commonArgs.nativeBuildInputs;
};
}
);
}
07070100000054000041ED0000000000000000000000026854DB9500000000000000000000000000000000000000000000001500000000backhand-0.23.0/fuzz07070100000055000081A40000000000000000000000016854DB9500000021000000000000000000000000000000000000002000000000backhand-0.23.0/fuzz/.gitignoretarget
corpus
artifacts
coverage
07070100000056000081A40000000000000000000000016854DB95000038AD000000000000000000000000000000000000002000000000backhand-0.23.0/fuzz/Cargo.lock# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 4
[[package]]
name = "adler2"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"
[[package]]
name = "arbitrary"
version = "1.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223"
[[package]]
name = "backhand"
version = "0.22.0"
dependencies = [
"deku",
"flate2",
"liblzma",
"lz4_flex",
"solana-nohash-hasher",
"thiserror",
"tracing",
"xxhash-rust",
"zstd",
"zstd-safe",
]
[[package]]
name = "backhand-fuzz"
version = "0.0.0"
dependencies = [
"backhand",
"libafl_libfuzzer",
]
[[package]]
name = "bitvec"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c"
dependencies = [
"funty",
"radium",
"tap",
"wyz",
]
[[package]]
name = "cc"
version = "1.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be714c154be609ec7f5dad223a33bf1482fff90472de28f7362806e6d4832b8c"
dependencies = [
"jobserver",
"libc",
"shlex",
]
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "crc32fast"
version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3"
dependencies = [
"cfg-if",
]
[[package]]
name = "darling"
version = "0.20.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989"
dependencies = [
"darling_core",
"darling_macro",
]
[[package]]
name = "darling_core"
version = "0.20.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5"
dependencies = [
"fnv",
"ident_case",
"proc-macro2",
"quote",
"strsim",
"syn",
]
[[package]]
name = "darling_macro"
version = "0.20.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806"
dependencies = [
"darling_core",
"quote",
"syn",
]
[[package]]
name = "deku"
version = "0.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0884cacbbfed7294f52619ec9092550a78728ef7bef9ae644dd00750763a4022"
dependencies = [
"bitvec",
"deku_derive",
"no_std_io2",
"rustversion",
]
[[package]]
name = "deku_derive"
version = "0.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1752be5b3a3f7053230fe4e6c023b3f9fdbbe91bc0d87dc4ae92ffdfaa2afc90"
dependencies = [
"darling",
"proc-macro-crate",
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "equivalent"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f"
[[package]]
name = "flate2"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "11faaf5a5236997af9848be0bef4db95824b1d534ebc64d0f0c6cf3e67bd38dc"
dependencies = [
"crc32fast",
"libz-rs-sys",
"miniz_oxide",
]
[[package]]
name = "fnv"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
[[package]]
name = "funty"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c"
[[package]]
name = "hashbrown"
version = "0.15.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289"
[[package]]
name = "hermit-abi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024"
[[package]]
name = "ident_case"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
[[package]]
name = "indexmap"
version = "2.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652"
dependencies = [
"equivalent",
"hashbrown",
]
[[package]]
name = "jobserver"
version = "0.1.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0"
dependencies = [
"libc",
]
[[package]]
name = "libafl_libfuzzer"
version = "0.15.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "96643d5308a45235621a79c4bce00b75ea8cb587f2c08e719c4f098f53eb6652"
dependencies = [
"cc",
"libfuzzer-sys",
"rustversion",
"toml",
]
[[package]]
name = "libc"
version = "0.2.170"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "875b3680cb2f8f71bdcf9a30f38d48282f5d3c95cbf9b3fa57269bb5d5c06828"
[[package]]
name = "libfuzzer-sys"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf78f52d400cf2d84a3a973a78a592b4adc535739e0a5597a0da6f0c357adc75"
dependencies = [
"arbitrary",
"cc",
]
[[package]]
name = "liblzma"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "66352d7a8ac12d4877b6e6ea5a9b7650ee094257dc40889955bea5bc5b08c1d0"
dependencies = [
"liblzma-sys",
"num_cpus",
]
[[package]]
name = "liblzma-sys"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5839bad90c3cc2e0b8c4ed8296b80e86040240f81d46b9c0e9bc8dd51ddd3af1"
dependencies = [
"cc",
"libc",
"pkg-config",
]
[[package]]
name = "libz-rs-sys"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "902bc563b5d65ad9bba616b490842ef0651066a1a1dc3ce1087113ffcb873c8d"
dependencies = [
"zlib-rs",
]
[[package]]
name = "lz4_flex"
version = "0.11.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75761162ae2b0e580d7e7c390558127e5f01b4194debd6221fd8c207fc80e3f5"
[[package]]
name = "memchr"
version = "2.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
[[package]]
name = "miniz_oxide"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e3e04debbb59698c15bacbb6d93584a8c0ca9cc3213cb423d31f760d8843ce5"
dependencies = [
"adler2",
]
[[package]]
name = "no_std_io2"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c2b9acd47481ab557a89a5665891be79e43cce8a29ad77aa9419d7be5a7c06a"
dependencies = [
"memchr",
]
[[package]]
name = "num_cpus"
version = "1.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43"
dependencies = [
"hermit-abi",
"libc",
]
[[package]]
name = "once_cell"
version = "1.20.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "945462a4b81e43c4e3ba96bd7b49d834c6f61198356aa858733bc4acf3cbe62e"
[[package]]
name = "pin-project-lite"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b"
[[package]]
name = "pkg-config"
version = "0.3.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c"
[[package]]
name = "proc-macro-crate"
version = "3.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b"
dependencies = [
"toml_edit",
]
[[package]]
name = "proc-macro2"
version = "1.0.94"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.39"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1f1914ce909e1658d9907913b4b91947430c7d9be598b15a1912935b8c04801"
dependencies = [
"proc-macro2",
]
[[package]]
name = "radium"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09"
[[package]]
name = "rustversion"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2"
[[package]]
name = "serde"
version = "1.0.218"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8dfc9d19bdbf6d17e22319da49161d5d0108e4188e8b680aef6299eed22df60"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.218"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f09503e191f4e797cb8aac08e9a4a4695c5edf6a2e70e376d961ddd5c969f82b"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "serde_spanned"
version = "0.6.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1"
dependencies = [
"serde",
]
[[package]]
name = "shlex"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
[[package]]
name = "solana-nohash-hasher"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b8a731ed60e89177c8a7ab05fe0f1511cedd3e70e773f288f9de33a9cfdc21e"
[[package]]
name = "strsim"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
[[package]]
name = "syn"
version = "2.0.99"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e02e925281e18ffd9d640e234264753c43edc62d64b2d4cf898f1bc5e75f3fc2"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "tap"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
[[package]]
name = "thiserror"
version = "2.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "2.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "toml"
version = "0.8.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd87a5cdd6ffab733b2f74bc4fd7ee5fff6634124999ac278c35fc78c6120148"
dependencies = [
"indexmap",
"serde",
"serde_spanned",
"toml_datetime",
"toml_edit",
]
[[package]]
name = "toml_datetime"
version = "0.6.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41"
dependencies = [
"serde",
]
[[package]]
name = "toml_edit"
version = "0.22.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474"
dependencies = [
"indexmap",
"serde",
"serde_spanned",
"toml_datetime",
"winnow",
]
[[package]]
name = "tracing"
version = "0.1.41"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0"
dependencies = [
"pin-project-lite",
"tracing-attributes",
"tracing-core",
]
[[package]]
name = "tracing-attributes"
version = "0.1.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "tracing-core"
version = "0.1.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c"
dependencies = [
"once_cell",
]
[[package]]
name = "unicode-ident"
version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
[[package]]
name = "winnow"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0e7f4ea97f6f78012141bcdb6a216b2609f0979ada50b20ca5b52dde2eac2bb1"
dependencies = [
"memchr",
]
[[package]]
name = "wyz"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed"
dependencies = [
"tap",
]
[[package]]
name = "xxhash-rust"
version = "0.8.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fdd20c5420375476fbd4394763288da7eb0cc0b8c11deed431a91562af7335d3"
[[package]]
name = "zlib-rs"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b20717f0917c908dc63de2e44e97f1e6b126ca58d0e391cee86d504eb8fbd05"
[[package]]
name = "zstd"
version = "0.13.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a"
dependencies = [
"zstd-safe",
]
[[package]]
name = "zstd-safe"
version = "7.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f3051792fbdc2e1e143244dc28c60f73d8470e93f3f9cbd0ead44da5ed802722"
dependencies = [
"zstd-sys",
]
[[package]]
name = "zstd-sys"
version = "2.0.14+zstd.1.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8fb060d4926e4ac3a3ad15d864e99ceb5f343c6b34f5bd6d81ae6ed417311be5"
dependencies = [
"cc",
"pkg-config",
]
07070100000057000081A40000000000000000000000016854DB950000028C000000000000000000000000000000000000002000000000backhand-0.23.0/fuzz/Cargo.toml[package]
name = "backhand-fuzz"
version = "0.0.0"
publish = false
edition = "2021"
[package.metadata]
cargo-fuzz = true
[dependencies]
libfuzzer-sys = { version = "0.15.0", package = "libafl_libfuzzer" }
[dependencies.backhand]
path = "../backhand"
# Prevent this from interfering with workspaces
[workspace]
members = ["."]
[profile.release]
debug = 1
[[bin]]
name = "bytes"
path = "fuzz_targets/bytes.rs"
test = false
doc = false
[[bin]]
name = "filesystem"
path = "fuzz_targets/filesystem.rs"
test = false
doc = false
[[bin]]
name = "raw"
path = "fuzz_targets/raw.rs"
test = false
doc = false
[features]
xz-static = ["backhand/xz-static"]
07070100000058000041ED0000000000000000000000026854DB9500000000000000000000000000000000000000000000002200000000backhand-0.23.0/fuzz/fuzz_targets07070100000059000081A40000000000000000000000016854DB95000000C1000000000000000000000000000000000000002B00000000backhand-0.23.0/fuzz/fuzz_targets/bytes.rs#![no_main]
use backhand::Squashfs;
use libfuzzer_sys::fuzz_target;
fuzz_target!(|data: Vec<u8>| {
let reader = std::io::Cursor::new(data);
let _ = Squashfs::from_reader(reader);
});
0707010000005A000081A40000000000000000000000016854DB95000000D1000000000000000000000000000000000000003000000000backhand-0.23.0/fuzz/fuzz_targets/filesystem.rs#![no_main]
use backhand::FilesystemReader;
use libfuzzer_sys::fuzz_target;
fuzz_target!(|data: Vec<u8>| {
let reader = std::io::Cursor::new(data);
let _ = FilesystemReader::from_reader(reader);
});
0707010000005B000081A40000000000000000000000016854DB95000003C8000000000000000000000000000000000000002900000000backhand-0.23.0/fuzz/fuzz_targets/raw.rs#![no_main]
use backhand::{FilesystemReader, FilesystemWriter, NodeHeader};
use libfuzzer_sys::fuzz_target;
fuzz_target!(|data: Vec<u8>| {
let header = NodeHeader { permissions: 0o755, uid: 0, gid: 0, mtime: 0 };
let mut fs = FilesystemWriter::default();
fs.set_time(0x634f5237);
fs.push_dir("oh", header).unwrap();
fs.push_dir("oh/my", header).unwrap();
fs.push_file(std::io::Cursor::new(&data), "heyo", header).unwrap();
fs.push_file(std::io::Cursor::new(&data), "wow", header).unwrap();
fs.push_dir_all("this/is", header).unwrap();
fs.push_file(std::io::Cursor::new(&data), "this/is/extreme", header).unwrap();
let mut output = std::io::Cursor::new(vec![]);
fs.write(&mut output).unwrap();
// reset the position to the start so we can read this as a file
output.set_position(0);
// all files create using FilesystemWriter need to be valid
let _ = FilesystemReader::from_reader(output).unwrap();
});
0707010000005C000081A40000000000000000000000016854DB950000023E000000000000000000000000000000000000001900000000backhand-0.23.0/justfile# Matches build-test-native
build:
cargo build --release --bins
test: build
cargo nextest run --release
bench:
cargo bench
lint:
cargo fmt
cargo clippy
# Matches .github/workflows/coverage.yml
coverage:
cargo llvm-cov run --bin replace-backhand --no-clean --release || true
cargo llvm-cov run --bin add-backhand --no-clean --release || true
cargo llvm-cov run --bin unsquashfs-backhand --no-clean --release || true
cargo llvm-cov nextest --workspace --codecov --output-path codecov.json --features __test_unsquashfs --release --no-clean
0707010000005D000081A40000000000000000000000016854DB9500000019000000000000000000000000000000000000001D00000000backhand-0.23.0/release.tomlpush=false
publish=false
0707010000005E000081A40000000000000000000000016854DB950000010C000000000000000000000000000000000000001E00000000backhand-0.23.0/renovate.json{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [
"config:recommended",
"group:allNonMajor",
"schedule:weekly"
],
"lockFileMaintenance": {
"enabled": true,
"schedule": [
"after 11am on sunday"
]
}
}
0707010000005F000081A40000000000000000000000016854DB9500000075000000000000000000000000000000000000002400000000backhand-0.23.0/rust-toolchain.toml[toolchain]
channel = "stable"
components = [ "rust-src", "rust-analyzer", "rustfmt", "clippy" ]
profile = "default"
07070100000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000B00000000TRAILER!!!1006 blocks