File suse-kabi-tools-0.5.0+git0.9ad91db.obscpio of Package suse-kabi-tools
==> suse-kabi-tools-0.5.0+git0.9ad91db/.cargo/config.toml <==
[build]
rustdocflags = ["--document-private-items"]
==> suse-kabi-tools-0.5.0+git0.9ad91db/.github/workflows/ci.yml <==
name: Continuous integration
on: push
env:
CARGO_TERM_COLOR: always
jobs:
build_and_test:
name: Build and test
runs-on: ubuntu-latest
container: opensuse/tumbleweed
strategy:
fail-fast: false
matrix:
toolchain:
- distribution
- stable
- beta
- nightly
steps:
- name: Check out the repository
uses: actions/checkout@v4
- name: Install the ${{ matrix.toolchain }} Rust toolchain
shell: bash
run: |
if [ ${{ matrix.toolchain }} == distribution ]; then
zypper --non-interactive install cargo rust
else
zypper --non-interactive install rustup
rustup update ${{ matrix.toolchain }}
rustup default ${{ matrix.toolchain }}
fi
- name: Build the project
run: cargo build
- name: Run tests
run: cargo test
- name: Check documentation
env:
RUSTDOCFLAGS: -D warnings
run: cargo doc --no-deps
==> suse-kabi-tools-0.5.0+git0.9ad91db/.github/workflows/pages.yml <==
name: Deploy documentation to GitHub Pages
on:
push:
branches: ["main"]
# Allow running this workflow manually from the Actions tab.
workflow_dispatch:
# Allow only one concurrent deployment, but don't cancel any in-progress runs.
concurrency:
group: "pages"
cancel-in-progress: false
jobs:
build:
name: Build content
runs-on: ubuntu-latest
container: opensuse/tumbleweed
steps:
- name: Check out the repository
uses: actions/checkout@v4
- name: Install build dependencies
run: zypper --non-interactive install groff-full
- name: Render man pages to HTML
run: |
mkdir pages
groff -mandoc -Thtml doc/ksymtypes.1 > pages/ksymtypes.1.html
groff -mandoc -Thtml doc/ksymvers.1 > pages/ksymvers.1.html
groff -mandoc -Thtml doc/suse-kabi-tools.5 > pages/suse-kabi-tools.5.html
- name: Upload the content as an artifact
uses: actions/upload-pages-artifact@v3
with:
path: pages/
deploy:
name: Deploy to GitHub Pages
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
runs-on: ubuntu-latest
permissions:
pages: write
id-token: write
needs: build
steps:
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v4
==> suse-kabi-tools-0.5.0+git0.9ad91db/.gitignore <==
/target
==> suse-kabi-tools-0.5.0+git0.9ad91db/COPYING <==
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
<signature of Ty Coon>, 1 April 1989
Ty Coon, President of Vice
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License.
==> suse-kabi-tools-0.5.0+git0.9ad91db/Cargo.lock <==
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "suse-kabi-tools"
version = "0.1.0"
==> suse-kabi-tools-0.5.0+git0.9ad91db/Cargo.toml <==
# Copyright (C) 2024 SUSE LLC <petr.pavlu@suse.com>
# SPDX-License-Identifier: GPL-2.0-or-later
[package]
name = "suse-kabi-tools"
version = "0.1.0"
authors = ["Petr Pavlu <petr.pavlu@suse.com>"]
edition = "2024"
==> suse-kabi-tools-0.5.0+git0.9ad91db/README.md <==
# suse-kabi-tools
## Overview
suse-kabi-tools is a set of Application Binary Interface (ABI) tools for the Linux kernel.
The project contains the following utilities:
* ksymtypes – a tool to work with symtypes files, which are produced by
[genksyms][genksyms] during the Linux kernel build. It allows you to consolidate multiple symtypes
files into a single file and to compare symtypes data.
* ksymvers – a tool to work with symvers files, which are produced by [modpost][modpost]
during the Linux kernel build. It allows you to compare symvers data, taking into account specific
severity rules.
The tools aim to provide fast and detailed kABI comparison. The most time-consuming operations can
utilize multiple threads running in parallel.
The project is implemented in Rust. The code depends only on the standard library, which avoids
bloating the build and keeps project maintenance low.
Manual pages: [ksymtypes(1)][ksymtypes_1], [ksymvers(1)][ksymvers_1],
[suse-kabi-tools(5)][suse_kabi_tools_5].
## Installation
Ready-to-install packages for (open)SUSE distributions are available in [the Kernel:tools
project][kernel_tools] in the openSUSE Build Service.
To build the project locally, install a Rust toolchain and run `cargo build`.
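For example, a minimal sketch of building and sanity-checking the release binaries (the output
paths follow Cargo defaults):

```sh
# Build release binaries of both tools.
cargo build --release

# Cargo places the executables under target/release/ by default.
./target/release/ksymtypes --version
./target/release/ksymvers --version
```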
## License
This project is released under the terms of [the GPLv2 license](COPYING).
[genksyms]: https://github.com/torvalds/linux/tree/master/scripts/genksyms
[modpost]: https://github.com/torvalds/linux/tree/master/scripts/mod
[ksymtypes_1]: https://suse.github.io/suse-kabi-tools/ksymtypes.1.html
[ksymvers_1]: https://suse.github.io/suse-kabi-tools/ksymvers.1.html
[suse_kabi_tools_5]: https://suse.github.io/suse-kabi-tools/suse-kabi-tools.5.html
[kernel_tools]: https://build.opensuse.org/package/show/Kernel:tools/suse-kabi-tools
==> suse-kabi-tools-0.5.0+git0.9ad91db/doc/ksymtypes.1 <==
.\" Copyright (C) 2024-2025 SUSE LLC <petr.pavlu@suse.com>
.\" SPDX-License-Identifier: GPL-2.0-or-later
.TH KSYMTYPES 1
.SH NAME
ksymtypes \- a tool to work with Linux\-kernel symtypes files
.SH SYNOPSIS
\fBksymtypes\fR [\fIGENERAL\-OPTION\fR...] { \fBconsolidate\fR | \fBsplit\fR | \fBcompare\fR } [\fICOMMAND\-OPTION\fR...] ...
.SH DESCRIPTION
\fBksymtypes\fR is a tool that provides functionality to work with symtypes files. These files
describe the Application Binary Interface (ABI) of the kernel and its modules. The data is produced
by the \fBgenksyms\fR utility from the kernel tree.
.PP
The tool primarily operates with sets of symtypes files as they are produced during a single build
of the Linux kernel. Each such set describes the ABI of a specific kernel and its modules. The tool
refers to this set as a "symtypes corpus".
.PP
The provided functionality is divided into integrated commands. The currently available commands are
\fBconsolidate\fR, \fBsplit\fR and \fBcompare\fR. The \fBconsolidate\fR command takes a symtypes
corpus composed of a set of symtypes files and produces its consolidated variant by merging
duplicate types. The \fBsplit\fR command takes a consolidated symtypes file and divides it into individual
files. The \fBcompare\fR command shows the differences between two symtypes corpuses, which can be
in either split or consolidated form.
.SH GENERAL OPTIONS
.TP
\fB\-d\fR, \fB\-\-debug\fR
Enable debug output.
.TP
\fB\-h\fR, \fB\-\-help\fR
Display global help information and exit.
.TP
\fB\-\-version\fR
Output version information and exit.
.SH CONSOLIDATE COMMAND
\fBksymtypes\fR \fBconsolidate\fR [\fICONSOLIDATE\-OPTION\fR...] \fIPATH\fR
.PP
The \fBconsolidate\fR command reads symtypes files from the specified path, consolidates their
contents by merging duplicate types and writes the output to the specified file. The input path
should point to a directory that the command recursively searches for all symtypes files. In a
typical use case, this will be a build directory of the Linux kernel.
.PP
Available options:
.TP
\fB\-h\fR, \fB\-\-help\fR
Display help information for the command and exit.
.TP
\fB\-j\fR \fINUM\fR, \fB\-\-jobs\fR=\fINUM\fR
Use \fINUM\fR workers to perform the operation simultaneously.
.TP
\fB\-o\fR \fIFILE\fR, \fB\-\-output\fR=\fIFILE\fR
Write the result to \fIFILE\fR. This option is mandatory.
.SH SPLIT COMMAND
\fBksymtypes\fR \fBsplit\fR [\fISPLIT\-OPTION\fR...] \fIPATH\fR
.PP
The \fBsplit\fR command reads a consolidated symtypes file from the specified path and divides the
data into individual symtypes files. This operation is the opposite of the \fBconsolidate\fR
command.
.PP
Note that consolidating and then splitting symtypes data might not produce output that is exactly
the same as the original due to potential differences in the ordering of records.
.PP
Available options:
.TP
\fB\-h\fR, \fB\-\-help\fR
Display help information for the command and exit.
.TP
\fB\-j\fR \fINUM\fR, \fB\-\-jobs\fR=\fINUM\fR
Use \fINUM\fR workers to perform the operation simultaneously.
.TP
\fB\-o\fR \fIDIR\fR, \fB\-\-output\fR=\fIDIR\fR
Write the result to \fIDIR\fR. This option is mandatory.
.SH COMPARE COMMAND
\fBksymtypes\fR \fBcompare\fR [\fICOMPARE\-OPTION\fR...] \fIPATH\fR \fIPATH2\fR
.PP
The \fBcompare\fR command shows the differences between two symtypes corpuses. A corpus can be
specified by a directory containing symtypes files or by a consolidated symtypes file. In a typical
use case, the first input will point to a reference consolidated symtypes corpus and the second
input will point to data from a new build of the Linux kernel.
.PP
Available options:
.TP
\fB\-h\fR, \fB\-\-help\fR
Display help information for the command and exit.
.TP
\fB\-j\fR \fINUM\fR, \fB\-\-jobs\fR=\fINUM\fR
Use \fINUM\fR workers to perform the operation simultaneously.
.TP
\fB\-\-filter\-symbol\-list\fR=\fIFILE\fR
Consider only symbols that match the patterns in \fIFILE\fR.
.SH EXAMPLES
Build the Linux kernel and obtain a reference symvers file and a consolidated symtypes corpus:
.IP
.EX
$ cd <linux\-kernel\-directory>
$ make O=build ...
$ cp build/Module.symvers base.symvers
$ ksymtypes consolidate \-\-output=base.symtypes build/
.EE
.PP
Build a new version of the Linux kernel and compare its ABI with the previous reference:
.IP
.EX
$ cd <linux\-kernel\-directory>
$ vim ... # edit the code
$ make O=build ...
$ ksymvers compare \-\-format=symbols:changed\-exports base.symvers build/Module.symvers
$ ksymtypes compare \-\-filter\-symbol\-list=changed\-exports base.symtypes build/
.EE
.SH SEE ALSO
\fBksymvers\fR(1), \fBsuse-kabi-tools\fR(5)
==> suse-kabi-tools-0.5.0+git0.9ad91db/doc/ksymvers.1 <==
.\" Copyright (C) 2024-2025 SUSE LLC <petr.pavlu@suse.com>
.\" SPDX-License-Identifier: GPL-2.0-or-later
.TH KSYMVERS 1
.SH NAME
ksymvers \- a tool to work with Linux\-kernel symvers files
.SH SYNOPSIS
\fBksymvers\fR [\fIGENERAL\-OPTION\fR...] { \fBcompare\fR } [\fICOMMAND\-OPTION\fR...] ...
.SH DESCRIPTION
\fBksymvers\fR is a tool that provides functionality to work with symvers files. These files
summarize the exported symbols of the kernel and its modules. The data is produced by the
\fBmodpost\fR utility from the kernel tree.
.PP
The tool primarily operates with a final symvers file, typically called \fIModule.symvers\fR, as it
is produced during a single build of the Linux kernel.
.PP
The provided functionality is divided into integrated commands. The only currently available
command is \fBcompare\fR, which shows the differences between two symvers files.
.SH GENERAL OPTIONS
.TP
\fB\-d\fR, \fB\-\-debug\fR
Enable debug output.
.TP
\fB\-h\fR, \fB\-\-help\fR
Display global help information and exit.
.TP
\fB\-\-version\fR
Output version information and exit.
.SH COMPARE COMMAND
\fBksymvers\fR \fBcompare\fR [\fICOMPARE\-OPTION\fR...] \fIFILE\fR \fIFILE2\fR
.PP
The \fBcompare\fR command shows the differences between two symvers files. In a typical use case,
the first input will point to a reference symvers file and the second input will point to a symvers
file from a new build of the Linux kernel.
.PP
Available options:
.TP
\fB\-h\fR, \fB\-\-help\fR
Display help information for the command and exit.
.TP
\fB\-r\fR \fIFILE\fR, \fB\-\-rules\fR=\fIFILE\fR
Load kABI severity rules from \fIFILE\fR.
.TP
\fB\-f\fR \fITYPE[:FILE]\fR, \fB\-\-format\fR=\fITYPE[:FILE]\fR
Change the output format to \fITYPE\fR, or write the \fITYPE\fR-formatted output to \fIFILE\fR.
.IP
The \fITYPE\fR can be one of the following:
.RS 14
.IP \[bu] 2
\fInull\fR \(en produces no output,
.IP \[bu] 2
\fIpretty\fR \(en produces human-readable output,
.IP \[bu] 2
\fIsymbols\fR \(en shows only the names of symbols that have changed.
.RE
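.SH EXAMPLES
The following example compares a reference symvers file against the output of a new build of the
Linux kernel, applying kABI severity rules. The file names are illustrative:
.IP
.EX
$ ksymvers compare \-\-rules=severity.rules base.symvers build/Module.symvers
.EE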
.SH SEE ALSO
\fBksymtypes\fR(1), \fBsuse-kabi-tools\fR(5)
==> suse-kabi-tools-0.5.0+git0.9ad91db/doc/suse-kabi-tools.5 <==
.\" Copyright (C) 2024-2025 SUSE LLC <petr.pavlu@suse.com>
.\" SPDX-License-Identifier: GPL-2.0-or-later
.TH SUSE-KABI-TOOLS 5
.SH NAME
suse-kabi-tools \- Linux-kernel Application Binary Interface (ABI) definition files
.SH DESCRIPTION
suse-kabi-tools operate on several data formats: symbol types (symtypes), symbol versions (symvers),
consolidated symtypes, and kABI severity rules.
.PP
The symtypes and symvers data files are generated by the Linux kernel build, and their format is
effectively defined by the Linux project. The consolidated symtypes and kABI severity rules are
custom formats defined specifically by suse-kabi-tools.
.PP
This document describes these formats.
.SH SYMBOL TYPES
.SS DESCRIPTION
Symtypes files provide detailed information about the ABI in the Linux kernel. The format describes
exported functions, variables and their dependent types as known in a single object file. The data
is generated by the \fBgenksyms\fR utility from the kernel tree. It records the type information
that was used by the tool to calculate a signature (CRC) of each symbol.
.SS FORMAT
A symtypes file consists of type records, one per line. Each record is composed of a type
identifier and an associated type description, separated by whitespace.
.PP
A type identifier can be one of the following:
.RS
.IP \[bu] 2
<exported-name> \(en an exported function or variable definition (no prefix),
.IP \[bu] 2
t#<typedef-name> \(en a typedef definition,
.IP \[bu] 2
e#<enum-name> \(en an enumeration definition,
.IP \[bu] 2
s#<struct-name> \(en a structure definition,
.IP \[bu] 2
u#<union-name> \(en a union definition,
.IP \[bu] 2
E#<enum-constant-name> \(en an enumerator definition.
.RE
.PP
A type description consists of a list of tokens, separated by whitespace. A single token can be a
literal value directly contributing to the type definition or a type reference. References are in
the form "<x>#<type-name>" and point to another type defined in the file.
.PP
A type name can be optionally enclosed in single quotes, both when defining the type and when
referencing it. This allows the type name to contain spaces.
.SS EXAMPLES
The following example shows the file \fIa.symtypes\fR. The data records the exported function "baz",
which takes as its parameters the structure "foo" and a pointer to the union "bar". The structure
"foo" has a complete definition in the file, while the union "bar" is an opaque declaration.
.IP
.EX
$ cat a.symtypes
s#foo struct foo { int m ; }
u#bar union bar { UNKNOWN }
baz void baz ( s#foo a1 , u#bar * a2 )
.EE
.SH SYMBOL VERSIONS
.SS DESCRIPTION
A symvers file provides final information about the ABI in the Linux kernel. The file summarizes
data that was embedded during the Linux kernel build into the main kernel binary and loadable
modules to detect compatibility between symbol references and associated definitions. The data is
generated by the \fBmodpost\fR utility from the kernel tree.
.SS FORMAT
A symvers file consists of symbol records, one per line. Each record is composed of a 32-bit symbol
CRC, a symbol name, a module name, an export type and optionally a namespace identifier, all
separated by whitespace.
.SS EXAMPLES
The following example shows the file \fIModule.symvers\fR. The data records two exported functions
"baz" and "qux". The symbol "baz" has a CRC of 0x12345678, originates from the main kernel binary
and is a regular export. The symbol "qux" has a CRC of 0x90abcdef, originates from the lib/test
module, is a GPL-only export and is defined in the BAR_NS namespace.
.IP
.EX
$ cat Module.symvers
0x12345678 baz vmlinux EXPORT_SYMBOL
0x90abcdef qux lib/test EXPORT_SYMBOL_GPL BAR_NS
.EE
.SH CONSOLIDATED SYMBOL TYPES
.SS DESCRIPTION
The consolidated symtypes format extends the base symtypes format to efficiently describe types
across multiple object files. This makes it possible to describe the entire kernel ABI in a single
file. The format is
generated by the \fBksymtypes\fR \fBconsolidate\fR command.
.SS FORMAT
The format introduces the concept of file sections, with each section starting with a special record
in the form "/* <file-name> */". The content of each section can be trivially same as in the case of
the base symtypes format. However, two extensions are present to save storage space.
.PP
A file section can omit a type definition if it is the same as its last definition previously
encountered in the consolidated file. A reader can implicitly determine its presence be by
recursively walking all exports in the specific file.
.PP
A file section can transform opaque declarations of the form "<short-type>#<name> <type> <name> {
UNKNOWN }" to "<short-type>##<name>". For instance, "s#task_struct struct task_struct { UNKNOWN }"
becomes "s##task_struct". Such definitions apply only to the current file section and do not
override the last definition of the symbol.
.SS EXAMPLES
The following example shows two files \fIa.symtypes\fR and \fIb.symtypes\fR using the base format.
The first file \fIa.symtypes\fR records the exported function "baz", which takes as its parameters
the structure "foo" and a pointer to the union "bar", with the former having a complete definition
and the latter being an opaque declaration. The second file \fIb.symtypes\fR records the exported
function "qux", which takes as its parameters the structure "foo" and a pointer to the union "bar",
with both types having a complete definition.
.IP
.EX
$ cat example/a.symtypes
s#foo struct foo { int m ; }
u#bar union bar { UNKNOWN }
baz void baz ( s#foo a1 , u#bar * a2 )
.EE
.IP
.EX
$ cat example/b.symtypes
s#foo struct foo { int m ; }
u#bar union bar { int i; float f; }
qux void qux ( s#foo a1 , u#bar * a2 )
.EE
.PP
The following example shows the file \fIc.symtypes\fR, which is produced by consolidating the
previous two files \fIa.symtypes\fR and \fIb.symtypes\fR. The structure type "foo", which was the
same in both files, is merged; the union type "bar" appears in two different variants.
.IP
.EX
$ ksymtypes consolidate \-\-output=example/c.symtypes example/
$ cat example/c.symtypes
/* a.symtypes */
s#foo struct foo { int m ; }
u##bar
baz void baz ( s#foo a1 , u#bar * a2 )
/* b.symtypes */
u#bar union bar { int i; float f; }
qux void qux ( s#foo a1 , u#bar * a2 )
.EE
.SH KABI SEVERITY RULES
.SS DESCRIPTION
A kABI severity file provides rules to be used in the comparison of symvers data.
.SS FORMAT
A kABI severity file consists of rules, one per line. Each rule is composed of a pattern
and an associated verdict, separated by whitespace.
.PP
A pattern can be one of the following:
.RS
.IP \[bu] 2
<module-name> \(en a module name, indicated by the presence of the character "/" in the name, or by
the name being literally "vmlinux",
.IP \[bu] 2
<namespace-identifier> \(en a namespace identifier, indicated by the name being all capital letters,
.IP \[bu] 2
<symbol-name> \(en a symbol name, the default case.
.RE
.PP
A pattern can contain the shell wildcards "*" and "?", with their usual meaning.
.PP
A verdict can be either "PASS" or "FAIL".
.PP
The file can contain comments beginning with "#", which extend to the end of the line. Rules are
ordered, and the first match takes effect.
.SS EXAMPLES
The following example shows the file \fIseverity.rules\fR, which defines three rules. The first rule
is a module rule that indicates all changes in modules matching "lib/important*" should result in a
failure. The second rule is a namespace rule that indicates all changes in the namespace
"TEST_IMPORTANT" should result in a failure. The last rule is a symbol rule that indicates all
changes to symbols matching "*not_stable*" can be ignored.
.IP
.EX
$ cat severity.rules
lib/important* FAIL
TEST_IMPORTANT FAIL
*not_stable* PASS
.EE
.SH SEE ALSO
\fBksymtypes\fR(1), \fBksymvers\fR(1)
==> suse-kabi-tools-0.5.0+git0.9ad91db/src/bin/ksymtypes.rs <==
// Copyright (C) 2024 SUSE LLC <petr.pavlu@suse.com>
// SPDX-License-Identifier: GPL-2.0-or-later
use std::process::ExitCode;
use std::{env, io};
use suse_kabi_tools::cli::{handle_value_option, process_global_args};
use suse_kabi_tools::symtypes::SymtypesCorpus;
use suse_kabi_tools::text::Filter;
use suse_kabi_tools::{Error, Timing, debug};
const USAGE_MSG: &str = concat!(
"Usage: ksymtypes [OPTION...] COMMAND\n",
"\n",
"Options:\n",
" -d, --debug enable debug output\n",
" -h, --help display this help and exit\n",
" --version output version information and exit\n",
"\n",
"Commands:\n",
" consolidate consolidate symtypes into a single file\n",
" split split a consolidated symtypes file into\n",
" individual files\n",
" compare show differences between two symtypes corpuses\n"
);
const CONSOLIDATE_USAGE_MSG: &str = concat!(
"Usage: ksymtypes consolidate [OPTION...] PATH\n",
"Consolidate symtypes into a single file.\n",
"\n",
"Options:\n",
" -h, --help display this help and exit\n",
" -j NUM, --jobs=NUM use NUM workers to perform the operation\n",
" -o FILE, --output=FILE write the result in FILE\n",
);
const SPLIT_USAGE_MSG: &str = concat!(
"Usage: ksymtypes split [OPTION...] PATH\n",
"Split a consolidated symtypes file into individual files.\n",
"\n",
"Options:\n",
" -h, --help display this help and exit\n",
" -j NUM, --jobs=NUM use NUM workers to perform the operation\n",
" -o DIR, --output=DIR write the result to DIR\n",
);
const COMPARE_USAGE_MSG: &str = concat!(
"Usage: ksymtypes compare [OPTION...] PATH PATH2\n",
"Show differences between two symtypes corpuses.\n",
"\n",
"Options:\n",
" -h, --help display this help and exit\n",
" -j NUM, --jobs=NUM use NUM workers to perform the operation\n",
" --filter-symbol-list=FILE consider only symbols matching patterns in FILE\n",
);
/// Handles the `-j`/`--jobs` option which specifies the number of workers to perform a given
/// operation simultaneously.
fn handle_jobs_option<I: Iterator<Item = String>>(
arg: &str,
args: &mut I,
) -> Result<Option<i32>, Error> {
if let Some(value) = handle_value_option(arg, args, "-j", "--jobs")? {
match value.parse::<i32>() {
Ok(jobs) => {
if jobs < 1 {
return Err(Error::new_cli(format!(
"Invalid value for '{}': must be positive",
arg
)));
}
return Ok(Some(jobs));
}
Err(err) => {
return Err(Error::new_cli(format!(
"Invalid value for '{}': {}",
arg, err
)));
}
};
}
Ok(None)
}
/// Handles the `consolidate` command which consolidates symtypes into a single file.
fn do_consolidate<I: IntoIterator<Item = String>>(do_timing: bool, args: I) -> Result<(), Error> {
// Parse specific command options.
let mut args = args.into_iter();
let mut num_workers = 1;
let mut maybe_output = None;
let mut past_dash_dash = false;
let mut maybe_path = None;
while let Some(arg) = args.next() {
if !past_dash_dash {
if let Some(value) = handle_jobs_option(&arg, &mut args)? {
num_workers = value;
continue;
}
if let Some(value) = handle_value_option(&arg, &mut args, "-o", "--output")? {
maybe_output = Some(value);
continue;
}
if arg == "-h" || arg == "--help" {
print!("{}", CONSOLIDATE_USAGE_MSG);
return Ok(());
}
if arg == "--" {
past_dash_dash = true;
continue;
}
            if arg.starts_with('-') {
return Err(Error::new_cli(format!(
"Unrecognized consolidate option '{}'",
arg
)));
}
}
if maybe_path.is_none() {
maybe_path = Some(arg);
continue;
}
return Err(Error::new_cli(format!(
"Excess consolidate argument '{}' specified",
arg
)));
}
let output = maybe_output.ok_or_else(|| Error::new_cli("The consolidate output is missing"))?;
let path = maybe_path.ok_or_else(|| Error::new_cli("The consolidate source is missing"))?;
// Do the consolidation.
let symtypes = {
let _timing = Timing::new(do_timing, &format!("Reading symtypes from '{}'", path));
let mut symtypes = SymtypesCorpus::new();
symtypes
.load(&path, io::stderr(), num_workers)
.map_err(|err| {
Error::new_context(format!("Failed to read symtypes from '{}'", path), err)
})?;
symtypes
};
{
let _timing = Timing::new(
do_timing,
&format!("Writing consolidated symtypes to '{}'", output),
);
symtypes.write_consolidated(&output).map_err(|err| {
Error::new_context(
format!("Failed to write consolidated symtypes to '{}'", output),
err,
)
})?;
}
Ok(())
}
/// Handles the `split` command which splits a consolidated symtypes file into individual files.
fn do_split<I: IntoIterator<Item = String>>(do_timing: bool, args: I) -> Result<(), Error> {
// Parse specific command options.
let mut args = args.into_iter();
let mut num_workers = 1;
let mut maybe_output = None;
let mut past_dash_dash = false;
let mut maybe_path = None;
while let Some(arg) = args.next() {
if !past_dash_dash {
if let Some(value) = handle_jobs_option(&arg, &mut args)? {
num_workers = value;
continue;
}
if let Some(value) = handle_value_option(&arg, &mut args, "-o", "--output")? {
maybe_output = Some(value);
continue;
}
if arg == "-h" || arg == "--help" {
print!("{}", SPLIT_USAGE_MSG);
return Ok(());
}
if arg == "--" {
past_dash_dash = true;
continue;
}
            if arg.starts_with('-') {
return Err(Error::new_cli(format!(
"Unrecognized split option '{}'",
arg
)));
}
}
if maybe_path.is_none() {
maybe_path = Some(arg);
continue;
}
return Err(Error::new_cli(format!(
"Excess split argument '{}' specified",
arg
)));
}
let output = maybe_output.ok_or_else(|| Error::new_cli("The split output is missing"))?;
let path = maybe_path.ok_or_else(|| Error::new_cli("The split source is missing"))?;
// Do the split.
let symtypes = {
let _timing = Timing::new(do_timing, &format!("Reading symtypes from '{}'", path));
let mut symtypes = SymtypesCorpus::new();
symtypes
.load(&path, io::stderr(), num_workers)
.map_err(|err| {
Error::new_context(format!("Failed to read symtypes from '{}'", path), err)
})?;
symtypes
};
{
let _timing = Timing::new(
do_timing,
&format!("Writing split symtypes to '{}'", output),
);
symtypes.write_split(&output, num_workers).map_err(|err| {
Error::new_context(
format!("Failed to write split symtypes to '{}'", output),
err,
)
})?;
}
Ok(())
}
/// Handles the `compare` command which shows differences between two symtypes corpuses.
fn do_compare<I: IntoIterator<Item = String>>(do_timing: bool, args: I) -> Result<(), Error> {
// Parse specific command options.
let mut args = args.into_iter();
let mut num_workers = 1;
let mut maybe_symbol_filter_path = None;
let mut past_dash_dash = false;
let mut maybe_path = None;
let mut maybe_path2 = None;
while let Some(arg) = args.next() {
if !past_dash_dash {
if let Some(value) = handle_jobs_option(&arg, &mut args)? {
num_workers = value;
continue;
}
if let Some(value) = handle_value_option(&arg, &mut args, None, "--filter-symbol-list")?
{
maybe_symbol_filter_path = Some(value);
continue;
}
if arg == "-h" || arg == "--help" {
print!("{}", COMPARE_USAGE_MSG);
return Ok(());
}
if arg == "--" {
past_dash_dash = true;
continue;
}
            if arg.starts_with('-') {
return Err(Error::new_cli(format!(
"Unrecognized compare option '{}'",
arg
)));
}
}
if maybe_path.is_none() {
maybe_path = Some(arg);
continue;
}
if maybe_path2.is_none() {
maybe_path2 = Some(arg);
continue;
}
return Err(Error::new_cli(format!(
"Excess compare argument '{}' specified",
arg
)));
}
let path = maybe_path.ok_or_else(|| Error::new_cli("The first compare source is missing"))?;
let path2 =
maybe_path2.ok_or_else(|| Error::new_cli("The second compare source is missing"))?;
// Do the comparison.
debug!("Compare '{}' and '{}'", path, path2);
let symtypes = {
let _timing = Timing::new(do_timing, &format!("Reading symtypes from '{}'", path));
let mut symtypes = SymtypesCorpus::new();
symtypes
.load(&path, io::stderr(), num_workers)
.map_err(|err| {
Error::new_context(format!("Failed to read symtypes from '{}'", path), err)
})?;
symtypes
};
let symtypes2 = {
let _timing = Timing::new(do_timing, &format!("Reading symtypes from '{}'", path2));
let mut symtypes2 = SymtypesCorpus::new();
symtypes2
.load(&path2, io::stderr(), num_workers)
.map_err(|err| {
Error::new_context(format!("Failed to read symtypes from '{}'", path2), err)
})?;
symtypes2
};
let maybe_symbol_filter = match maybe_symbol_filter_path {
Some(symbol_filter_path) => {
let _timing = Timing::new(
do_timing,
&format!("Reading symbol filters from '{}'", symbol_filter_path),
);
let mut symbol_filter = Filter::new();
symbol_filter.load(&symbol_filter_path).map_err(|err| {
Error::new_context(
format!(
"Failed to read symbol filters from '{}'",
symbol_filter_path
),
err,
)
})?;
Some(symbol_filter)
}
None => None,
};
{
let _timing = Timing::new(do_timing, "Comparison");
symtypes
.compare_with(&symtypes2, maybe_symbol_filter.as_ref(), "-", num_workers)
.map_err(|err| {
Error::new_context(
format!("Failed to compare symtypes from '{}' and '{}'", path, path2),
err,
)
})?;
}
Ok(())
}
fn main() -> ExitCode {
// Process global arguments.
let mut args = env::args();
let mut do_timing = false;
let result = process_global_args(
&mut args,
USAGE_MSG,
&format!("ksymtypes {}\n", env!("CARGO_PKG_VERSION")),
&mut do_timing,
);
let command = match result {
Ok(Some(command)) => command,
Ok(None) => return ExitCode::SUCCESS,
Err(err) => {
eprintln!("{}", err);
return ExitCode::FAILURE;
}
};
// Process the specified command.
let result = match command.as_str() {
"consolidate" => do_consolidate(do_timing, args),
"split" => do_split(do_timing, args),
"compare" => do_compare(do_timing, args),
_ => Err(Error::new_cli(format!(
"Unrecognized command '{}'",
command
))),
};
match result {
Ok(()) => ExitCode::SUCCESS,
Err(err) => {
eprintln!("{}", err);
ExitCode::FAILURE
}
}
}
==> suse-kabi-tools-0.5.0+git0.9ad91db/src/bin/ksymvers.rs <==
// Copyright (C) 2025 SUSE LLC <petr.pavlu@suse.com>
// SPDX-License-Identifier: GPL-2.0-or-later
use std::env;
use std::process::ExitCode;
use suse_kabi_tools::cli::{handle_value_option, process_global_args};
use suse_kabi_tools::rules::Rules;
use suse_kabi_tools::symvers::{CompareFormat, SymversCorpus};
use suse_kabi_tools::{Error, Timing, debug};
const USAGE_MSG: &str = concat!(
"Usage: ksymvers [OPTION...] COMMAND\n",
"\n",
"Options:\n",
" -d, --debug enable debug output\n",
" -h, --help display this help and exit\n",
" --version output version information and exit\n",
"\n",
"Commands:\n",
" compare show differences between two symvers files\n",
);
const COMPARE_USAGE_MSG: &str = concat!(
"Usage: ksymvers compare [OPTION...] PATH PATH2\n",
"Show differences between two symvers files.\n",
"\n",
"Options:\n",
" -h, --help display this help and exit\n",
" -r FILE, --rules=FILE load severity rules from FILE\n",
" -f TYPE[:FILE], --format=TYPE[:FILE]\n",
" change the output format to TYPE, or write the\n",
" TYPE-formatted output to FILE\n",
);
/// Handles the `compare` command which shows differences between two symvers files.
///
/// Returns `Ok(true)` if the comparison passed, `Ok(false)` otherwise.
fn do_compare<I: IntoIterator<Item = String>>(do_timing: bool, args: I) -> Result<bool, Error> {
// Parse specific command options.
let mut args = args.into_iter();
let mut maybe_rules_path = None;
let mut writers_conf = vec![(CompareFormat::Pretty, "-".to_string())];
let mut past_dash_dash = false;
let mut maybe_path = None;
let mut maybe_path2 = None;
while let Some(arg) = args.next() {
if !past_dash_dash {
if let Some(value) = handle_value_option(&arg, &mut args, "-r", "--rules")? {
maybe_rules_path = Some(value);
continue;
}
if let Some(value) = handle_value_option(&arg, &mut args, "-f", "--format")? {
match value.split_once(':') {
Some((format, path)) => {
writers_conf.push((CompareFormat::try_from_str(format)?, path.to_string()))
}
None => writers_conf[0].0 = CompareFormat::try_from_str(&value)?,
}
continue;
}
if arg == "-h" || arg == "--help" {
print!("{}", COMPARE_USAGE_MSG);
return Ok(true);
}
if arg == "--" {
past_dash_dash = true;
continue;
}
            if arg.starts_with('-') {
return Err(Error::new_cli(format!(
"Unrecognized compare option '{}'",
arg
)));
}
}
if maybe_path.is_none() {
maybe_path = Some(arg);
continue;
}
if maybe_path2.is_none() {
maybe_path2 = Some(arg);
continue;
}
return Err(Error::new_cli(format!(
"Excess compare argument '{}' specified",
arg
)));
}
let path = maybe_path.ok_or_else(|| Error::new_cli("The first compare source is missing"))?;
let path2 =
maybe_path2.ok_or_else(|| Error::new_cli("The second compare source is missing"))?;
// Do the comparison.
debug!("Compare '{}' and '{}'", path, path2);
let symvers = {
let _timing = Timing::new(do_timing, &format!("Reading symvers from '{}'", path));
let mut symvers = SymversCorpus::new();
symvers.load(&path).map_err(|err| {
Error::new_context(format!("Failed to read symvers from '{}'", path), err)
})?;
symvers
};
let symvers2 = {
let _timing = Timing::new(do_timing, &format!("Reading symvers from '{}'", path2));
let mut symvers2 = SymversCorpus::new();
symvers2.load(&path2).map_err(|err| {
Error::new_context(format!("Failed to read symvers from '{}'", path2), err)
})?;
symvers2
};
let maybe_rules = match maybe_rules_path {
Some(rules_path) => {
let _timing = Timing::new(
do_timing,
&format!("Reading severity rules from '{}'", rules_path),
);
let mut rules = Rules::new();
rules.load(&rules_path).map_err(|err| {
Error::new_context(
format!("Failed to read severity rules from '{}'", rules_path),
err,
)
})?;
Some(rules)
}
None => None,
};
    let passed = {
let _timing = Timing::new(do_timing, "Comparison");
symvers
.compare_with(&symvers2, maybe_rules.as_ref(), &writers_conf[..])
.map_err(|err| {
Error::new_context(
format!("Failed to compare symvers from '{}' and '{}'", path, path2),
err,
)
})?
};
    Ok(passed)
}
fn main() -> ExitCode {
// Process global arguments.
let mut args = env::args();
let mut do_timing = false;
let result = process_global_args(
&mut args,
USAGE_MSG,
&format!("ksymvers {}\n", env!("CARGO_PKG_VERSION")),
&mut do_timing,
);
let command = match result {
Ok(Some(command)) => command,
Ok(None) => return ExitCode::SUCCESS,
Err(err) => {
eprintln!("{}", err);
return ExitCode::FAILURE;
}
};
// Process the specified command.
let result = match command.as_str() {
"compare" => do_compare(do_timing, args).map(|is_equal| {
if is_equal {
ExitCode::SUCCESS
} else {
ExitCode::FAILURE
}
}),
_ => Err(Error::new_cli(format!(
"Unrecognized command '{}'",
command
))),
};
match result {
Ok(code) => code,
Err(err) => {
eprintln!("{}", err);
ExitCode::FAILURE
}
}
}
==> suse-kabi-tools-0.5.0+git0.9ad91db/src/cli/mod.rs <==
// Copyright (C) 2025 SUSE LLC <petr.pavlu@suse.com>
// SPDX-License-Identifier: GPL-2.0-or-later
use crate::{Error, init_debug_level};
/// Handles a command-line option with a mandatory value.
///
/// When the `arg` matches the `short` or `long` variant, the function returns [`Ok(Some(String))`]
/// with the option value. Otherwise, [`Ok(None)`] is returned when the `arg` doesn't match, or
/// [`Err`] in case of an error.
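///
/// # Examples
///
/// A minimal sketch of matching a short option with a separate value (the option names mirror
/// those used by the callers in this crate):
///
/// ```
/// use suse_kabi_tools::cli::handle_value_option;
///
/// let mut rest = ["out.symtypes".to_string()].into_iter();
/// let value = handle_value_option("-o", &mut rest, "-o", "--output").unwrap();
/// assert_eq!(value, Some("out.symtypes".to_string()));
/// ```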
pub fn handle_value_option<
I: Iterator<Item = String>,
S: Into<Option<&'static str>>,
L: Into<Option<&'static str>>,
>(
arg: &str,
args: &mut I,
maybe_short: S,
maybe_long: L,
) -> Result<Option<String>, Error> {
let maybe_short = maybe_short.into();
let maybe_long = maybe_long.into();
    // Handle '<short> <value>' and '<short><value>', e.g. '-j 4' and '-j4'.
if let Some(short) = maybe_short {
if arg == short {
match args.next() {
                Some(value) => return Ok(Some(value)),
None => {
return Err(Error::new_cli(format!("Missing argument for '{}'", short)));
}
};
}
if let Some(value) = arg.strip_prefix(short) {
return Ok(Some(value.to_string()));
}
}
    // Handle '<long> <value>' and '<long>=<value>', e.g. '--jobs 4' and '--jobs=4'.
if let Some(long) = maybe_long {
if arg == long {
match args.next() {
                Some(value) => return Ok(Some(value)),
None => {
return Err(Error::new_cli(format!("Missing argument for '{}'", long)));
}
};
}
if let Some(rem) = arg.strip_prefix(long) {
if let Some(value) = rem.strip_prefix('=') {
return Ok(Some(value.to_string()));
}
}
}
Ok(None)
}
/// Processes command-line options, stopping at the command name.
///
/// Returns [`Ok`] containing [`Some`] with the command name, or [`Ok`] containing [`None`] if the
/// function handles an option directly (such as `--help`), or [`Err`] on error.
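///
/// # Examples
///
/// A minimal sketch, with an illustrative argument vector:
///
/// ```
/// use suse_kabi_tools::cli::process_global_args;
///
/// let mut args = ["tool".to_string(), "compare".to_string()].into_iter();
/// let mut do_timing = false;
/// let command = process_global_args(&mut args, "usage\n", "1.0\n", &mut do_timing).unwrap();
/// assert_eq!(command, Some("compare".to_string()));
/// ```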
pub fn process_global_args<I: Iterator<Item = String>>(
args: &mut I,
usage_msg: &str,
version_msg: &str,
do_timing: &mut bool,
) -> Result<Option<String>, Error> {
// Skip over the program name.
match args.next() {
Some(_) => {}
None => return Err(Error::new_cli("Unknown program name")),
};
// Handle global options and stop at the command.
let mut maybe_command = None;
let mut debug_level = 0;
for arg in args.by_ref() {
if arg == "-d" || arg == "--debug" {
debug_level += 1;
continue;
}
if arg == "--timing" {
*do_timing = true;
continue;
}
if arg == "-h" || arg == "--help" {
print!("{}", usage_msg);
return Ok(None);
}
if arg == "--version" {
print!("{}", version_msg);
return Ok(None);
}
        if arg.starts_with('-') {
return Err(Error::new_cli(format!(
"Unrecognized global option '{}'",
arg
)));
}
maybe_command = Some(arg);
break;
}
init_debug_level(debug_level);
match maybe_command {
Some(command) => Ok(Some(command)),
None => Err(Error::new_cli("No command specified")),
}
}
==> suse-kabi-tools-0.5.0+git0.9ad91db/src/lib.rs <==
// Copyright (C) 2024 SUSE LLC <petr.pavlu@suse.com>
// SPDX-License-Identifier: GPL-2.0-or-later
use std::fmt::{Display, Formatter};
use std::fs::File;
use std::hash::{DefaultHasher, Hash, Hasher};
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::sync::OnceLock;
use std::time::Instant;
use std::{error, fmt, hash, io};
pub mod cli;
pub mod rules;
pub mod symtypes;
pub mod symvers;
pub mod text;
/// An error type for the crate, annotating standard errors with contextual information and
/// providing custom errors.
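///
/// For example, an I/O failure can be wrapped with additional context, and the [`Display`]
/// output then joins all descriptions (a sketch):
///
/// ```text
/// let io_err = std::io::Error::new(std::io::ErrorKind::NotFound, "entry not found");
/// let err = Error::new_context(
///     "Failed to load rules",
///     Error::new_io("Failed to open file 'x.severities'", io_err),
/// );
/// // Displays as: "Failed to load rules: Failed to open file 'x.severities': entry not found"
/// ```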
#[derive(Debug)]
pub enum Error {
Context { desc: String, inner_err: Box<Error> },
CLI(String),
IO { desc: String, io_err: io::Error },
Parse(String),
}
impl Error {
/// Creates a new `Error::Context`.
pub fn new_context<S: Into<String>>(desc: S, err: Error) -> Self {
Self::Context {
desc: desc.into(),
inner_err: Box::new(err),
}
}
/// Creates a new `Error::CLI`.
pub fn new_cli<S: Into<String>>(desc: S) -> Self {
Self::CLI(desc.into())
}
/// Creates a new `Error::IO`.
pub fn new_io<S: Into<String>>(desc: S, io_err: io::Error) -> Self {
Self::IO {
desc: desc.into(),
io_err,
}
}
/// Creates a new `Error::Parse`.
pub fn new_parse<S: Into<String>>(desc: S) -> Self {
Self::Parse(desc.into())
}
}
impl error::Error for Error {}
impl Display for Error {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
match self {
Self::Context { desc, inner_err } => {
write!(f, "{}: ", desc)?;
inner_err.fmt(f)
}
Self::CLI(desc) => write!(f, "{}", desc),
Self::IO { desc, io_err } => {
write!(f, "{}: ", desc)?;
io_err.fmt(f)
}
Self::Parse(desc) => write!(f, "{}", desc),
}
}
}
/// A timer that measures the elapsed time of an operation.
///
/// The time is measured between when the object is instantiated and when it is dropped. A message
/// with the elapsed time is output when the object is dropped.
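///
/// A minimal sketch of the intended use (`do_timing` is an assumed caller-provided flag):
///
/// ```text
/// {
///     let _timing = Timing::new(do_timing, "Loading symtypes data");
///     // ... the measured operation ...
/// } // When do_timing is true, prints e.g. "Loading symtypes data: 1.234s" to stderr.
/// ```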
pub enum Timing {
Active { desc: String, start: Instant },
Inactive,
}
impl Timing {
pub fn new(do_timing: bool, desc: &str) -> Self {
if do_timing {
Timing::Active {
desc: desc.to_string(),
start: Instant::now(),
}
} else {
Timing::Inactive
}
}
}
impl Drop for Timing {
fn drop(&mut self) {
match self {
Timing::Active { desc, start } => {
eprintln!("{}: {:.3?}", desc, start.elapsed());
}
Timing::Inactive => {}
}
}
}
/// A helper extension trait to map [`std::io::Error`] to [`Error`], as
/// `write!(data).map_io_err(desc)`.
trait MapIOErr {
fn map_io_err(self, desc: &str) -> Result<(), Error>;
}
impl MapIOErr for Result<(), io::Error> {
fn map_io_err(self, desc: &str) -> Result<(), Error> {
self.map_err(|err| Error::new_io(desc, err))
}
}
/// A [`std::fs::File`] wrapper that tracks the file path to provide better error context.
pub struct PathFile {
path: PathBuf,
file: File,
}
impl PathFile {
pub fn open<P: AsRef<Path>>(path: P) -> io::Result<Self> {
Ok(Self {
path: path.as_ref().to_path_buf(),
file: File::open(path)?,
})
}
pub fn create<P: AsRef<Path>>(path: P) -> io::Result<Self> {
Ok(Self {
path: path.as_ref().to_path_buf(),
file: File::create(path)?,
})
}
}
impl Read for PathFile {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.file.read(buf).map_err(|err| {
io::Error::other(Error::new_io(
format!("Failed to read data from file '{}'", self.path.display()),
err,
))
})
}
}
impl Write for PathFile {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.file.write(buf).map_err(|err| {
io::Error::other(Error::new_io(
format!("Failed to write data to file '{}'", self.path.display()),
err,
))
})
}
fn flush(&mut self) -> io::Result<()> {
self.file.flush().map_err(|err| {
io::Error::other(Error::new_io(
format!("Failed to flush data to file '{}'", self.path.display()),
err,
))
})
}
}
/// A helper extension trait to obtain the size of an array from its type.
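///
/// For example (a sketch):
///
/// ```text
/// assert_eq!(<[u8; 4]>::SIZE, 4);
/// ```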
pub trait Size {
const SIZE: usize;
}
impl<T, const S: usize> Size for [T; S] {
const SIZE: usize = S;
}
/// Calculates the hash of a given value.
fn hash<T: Hash + ?Sized>(t: &T) -> u64 {
let mut s = DefaultHasher::new();
t.hash(&mut s);
s.finish()
}
/// Global debugging level.
pub static DEBUG_LEVEL: OnceLock<usize> = OnceLock::new();
/// Initializes the global debugging level. It can be called only once.
pub fn init_debug_level(level: usize) {
assert!(DEBUG_LEVEL.get().is_none());
DEBUG_LEVEL.get_or_init(|| level);
}
/// Prints a formatted message to the standard error if debugging is enabled.
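///
/// For example (mirroring the use in this crate):
///
/// ```text
/// debug!("Loading rules data from '{}'", path.display());
/// ```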
#[macro_export]
macro_rules! debug {
($($arg:tt)*) => {
if *$crate::DEBUG_LEVEL.get().unwrap_or(&0) > 0 {
eprintln!($($arg)*);
}
}
}
/// Asserts that `actual_desc` matches the shell wildcard pattern `expected_desc`.
#[macro_export]
macro_rules! assert_inexact {
($actual_desc:expr, $expected_desc:expr) => {{
let actual_desc = $actual_desc;
let expected_desc = $expected_desc;
assert!(
$crate::text::matches_wildcard(&actual_desc, &expected_desc),
"assertion matches_wildcard(actual, expected) failed:\n actual: {}\nexpected: {}\n",
actual_desc,
expected_desc,
);
}};
}
/// Asserts that `result` is an [`Ok`] containing `()`, representing success.
#[cfg(any(test, doc))]
#[macro_export]
macro_rules! assert_ok {
($result:expr) => {
match $result {
Ok(()) => {}
result => panic!("assertion failed: {:?} is not of type Ok(())", result),
}
};
}
/// Asserts that `result` is an [`Ok`] containing the `expected_inner` value.
#[cfg(any(test, doc))]
#[macro_export]
macro_rules! assert_ok_eq {
($result:expr, $expected_inner:expr) => {
match $result {
Ok(actual_inner) => assert_eq!(actual_inner, $expected_inner),
result => panic!("assertion failed: {:?} is not of type Ok(_)", result),
}
};
}
/// Asserts that `result` is an [`Err`] containing an [`Error::Parse`] error with the description
/// `expected_desc`.
#[cfg(any(test, doc))]
#[macro_export]
macro_rules! assert_parse_err {
($result:expr, $expected_desc:expr) => {
match $result {
Err($crate::Error::Parse(actual_desc)) => assert_eq!(actual_desc, $expected_desc),
result => panic!(
"assertion failed: {:?} is not of type Err(Error::Parse(_))",
result
),
}
};
}
/// Asserts that `result` is an [`Err`] containing an [`Error::Parse`] error with a description
/// matching the shell wildcard pattern `expected_desc`.
#[cfg(any(test, doc))]
#[macro_export]
macro_rules! assert_inexact_parse_err {
($result:expr, $expected_desc:expr) => {
match $result {
Err($crate::Error::Parse(actual_desc)) => {
$crate::assert_inexact!(actual_desc, $expected_desc)
}
result => panic!(
"assertion failed: {:?} is not of type Err(Error::Parse(_))",
result
),
}
};
}
/// Concatenates literals into a string slice and returns it as `&[u8]`.
#[cfg(any(test, doc))]
#[macro_export]
macro_rules! bytes {
($($x:expr),* $(,)?) => { concat!($($x),*).as_bytes() };
}
/// Creates a [`Vec`] of [`String`] from a list of string literals.
#[cfg(any(test, doc))]
#[macro_export]
macro_rules! string_vec {
($($x:expr),* $(,)?) => (vec![$($x.to_string()),*]);
}
07070100000016000041ED0000000000000000000000026878E92800000000000000000000000000000000000000000000002D00000000suse-kabi-tools-0.5.0+git0.9ad91db/src/rules07070100000017000081A40000000000000000000000016878E92800001A3A000000000000000000000000000000000000003400000000suse-kabi-tools-0.5.0+git0.9ad91db/src/rules/mod.rs// Copyright (C) 2025 SUSE LLC <petr.pavlu@suse.com>
// SPDX-License-Identifier: GPL-2.0-or-later
use crate::text::{matches_wildcard, read_lines};
use crate::{Error, PathFile, debug};
use std::io::prelude::*;
use std::iter::Peekable;
use std::path::Path;
#[cfg(test)]
mod tests;
/// A pattern used in the specification of a severity rule.
#[derive(Debug, PartialEq)]
enum Pattern {
Module(String),
Namespace(String),
Symbol(String),
}
impl Pattern {
/// Creates a new `Pattern::Module`.
pub fn new_module<S: Into<String>>(name: S) -> Self {
Pattern::Module(name.into())
}
/// Creates a new `Pattern::Namespace`.
pub fn new_namespace<S: Into<String>>(name: S) -> Self {
Pattern::Namespace(name.into())
}
/// Creates a new `Pattern::Symbol`.
pub fn new_symbol<S: Into<String>>(name: S) -> Self {
Pattern::Symbol(name.into())
}
}
/// A verdict used in the specification of a severity rule.
#[derive(Debug, PartialEq)]
enum Verdict {
Pass,
Fail,
}
/// A severity rule.
#[derive(Debug, PartialEq)]
struct Rule {
pattern: Pattern,
verdict: Verdict,
}
impl Rule {
/// Creates a new severity rule.
pub fn new(pattern: Pattern, verdict: Verdict) -> Self {
Rule { pattern, verdict }
}
}
/// A collection of severity rules.
#[derive(Debug, Default, PartialEq)]
pub struct Rules {
data: Vec<Rule>,
}
impl Rules {
/// Creates a new empty `Rules` object.
pub fn new() -> Self {
Self { data: Vec::new() }
}
/// Loads rules data from a specified file.
///
/// New rules are appended to the already present ones.
pub fn load<P: AsRef<Path>>(&mut self, path: P) -> Result<(), Error> {
let path = path.as_ref();
let file = PathFile::open(path).map_err(|err| {
Error::new_io(format!("Failed to open file '{}'", path.display()), err)
})?;
self.load_buffer(path, file)
}
/// Loads rules data from a specified reader.
///
/// The `path` should point to the rules file name, indicating the origin of the data. New rules
/// are appended to the already present ones.
pub fn load_buffer<P: AsRef<Path>, R: Read>(
&mut self,
path: P,
reader: R,
) -> Result<(), Error> {
let path = path.as_ref();
debug!("Loading rules data from '{}'", path.display());
// Read all content from the file.
let lines = match read_lines(reader) {
Ok(lines) => lines,
Err(err) => return Err(Error::new_io("Failed to read rules data", err)),
};
// Parse all rules.
let mut new_rules = Vec::new();
for (line_idx, line) in lines.iter().enumerate() {
if let Some(rule) = parse_rule(path, line_idx, line)? {
new_rules.push(rule);
}
}
// Add the new rules.
self.data.append(&mut new_rules);
Ok(())
}
/// Looks for the first rule whose pattern matches the specified symbol, its module, or its
/// namespace and, if found, returns its verdict on whether changes to the symbol should be
/// tolerated. Returns `false` if no rule matches.
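///
/// For example, given the rules `foo PASS` and `lib/bar.ko FAIL` (a sketch):
///
/// ```text
/// assert!(rules.is_tolerated("foo", "lib/test_module.ko", None));
/// assert!(!rules.is_tolerated("some_symbol", "lib/bar.ko", None));
/// assert!(!rules.is_tolerated("some_symbol", "lib/other.ko", None)); // no match -> false
/// ```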
pub fn is_tolerated(&self, symbol: &str, module: &str, maybe_namespace: Option<&str>) -> bool {
for rule in &self.data {
match &rule.pattern {
Pattern::Module(rule_module) => {
if matches_wildcard(module, rule_module) {
return rule.verdict == Verdict::Pass;
}
}
Pattern::Namespace(rule_namespace) => {
if let Some(namespace) = maybe_namespace {
if matches_wildcard(namespace, rule_namespace) {
return rule.verdict == Verdict::Pass;
}
}
}
Pattern::Symbol(rule_symbol) => {
if matches_wildcard(symbol, rule_symbol) {
return rule.verdict == Verdict::Pass;
}
}
}
}
false
}
}
/// Parses the next word from the `chars` iterator, taking into account comments starting with '#'.
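///
/// For example, for the input `lib/foo.ko PASS # note`, successive calls return
/// `Some("lib/foo.ko")`, `Some("PASS")`, and then `None`.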
fn get_next_word<I: Iterator<Item = char>>(chars: &mut Peekable<I>) -> Option<String> {
// Skip over any whitespace.
while let Some(&c) = chars.peek() {
if !c.is_ascii_whitespace() {
break;
}
chars.next();
}
// Terminate when a comment starting with '#' is found.
if let Some(&c) = chars.peek() {
if c == '#' {
return None;
}
}
// Read one word.
let mut word = String::new();
while let Some(&c) = chars.peek() {
if c.is_ascii_whitespace() || c == '#' {
break;
}
word.push(c);
chars.next();
}
if word.is_empty() {
return None;
}
Some(word)
}
/// Parses a single severity rule.
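///
/// Each non-empty, non-comment line has the form `<pattern> <verdict>`, for example:
///
/// ```text
/// lib/test_module.ko PASS  # a module pattern (contains '/' or equals "vmlinux")
/// TEST_NS FAIL             # a namespace pattern (all uppercase)
/// symbol_name* PASS        # a symbol pattern (anything else)
/// ```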
fn parse_rule(path: &Path, line_idx: usize, line: &str) -> Result<Option<Rule>, Error> {
let mut chars = line.chars().peekable();
// Parse the pattern.
let pattern = match get_next_word(&mut chars) {
Some(pattern) => {
if pattern.contains('/') || pattern == "vmlinux" {
Pattern::new_module(pattern)
} else if pattern == pattern.to_uppercase() {
Pattern::new_namespace(pattern)
} else {
Pattern::new_symbol(pattern)
}
}
None => {
// The line doesn't contain any rule.
return Ok(None);
}
};
// Parse the verdict.
let verdict = match get_next_word(&mut chars) {
Some(verdict) => match verdict.as_str() {
"PASS" => Verdict::Pass,
"FAIL" => Verdict::Fail,
_ => {
return Err(Error::new_parse(format!(
"{}:{}: Invalid verdict '{}', must be either PASS or FAIL",
path.display(),
line_idx + 1,
verdict
)));
}
},
None => {
return Err(Error::new_parse(format!(
"{}:{}: The rule does not specify a verdict",
path.display(),
line_idx + 1
)));
}
};
// Check that nothing else is left on the line.
if let Some(word) = get_next_word(&mut chars) {
return Err(Error::new_parse(format!(
"{}:{}: Unexpected string '{}' found after the verdict",
path.display(),
line_idx + 1,
word
)));
}
Ok(Some(Rule::new(pattern, verdict)))
}
07070100000018000081A40000000000000000000000016878E92800001DDD000000000000000000000000000000000000003600000000suse-kabi-tools-0.5.0+git0.9ad91db/src/rules/tests.rs// Copyright (C) 2025 SUSE LLC <petr.pavlu@suse.com>
// SPDX-License-Identifier: GPL-2.0-or-later
use super::*;
use crate::{assert_ok, assert_parse_err, bytes};
#[test]
fn read_module_rule() {
// Check that a pattern containing '/' or equal to "vmlinux" is considered a module.
let mut rules = Rules::new();
let result = rules.load_buffer(
"test.severities",
bytes!(
"lib/test_module.ko PASS\n",
"vmlinux PASS\n", //
),
);
assert_ok!(result);
assert_eq!(
rules,
Rules {
data: vec![
Rule::new(Pattern::new_module("lib/test_module.ko"), Verdict::Pass),
Rule::new(Pattern::new_module("vmlinux"), Verdict::Pass),
]
}
);
}
#[test]
fn read_namespace_rule() {
// Check that a pattern consisting of only uppercase letters is considered a namespace.
let mut rules = Rules::new();
let result = rules.load_buffer(
"test.severities",
bytes!(
"TEST_NAMESPACE PASS\n", //
),
);
assert_ok!(result);
assert_eq!(
rules,
Rules {
data: vec![Rule::new(
Pattern::new_namespace("TEST_NAMESPACE"),
Verdict::Pass
),]
}
);
}
#[test]
fn read_symbol_rule() {
// Check that a pattern which isn't recognized as a module or a namespace is considered
// a symbol.
let mut rules = Rules::new();
let result = rules.load_buffer(
"test.severities",
bytes!(
"symbol_name PASS\n",
"test_module.ko PASS\n",
"vmlinux2 PASS\n",
"test_namespace PASS\n", //
),
);
assert_ok!(result);
assert_eq!(
rules,
Rules {
data: vec![
Rule::new(Pattern::new_symbol("symbol_name"), Verdict::Pass),
Rule::new(Pattern::new_symbol("test_module.ko"), Verdict::Pass),
Rule::new(Pattern::new_symbol("vmlinux2"), Verdict::Pass),
Rule::new(Pattern::new_symbol("test_namespace"), Verdict::Pass),
]
}
);
}
#[test]
fn read_pass_fail_rule() {
// Check that the PASS and FAIL verdicts are correctly recognized.
let mut rules = Rules::new();
let result = rules.load_buffer(
"test.severities",
bytes!(
"symbol_name PASS\n",
"symbol_name2 FAIL\n", //
),
);
assert_ok!(result);
assert_eq!(
rules,
Rules {
data: vec![
Rule::new(Pattern::new_symbol("symbol_name"), Verdict::Pass),
Rule::new(Pattern::new_symbol("symbol_name2"), Verdict::Fail),
]
}
);
}
#[test]
fn read_no_verdict() {
// Check that a rule without a verdict is rejected.
let mut rules = Rules::new();
let result = rules.load_buffer(
"test.severities",
bytes!(
"symbol_name\n", //
),
);
assert_parse_err!(
result,
"test.severities:1: The rule does not specify a verdict"
);
assert_eq!(rules, Rules { data: vec![] });
}
#[test]
fn read_invalid_verdict() {
// Check that an invalid verdict is rejected.
let mut rules = Rules::new();
let result = rules.load_buffer(
"test.severities",
bytes!(
"symbol_name OK\n", //
),
);
assert_parse_err!(
result,
"test.severities:1: Invalid verdict 'OK', must be either PASS or FAIL"
);
assert_eq!(rules, Rules { data: vec![] });
}
#[test]
fn read_extra_data() {
// Check that any extra data after the verdict is rejected.
let mut rules = Rules::new();
let result = rules.load_buffer(
"test.severities",
bytes!(
"symbol_name PASS garbage\n", //
),
);
assert_parse_err!(
result,
"test.severities:1: Unexpected string 'garbage' found after the verdict"
);
assert_eq!(rules, Rules { data: vec![] });
}
#[test]
fn read_empty_record() {
// Check that empty records are skipped when reading a rules file.
let mut rules = Rules::new();
let result = rules.load_buffer(
"test.severities",
bytes!(
"\n", "\n", //
),
);
assert_ok!(result);
assert_eq!(rules, Rules { data: vec![] });
}
#[test]
fn read_comments() {
// Check that comments in various positions are correctly skipped.
let mut rules = Rules::new();
let result = rules.load_buffer(
"test.severities",
bytes!(
"# comment 1\n",
"lib/test_module.ko PASS # comment 2\n",
"lib/test_module2.ko FAIL# comment 3\n", //
),
);
assert_ok!(result);
assert_eq!(
rules,
Rules {
data: vec![
Rule::new(Pattern::new_module("lib/test_module.ko"), Verdict::Pass),
Rule::new(Pattern::new_module("lib/test_module2.ko"), Verdict::Fail),
]
}
);
}
#[test]
fn tolerate_symbol() {
// Check whether a symbol name match in a rules file correctly determines if changes should be
// tolerated/ignored.
let mut rules = Rules::new();
let result = rules.load_buffer(
"test.severities",
bytes!(
"foo PASS\n",
"bar FAIL\n",
"baz* PASS\n", //
),
);
assert_ok!(result);
assert!(rules.is_tolerated("foo", "lib/test_module.ko", None));
assert!(!rules.is_tolerated("bar", "lib/test_module.ko", None));
assert!(rules.is_tolerated("bazi", "lib/test_module.ko", None));
assert!(!rules.is_tolerated("qux", "lib/test_module.ko", None));
}
#[test]
fn tolerate_module() {
// Check whether a module name match in a rules file correctly determines if changes should be
// tolerated/ignored.
let mut rules = Rules::new();
let result = rules.load_buffer(
"test.severities",
bytes!(
"lib/foo.ko PASS\n",
"lib/bar.ko FAIL\n",
"lib/baz*.ko PASS\n", //
),
);
assert_ok!(result);
assert!(rules.is_tolerated("symbol_name", "lib/foo.ko", None));
assert!(!rules.is_tolerated("symbol_name", "lib/bar.ko", None));
assert!(rules.is_tolerated("symbol_name", "lib/bazi.ko", None));
assert!(!rules.is_tolerated("symbol_name", "lib/qux.ko", None));
}
#[test]
fn tolerate_namespace() {
// Check whether a namespace match in a rules file correctly determines if changes should be
// tolerated/ignored.
let mut rules = Rules::new();
let result = rules.load_buffer(
"test.severities",
bytes!(
"FOO_NS PASS\n",
"BAR_NS FAIL\n",
"BAZ*_NS PASS\n", //
),
);
assert_ok!(result);
assert!(rules.is_tolerated("symbol_name", "lib/test_module.ko", Some("FOO_NS")));
assert!(!rules.is_tolerated("symbol_name", "lib/test_module.ko", Some("BAR_NS")));
assert!(rules.is_tolerated("symbol_name", "lib/test_module.ko", Some("BAZI_NS")));
assert!(!rules.is_tolerated("symbol_name", "lib/test_module.ko", Some("QUX_NS")));
}
#[test]
fn tolerate_order() {
// Check that the tolerated/ignored decision made by a rules file is based on the first
// matching rule, not on the most specific one.
let mut rules = Rules::new();
let result = rules.load_buffer(
"test.severities",
bytes!(
"foo* PASS\n",
"foobar FAIL\n", //
),
);
assert_ok!(result);
assert!(rules.is_tolerated("foobar", "lib/test_module.ko", None));
}
07070100000019000041ED0000000000000000000000026878E92800000000000000000000000000000000000000000000003000000000suse-kabi-tools-0.5.0+git0.9ad91db/src/symtypes0707010000001A000081A40000000000000000000000016878E9280000AE51000000000000000000000000000000000000003700000000suse-kabi-tools-0.5.0+git0.9ad91db/src/symtypes/mod.rs// Copyright (C) 2024 SUSE LLC <petr.pavlu@suse.com>
// SPDX-License-Identifier: GPL-2.0-or-later
use crate::text::{DirectoryWriter, Filter, WriteGenerator, Writer, read_lines, unified_diff};
use crate::{Error, MapIOErr, PathFile, Size, debug, hash};
use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::collections::{HashMap, HashSet};
use std::io::prelude::*;
use std::iter::zip;
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex, RwLock};
use std::{array, fs, mem, thread};
#[cfg(test)]
mod tests;
#[cfg(test)]
mod tests_format;
// Notes:
// [1] The module uses several HashMaps that are indexed by Strings. Rust allows a lookup in
// such a HashMap using &str. Unfortunately, stable Rust (1.84) currently doesn't offer a way
// to do this lookup and, when the key is missing, insert it as a String. Depending on the
// specific case and what is likely to produce less overhead, the code opts to turn the key
// into a String already on the first lookup, or to run the search again if the key is missing
// and needs inserting.
// [2] HashSet in stable Rust (1.84) doesn't provide the entry functionality. It is
// a nightly-only experimental API and is therefore not used by the module.
/// A token used in the description of a type.
#[derive(Clone, Debug, Eq, PartialEq, Hash, Ord, PartialOrd)]
enum Token {
TypeRef(String),
Atom(String),
}
impl Token {
/// Creates a new `Token::TypeRef`.
fn new_typeref<S: Into<String>>(name: S) -> Self {
Self::TypeRef(name.into())
}
/// Creates a new `Token::Atom`.
fn new_atom<S: Into<String>>(name: S) -> Self {
Self::Atom(name.into())
}
/// Returns the token data as a string slice.
fn as_str(&self) -> &str {
match self {
Self::TypeRef(ref_name) => ref_name.as_str(),
Self::Atom(word) => word.as_str(),
}
}
}
/// A sequence of tokens, describing one type.
type Tokens = Vec<Token>;
/// A collection of all variants of the same type name in a given corpus.
type TypeVariants = Vec<Arc<Tokens>>;
/// A mapping from a type name to all its known variants.
type Types = HashMap<String, TypeVariants>;
/// An array of `Types`, indexed by `type_bucket_idx(type_name)`. This allows each bucket to be
/// protected by a separate lock when reading symtypes data.
type TypeBuckets = [Types; 256];
/// Computes the index into `TypeBuckets` for a given type name.
fn type_bucket_idx(type_name: &str) -> usize {
(hash(type_name) % TypeBuckets::SIZE as u64) as usize
}
/// A mapping from a symbol name to an index in `SymtypesFiles`, specifying in which file the symbol
/// is defined.
type Exports = HashMap<String, usize>;
/// A mapping from a type name to `Tokens`, specifying the type in a given file.
type FileRecords = HashMap<String, Arc<Tokens>>;
/// A representation of a single symtypes file.
#[derive(Debug, Eq, PartialEq)]
struct SymtypesFile {
path: PathBuf,
records: FileRecords,
}
/// A collection of symtypes files.
type SymtypesFiles = Vec<SymtypesFile>;
/// A representation of a kernel ABI, loaded from symtypes files.
///
/// * The `types` collection stores all types and their variants.
/// * The `files` collection records types in individual symtypes files.
/// * The `exports` collection provides all exports in the corpus. Each export uses an index to
/// reference its origin in `files`.
///
/// For instance, consider the following corpus consisting of two files `test_a.symtypes` and
/// `test_b.symtypes`:
///
/// * `test_a.symtypes`:
///
/// ```text
/// s#foo struct foo { int a ; }
/// bar int bar ( s#foo )
/// ```
///
/// * `test_b.symtypes`:
///
/// ```text
/// s#foo struct foo { UNKNOWN }
/// baz int baz ( s#foo )
/// ```
///
/// The corpus has two exports `bar` and `baz`, with each referencing structure `foo`, but with
/// different definitions, one is complete and one is incomplete.
///
/// The data would be represented as follows:
///
/// The example assumes that `type_bucket_idx("s#foo")` evaluates to 1, and that
/// `type_bucket_idx("bar")` and `type_bucket_idx("baz")` both evaluate to 3.
///
/// ```text
/// foo_tokens = Arc { Tokens[ Atom("struct"), Atom("foo"), Atom("{"), Atom("int"), Atom("a"), Atom(";"), Atom("}") ] }
/// foo2_tokens = Arc { Tokens[ Atom("struct"), Atom("foo"), Atom("{"), Atom("UNKNOWN"), Atom("}") ] }
/// bar_tokens = Arc { Tokens[ Atom("int"), Atom("bar"), Atom("("), TypeRef("s#foo"), Atom(")") ] }
/// baz_tokens = Arc { Tokens[ Atom("int"), Atom("baz"), Atom("("), TypeRef("s#foo"), Atom(")") ] }
/// corpus = SymtypesCorpus {
/// types: TypeBuckets {
/// [0]: Types { },
/// [1]: Types {
/// "s#foo": TypeVariants[ foo_tokens, foo2_tokens ]
/// },
/// [2]: Types { },
/// [3]: Types {
/// "bar": TypeVariants[ bar_tokens ],
/// "baz": TypeVariants[ baz_tokens ],
/// },
/// [4..TypeBuckets::SIZE] = Types { },
/// },
/// exports: Exports {
/// "bar": 0,
/// "baz": 1,
/// },
/// files: SymtypesFiles[
/// SymtypesFile {
/// path: PathBuf("test_a.symtypes"),
/// records: FileRecords {
/// "s#foo": foo_tokens,
/// "bar": bar_tokens,
/// }
/// },
/// SymtypesFile {
/// path: PathBuf("test_b.symtypes"),
/// records: FileRecords {
/// "s#foo": foo2_tokens,
/// "baz": baz_tokens,
/// }
/// },
/// ],
/// }
/// ```
///
/// Note importantly that if a `Token` in `TypeVariants` is a `TypeRef` then the reference only
/// specifies the name of the target type, e.g. `s#foo` above. The actual type variant must be
/// determined based on which file is being processed. This makes it possible to trivially merge
/// `Tokens` and limits the memory needed to store the corpus. On the other hand, when comparing
/// two `Tokens` vectors for ABI equality, the code needs to consider whether all referenced
/// subtypes are actually equal as well.
#[derive(Debug, Eq, PartialEq)]
pub struct SymtypesCorpus {
types: TypeBuckets,
exports: Exports,
files: SymtypesFiles,
}
/// A helper struct to provide synchronized access to all corpus data and a warnings stream during
/// parallel loading.
struct LoadContext<'a> {
types: [RwLock<Types>; TypeBuckets::SIZE],
exports: Mutex<Exports>,
files: Mutex<SymtypesFiles>,
warnings: Mutex<Box<dyn Write + Send + 'a>>,
}
/// Type names active during the loading of a specific file, providing for each type its tokens and
/// source line index.
type LoadActiveTypes = HashMap<String, (Arc<Tokens>, usize)>;
/// Changes between two corpuses, recording a tuple of each modified type's name, its old tokens and
/// its new tokens, along with a [`Vec`] of exported symbols affected by the change.
type CompareChangedTypes<'a> = HashMap<(&'a str, &'a Tokens, &'a Tokens), Vec<&'a str>>;
/// Type names processed during the comparison for a specific file.
type CompareFileTypes<'a> = HashSet<&'a str>;
impl<'a> LoadContext<'a> {
/// Creates a new load context from a symtypes corpus and a warnings stream.
fn from<W: Write + Send + 'a>(mut symtypes: SymtypesCorpus, warnings: W) -> Self {
Self {
types: array::from_fn(|i| RwLock::new(mem::take(&mut symtypes.types[i]))),
exports: Mutex::new(mem::take(&mut symtypes.exports)),
files: Mutex::new(mem::take(&mut symtypes.files)),
warnings: Mutex::new(Box::new(warnings)),
}
}
/// Consumes this load context, returning the underlying data.
fn into_inner(mut self) -> SymtypesCorpus {
SymtypesCorpus {
types: array::from_fn(|i| mem::take(&mut self.types[i]).into_inner().unwrap()),
exports: self.exports.into_inner().unwrap(),
files: self.files.into_inner().unwrap(),
}
}
}
impl Default for SymtypesCorpus {
fn default() -> Self {
Self::new()
}
}
impl SymtypesCorpus {
/// Creates a new empty corpus.
pub fn new() -> Self {
Self {
types: array::from_fn(|_| Types::new()),
exports: Exports::new(),
files: SymtypesFiles::new(),
}
}
/// Loads symtypes data from a given location.
///
/// The `path` can point to a single symtypes file or a directory. In the latter case, the
/// function recursively collects all symtypes files in that directory and loads them.
pub fn load<P: AsRef<Path>, W: Write + Send>(
&mut self,
path: P,
warnings: W,
num_workers: i32,
) -> Result<(), Error> {
let path = path.as_ref();
// Determine if the input is a directory tree or a single symtypes file.
let md = fs::metadata(path).map_err(|err| {
Error::new_io(format!("Failed to query path '{}'", path.display()), err)
})?;
if md.is_dir() {
// Recursively collect symtypes files within the directory.
let mut symfiles = Vec::new();
Self::collect_symfiles(path, Path::new(""), &mut symfiles)?;
// Load all found files.
self.load_symfiles(
path,
&symfiles.iter().map(Path::new).collect::<Vec<&Path>>(),
warnings,
num_workers,
)
} else {
// Load the single file.
self.load_symfiles(Path::new(""), &[path], warnings, num_workers)
}
}
/// Collects recursively all symtypes files under the given root path and its subpath.
fn collect_symfiles(
root: &Path,
sub_path: &Path,
symfiles: &mut Vec<PathBuf>,
) -> Result<(), Error> {
let path = root.join(sub_path);
let dir_iter = fs::read_dir(&path).map_err(|err| {
Error::new_io(
format!("Failed to read directory '{}'", path.display()),
err,
)
})?;
for maybe_entry in dir_iter {
let entry = maybe_entry.map_err(|err| {
Error::new_io(
format!("Failed to read directory '{}'", path.display()),
err,
)
})?;
let entry_path = entry.path();
let md = fs::symlink_metadata(&entry_path).map_err(|err| {
Error::new_io(
format!("Failed to query path '{}'", entry_path.display()),
err,
)
})?;
if md.is_symlink() {
continue;
}
let entry_sub_path = sub_path.join(entry.file_name());
if md.is_dir() {
Self::collect_symfiles(root, &entry_sub_path, symfiles)?;
continue;
}
let ext = match entry_sub_path.extension() {
Some(ext) => ext,
None => continue,
};
if ext == "symtypes" {
symfiles.push(entry_sub_path);
}
}
Ok(())
}
/// Loads all specified symtypes files.
fn load_symfiles<W: Write + Send>(
&mut self,
root: &Path,
symfiles: &[&Path],
warnings: W,
num_workers: i32,
) -> Result<(), Error> {
// Load data from the files.
let next_work_idx = AtomicUsize::new(0);
let load_context = LoadContext::from(mem::take(self), warnings);
thread::scope(|s| -> Result<(), Error> {
let mut workers = Vec::new();
for _ in 0..num_workers {
workers.push(s.spawn(|| {
loop {
let work_idx = next_work_idx.fetch_add(1, Ordering::Relaxed);
if work_idx >= symfiles.len() {
return Ok(());
}
let sub_path = symfiles[work_idx];
let path = root.join(sub_path);
let file = PathFile::open(&path).map_err(|err| {
Error::new_io(format!("Failed to open file '{}'", path.display()), err)
})?;
Self::load_inner(sub_path, file, &load_context)?;
}
}));
}
// Join all worker threads. Return the first error if any is found; other errors are
// silently dropped, which is acceptable here.
for worker in workers {
worker.join().unwrap()?
}
Ok(())
})?;
*self = load_context.into_inner();
Ok(())
}
/// Loads symtypes data from a specified reader.
///
/// The `path` should point to a symtypes file name, indicating the origin of the data.
pub fn load_buffer<P: AsRef<Path>, R: Read, W: Write + Send>(
&mut self,
path: P,
reader: R,
warnings: W,
) -> Result<(), Error> {
let path = path.as_ref();
let load_context = LoadContext::from(mem::take(self), warnings);
Self::load_inner(path, reader, &load_context)?;
*self = load_context.into_inner();
Ok(())
}
/// Loads symtypes data from a specified reader.
fn load_inner<R: Read>(
path: &Path,
reader: R,
load_context: &LoadContext,
) -> Result<(), Error> {
debug!("Loading symtypes data from '{}'", path.display());
// Read all content from the file.
let lines = match read_lines(reader) {
Ok(lines) => lines,
Err(err) => return Err(Error::new_io("Failed to read symtypes data", err)),
};
// Detect whether the input is a single or consolidated symtypes file.
let is_consolidated =
!lines.is_empty() && lines[0].starts_with("/* ") && lines[0].ends_with(" */");
let mut file_idx = if !is_consolidated {
Self::add_file(path, load_context)
} else {
usize::MAX
};
// Track which records are currently active and all per-file overrides for UNKNOWN
// definitions if this is a consolidated file.
let mut active_types = LoadActiveTypes::new();
let mut local_override = LoadActiveTypes::new();
let mut records = FileRecords::new();
// Parse all declarations.
for (line_idx, line) in lines.iter().enumerate() {
// Skip empty lines in consolidated files.
if is_consolidated && line.is_empty() {
continue;
}
// Handle file headers in consolidated files.
if is_consolidated && line.starts_with("/* ") && line.ends_with(" */") {
// Complete the current file.
if file_idx != usize::MAX {
Self::close_file(
path,
file_idx,
mem::take(&mut records),
mem::take(&mut local_override),
&active_types,
load_context,
)?;
}
// Open the new file.
file_idx = Self::add_file(Path::new(&line[3..line.len() - 3]), load_context);
continue;
}
// Ok, it is a regular record, parse it.
let (name, tokens, is_local_override) =
parse_type_record(path, line_idx, line, is_consolidated)?;
// Check if the record is a duplicate of another one.
if records.contains_key(&name) {
return Err(Error::new_parse(format!(
"{}:{}: Duplicate record '{}'",
path.display(),
line_idx + 1,
name,
)));
}
// Insert the type into the corpus and file records.
let tokens_rc = Self::merge_type(&name, tokens, load_context);
if is_export_name(&name) {
Self::insert_export(&name, file_idx, line_idx, load_context)?;
}
records.insert(name.clone(), Arc::clone(&tokens_rc));
// Record the type as currently active.
if is_local_override {
local_override.insert(name, (tokens_rc, line_idx));
} else {
active_types.insert(name, (tokens_rc, line_idx));
}
}
// Complete the file.
if file_idx != usize::MAX {
Self::close_file(
path,
file_idx,
records,
local_override,
&active_types,
load_context,
)?;
}
Ok(())
}
/// Adds a specified file to the corpus.
///
/// Note that in the case of a consolidated file, unlike most load functions, the `path` should
/// point to the name of the specific symtypes file.
fn add_file(path: &Path, load_context: &LoadContext) -> usize {
let symfile = SymtypesFile {
path: path.to_path_buf(),
records: FileRecords::new(),
};
let mut files = load_context.files.lock().unwrap();
files.push(symfile);
files.len() - 1
}
/// Completes loading of the symtypes file specified by `file_idx` by extrapolating its records,
/// validating all references, and finally adding the file records to the corpus.
fn close_file(
path: &Path,
file_idx: usize,
mut records: FileRecords,
local_override: LoadActiveTypes,
active_types: &LoadActiveTypes,
load_context: &LoadContext,
) -> Result<(), Error> {
// Extrapolate all records and validate references.
let walk_records = records.keys().map(String::clone).collect::<Vec<_>>();
for name in walk_records {
// Note that all explicit types are known, so it is ok to pass usize::MAX as
// from_line_idx because it is unused.
Self::complete_file_record(
path,
usize::MAX,
&name,
true,
&local_override,
active_types,
&mut records,
)?;
}
// Add the file records to the corpus.
let mut files = load_context.files.lock().unwrap();
files[file_idx].records = records;
Ok(())
}
/// Adds the given type definition to the corpus if it's not already present, and returns its
/// reference-counted pointer.
fn merge_type(type_name: &str, tokens: Tokens, load_context: &LoadContext) -> Arc<Tokens> {
// Types are often repeated in different symtypes files. Try first to find an existing type
// while holding only the read lock.
{
let types = load_context.types[type_bucket_idx(type_name)]
.read()
.unwrap();
if let Some(variants) = types.get(type_name) {
for variant_rc in variants {
if tokens == **variant_rc {
return Arc::clone(variant_rc);
}
}
}
}
let mut types = load_context.types[type_bucket_idx(type_name)]
.write()
.unwrap();
match types.get_mut(type_name) {
Some(variants) => {
for variant_rc in variants.iter() {
if tokens == **variant_rc {
return Arc::clone(variant_rc);
}
}
let tokens_rc = Arc::new(tokens);
variants.push(Arc::clone(&tokens_rc));
tokens_rc
}
None => {
let tokens_rc = Arc::new(tokens);
types.insert(type_name.to_string(), vec![Arc::clone(&tokens_rc)]); // [1]
tokens_rc
}
}
}
/// Registers the specified export in the corpus and validates that it is not a duplicate.
fn insert_export(
type_name: &str,
file_idx: usize,
line_idx: usize,
load_context: &LoadContext,
) -> Result<(), Error> {
// Add the export, if it is unique.
let other_file_idx = {
let mut exports = load_context.exports.lock().unwrap();
match exports.entry(type_name.to_string()) // [1]
{
Occupied(export_entry) => *export_entry.get(),
Vacant(export_entry) => {
export_entry.insert(file_idx);
return Ok(());
}
}
};
// Report the duplicate export as a warning. Although technically an error, some auxiliary
// kernel components that are not part of vmlinux/modules may reuse logic from the rest of
// the kernel by including its C/assembly files, which may contain export directives. If
// these components aren't correctly configured to disable exports, collecting all symtypes
// from the build will result in duplicate symbols. This should be fixed in the kernel.
// However, we want to proceed, especially if this is the compare command, where we want to
// report actual kABI differences.
let message = {
let files = load_context.files.lock().unwrap();
let path = &files[file_idx].path;
let other_path = &files[other_file_idx].path;
format!(
"{}:{}: WARNING: Export '{}' is duplicate, previous occurrence found in '{}'",
path.display(),
line_idx + 1,
type_name,
other_path.display()
)
};
let mut warnings = load_context.warnings.lock().unwrap();
writeln!(warnings, "{}", message)
.map_io_err("Failed to write a duplicate-export warning")?;
Ok(())
}
/// Completes a type record by validating all its references and, in the case of a consolidated
/// source, enhances the specified file records with the necessary implicit types.
///
/// In a consolidated file, a file entry can omit types that the file contains if those types
/// were previously defined by another file. This function finds all such implicit references
/// and adds them to `records`.
///
/// A caller of this function should pre-fill `records` with all explicit types present in
/// a file entry and then call this function on each of those types. These root calls should be
/// invoked with `is_explicit` set to `true`. The function then recursively adds all needed
/// implicit types that are referenced from these roots.
fn complete_file_record(
path: &Path,
from_line_idx: usize,
type_name: &str,
is_explicit: bool,
local_override: &LoadActiveTypes,
active_types: &LoadActiveTypes,
records: &mut FileRecords,
) -> Result<(), Error> {
if is_explicit {
// All explicit symbols need to be added by the caller.
assert!(records.contains_key(type_name));
} else {
// See if the symbol was already processed.
if records.contains_key(type_name) {
return Ok(());
}
}
let (tokens_rc, line_idx) = match local_override.get(type_name) {
Some(&(ref tokens_rc, line_idx)) => (Arc::clone(tokens_rc), line_idx),
None => match active_types.get(type_name) {
Some(&(ref tokens_rc, line_idx)) => (Arc::clone(tokens_rc), line_idx),
None => {
return Err(Error::new_parse(format!(
"{}:{}: Type '{}' is not known",
path.display(),
from_line_idx + 1,
type_name
)));
}
},
};
if !is_explicit {
records.insert(type_name.to_string(), Arc::clone(&tokens_rc)); // [1]
}
// Process recursively all types referenced by this symbol.
for token in tokens_rc.iter() {
match token {
Token::TypeRef(ref_name) => {
Self::complete_file_record(
path,
line_idx,
ref_name,
false,
local_override,
active_types,
records,
)?;
}
Token::Atom(_word) => {}
}
}
Ok(())
}
/// Writes the corpus in the consolidated form to the specified file.
pub fn write_consolidated<P: AsRef<Path>>(&self, path: P) -> Result<(), Error> {
self.write_consolidated_buffer(Writer::new_file(path)?)
}
/// Writes the corpus in the consolidated form to the provided output stream.
pub fn write_consolidated_buffer<W: Write>(&self, mut writer: W) -> Result<(), Error> {
let err_desc = "Failed to write a consolidated record";
// Track which records are currently active, mapping a type name to its tokens.
let mut active_types = HashMap::<&String, &Arc<Tokens>>::new();
// Sort all files in the corpus by their path.
let mut file_indices = (0..self.files.len()).collect::<Vec<_>>();
file_indices.sort_by_key(|&i| &self.files[i].path);
// Process the sorted files and add their types to the output.
let mut add_separator = false;
for i in file_indices {
let symfile = &self.files[i];
// Sort all types in the file.
let mut sorted_types = self.files[i].records.iter().collect::<Vec<_>>();
sorted_types.sort_by_cached_key(|&(name, _)| (is_export_name(name), name));
// Add an empty line to separate individual files.
if add_separator {
writeln!(writer).map_io_err(err_desc)?;
} else {
add_separator = true;
}
// Write the file header.
writeln!(writer, "/* {} */", symfile.path.display()).map_io_err(err_desc)?;
// Write all output types.
for (name, tokens_rc) in sorted_types {
// Check if this is an UNKNOWN type definition and, if so, write it in the short
// '<type>##<name>' form, which acts as a per-file override.
if let Some(short_name) = try_shorten_decl(name, tokens_rc) {
writeln!(writer, "{}", short_name).map_io_err(err_desc)?;
continue;
}
// See if the symbol matches an already active definition, or record it in the
// output.
let record = match active_types.entry(name) {
Occupied(mut active_type_entry) => {
if *active_type_entry.get() != tokens_rc {
active_type_entry.insert(tokens_rc);
true
} else {
false
}
}
Vacant(active_type_entry) => {
active_type_entry.insert(tokens_rc);
true
}
};
if record {
write!(writer, "{}", name).map_io_err(err_desc)?;
for token in tokens_rc.iter() {
write!(writer, " {}", token.as_str()).map_io_err(err_desc)?;
}
writeln!(writer).map_io_err(err_desc)?;
}
}
}
writer.flush().map_io_err(err_desc)?;
Ok(())
}
/// Writes the corpus in the split form to the specified directory.
pub fn write_split<P: AsRef<Path>>(&self, path: P, num_workers: i32) -> Result<(), Error> {
self.write_split_buffer(&mut DirectoryWriter::new_file(path), num_workers)
}
/// Writes the corpus in the split form to the provided output stream factory.
pub fn write_split_buffer<W: Write, WG: WriteGenerator<W> + Send>(
&self,
dir_writer: WG,
num_workers: i32,
) -> Result<(), Error> {
let err_desc = "Failed to write a split record";
let next_work_idx = AtomicUsize::new(0);
let dir_writer = Mutex::new(dir_writer);
thread::scope(|s| -> Result<(), Error> {
let mut workers = Vec::new();
for _ in 0..num_workers {
workers.push(s.spawn(|| {
loop {
let work_idx = next_work_idx.fetch_add(1, Ordering::Relaxed);
if work_idx >= self.files.len() {
break;
}
let symfile = &self.files[work_idx];
// Sort all types in the file.
let mut sorted_types = symfile.records.iter().collect::<Vec<_>>();
sorted_types.sort_by_cached_key(|&(name, _)| (is_export_name(name), name));
// Create an output file.
let mut writer = {
let mut dir_writer = dir_writer.lock().unwrap();
dir_writer.create(&symfile.path)?
};
// Write all types into the output file.
for (name, tokens_rc) in sorted_types {
write!(writer, "{}", name).map_io_err(err_desc)?;
for token in tokens_rc.iter() {
write!(writer, " {}", token.as_str()).map_io_err(err_desc)?;
}
writeln!(writer).map_io_err(err_desc)?;
}
// Close the file.
writer.flush().map_io_err(err_desc)?;
let mut dir_writer = dir_writer.lock().unwrap();
dir_writer.close(writer)
}
Ok(())
}));
}
// Join all worker threads. Return the first error if any is found; other errors are
// silently dropped, which is acceptable here.
for worker in workers {
worker.join().unwrap()?
}
Ok(())
})?;
Ok(())
}
/// Compares the definitions of the given symbol in two files.
///
/// If the immediate definition of the symbol differs between the two files then it gets added
/// in `changes`. The `export` parameter identifies the top-level exported symbol affected by
/// the change.
///
/// The specified symbol is added to `processed_types`, if it's not already present, and all its
/// type references get recursively processed in the same way.
fn compare_types<'a>(
file: &'a SymtypesFile,
other_file: &'a SymtypesFile,
name: &'a str,
export: &'a str,
changes: &Mutex<CompareChangedTypes<'a>>,
processed: &mut CompareFileTypes<'a>,
) {
// See if the symbol was already processed.
if processed.contains(name) {
return;
}
processed.insert(name); // [2]
// Look up how the symbol is defined in each file.
// SAFETY: Each type reference is guaranteed to have a corresponding definition.
let tokens = &**file.records.get(name).unwrap();
let other_tokens = &**other_file.records.get(name).unwrap();
// Compare the immediate tokens.
let is_equal = tokens.len() == other_tokens.len()
&& zip(tokens.iter(), other_tokens.iter())
.all(|(token, other_token)| token == other_token);
if !is_equal {
let mut changes = changes.lock().unwrap();
changes
.entry((name, tokens, other_tokens))
.or_default()
.push(export);
}
// Recursively compare the same referenced types. This can be done trivially if the tokens are
// equal. If they are not, try hard (and slowly) to find any matching types.
if is_equal {
for token in tokens {
if let Token::TypeRef(ref_name) = token {
Self::compare_types(
file,
other_file,
ref_name.as_str(),
export,
changes,
processed,
);
}
}
} else {
for token in tokens {
if let Token::TypeRef(ref_name) = token {
for other_token in other_tokens {
if let Token::TypeRef(other_ref_name) = other_token {
if ref_name == other_ref_name {
Self::compare_types(
file,
other_file,
ref_name.as_str(),
export,
changes,
processed,
);
break;
}
}
}
}
}
}
}
/// Compares the symbols in `self` and `other_symtypes` and writes a human-readable report about
/// all found changes to the specified file.
pub fn compare_with<P: AsRef<Path>>(
&self,
other_symtypes: &SymtypesCorpus,
maybe_filter: Option<&Filter>,
path: P,
num_workers: i32,
) -> Result<(), Error> {
self.compare_with_buffer(
other_symtypes,
maybe_filter,
Writer::new_file(path)?,
num_workers,
)
}
/// Compares the symbols in `self` and `other_symtypes` and writes a human-readable report about
/// all found changes to the provided output stream.
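///
/// The report has roughly the following shape (a sketch):
///
/// ```text
/// Export 'qux' has been removed
/// The following '1' exports are different:
///  bar
///
/// because of a changed 's#foo':
/// <unified diff of the old and new definition>
/// ```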
pub fn compare_with_buffer<W: Write>(
&self,
other_symtypes: &SymtypesCorpus,
maybe_filter: Option<&Filter>,
mut writer: W,
num_workers: i32,
) -> Result<(), Error> {
fn matches(maybe_filter: Option<&Filter>, name: &str) -> bool {
match maybe_filter {
Some(filter) => filter.matches(name),
None => true,
}
}
let err_desc = "Failed to write a comparison result";
// Check for symbols in self but not in other_symtypes, and vice versa.
for (exports_a, exports_b, change) in [
(&self.exports, &other_symtypes.exports, "removed"),
(&other_symtypes.exports, &self.exports, "added"),
] {
let mut changed = exports_a
.keys()
.filter(|&name| matches(maybe_filter, name) && !exports_b.contains_key(name))
.collect::<Vec<_>>();
changed.sort();
for name in changed {
writeln!(writer, "Export '{}' has been {}", name, change).map_io_err(err_desc)?;
}
}
// Compare symbols that are in both corpuses.
let works = self
.exports
.iter()
.filter(|&(name, _)| matches(maybe_filter, name))
.collect::<Vec<_>>();
let next_work_idx = AtomicUsize::new(0);
let changes = Mutex::new(CompareChangedTypes::new());
thread::scope(|s| {
for _ in 0..num_workers {
s.spawn(|| {
loop {
let work_idx = next_work_idx.fetch_add(1, Ordering::Relaxed);
if work_idx >= works.len() {
break;
}
let (name, file_idx) = works[work_idx];
let file = &self.files[*file_idx];
if let Some(other_file_idx) = other_symtypes.exports.get(name) {
let other_file = &other_symtypes.files[*other_file_idx];
let mut processed = CompareFileTypes::new();
Self::compare_types(
file,
other_file,
name,
name,
&changes,
&mut processed,
);
}
}
});
}
});
// Format and output collected changes.
let changes = changes.into_inner().unwrap(); // Get the inner HashMap.
let mut changes = changes.into_iter().collect::<Vec<_>>();
changes.iter_mut().for_each(|(_, exports)| exports.sort());
changes.sort();
let mut add_separator = false;
for ((name, tokens, other_tokens), exports) in changes {
// Add an empty line to separate individual changes.
if add_separator {
writeln!(writer).map_io_err(err_desc)?;
} else {
add_separator = true;
}
writeln!(
writer,
"The following '{}' exports are different:",
exports.len()
)
.map_io_err(err_desc)?;
for export in exports {
writeln!(writer, " {}", export).map_io_err(err_desc)?;
}
writeln!(writer).map_io_err(err_desc)?;
writeln!(writer, "because of a changed '{}':", name).map_io_err(err_desc)?;
write_type_diff(tokens, other_tokens, writer.by_ref())?;
}
writer.flush().map_io_err(err_desc)?;
Ok(())
}
}
/// Reads words from a given iterator and converts them to `Tokens`.
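///
/// For example, the words `int`, `bar`, `(`, `s#foo`, `)` become
/// `[Atom("int"), Atom("bar"), Atom("("), TypeRef("s#foo"), Atom(")")]`.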
fn words_into_tokens<'a, I: Iterator<Item = &'a str>>(words: &mut I) -> Tokens {
let mut tokens = Tokens::new();
for word in words {
let mut is_typeref = false;
if let Some(ch) = word.chars().nth(1) {
if ch == '#' {
is_typeref = true;
}
}
tokens.push(if is_typeref {
Token::new_typeref(word)
} else {
Token::new_atom(word)
});
}
tokens
}
/// Returns whether the specified type name is an export definition, as opposed to a `<X>#<foo>`
/// type definition.
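///
/// For example, `bar` is an export name, while `s#foo` is a type definition.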
fn is_export_name(type_name: &str) -> bool {
match type_name.chars().nth(1) {
Some(ch) => ch != '#',
None => true,
}
}
/// Tries to shorten the specified type if it represents an UNKNOWN declaration.
///
/// The function maps records like
/// `<short-type>#<name> <type> <name> { UNKNOWN }`
/// to
/// `<short-type>##<name>`.
/// For instance, `s#task_struct struct task_struct { UNKNOWN }` becomes `s##task_struct`.
fn try_shorten_decl(type_name: &str, tokens: &Tokens) -> Option<String> {
if tokens.len() != 5 {
return None;
}
if let Some((short_type, expanded_type, base_name)) = split_type_name(type_name, "#") {
let unknown = [expanded_type, base_name, "{", "UNKNOWN", "}"];
if zip(tokens.iter(), unknown.into_iter()).all(|(token, check)| token.as_str() == check) {
return Some(format!("{}##{}", short_type, base_name));
}
}
None
}
/// Tries to expand the specified type if it represents an UNKNOWN declaration.
///
/// The function maps records like
/// `<short-type>##<name>`
/// to
/// `<short-type>#<name> <type> <name> { UNKNOWN }`.
/// For instance, `s##task_struct` becomes `s#task_struct struct task_struct { UNKNOWN }`.
fn try_expand_decl(type_name: &str) -> Option<(String, Tokens)> {
if let Some((short_type, expanded_type, base_name)) = split_type_name(type_name, "##") {
let type_name = format!("{}#{}", short_type, base_name);
let tokens = vec![
Token::new_atom(expanded_type),
Token::new_atom(base_name),
Token::new_atom("{"),
Token::new_atom("UNKNOWN"),
Token::new_atom("}"),
];
return Some((type_name, tokens));
}
None
}
/// Splits the specified type name into three string slices: the short type name, the long type
/// name, and the base name. For instance, `s#task_struct` is split into
/// `("s", "struct", "task_struct")`.
fn split_type_name<'a>(type_name: &'a str, delimiter: &str) -> Option<(&'a str, &'a str, &'a str)> {
match type_name.split_once(delimiter) {
Some((short_type, base_name)) => {
let expanded_type = match short_type {
"t" => "typedef",
"e" => "enum",
"s" => "struct",
"u" => "union",
_ => return None,
};
Some((short_type, expanded_type, base_name))
}
None => None,
}
}
/// Parses a single symtypes record.
fn parse_type_record(
path: &Path,
line_idx: usize,
line: &str,
is_consolidated: bool,
) -> Result<(String, Tokens, bool), Error> {
let mut words = line.split_ascii_whitespace();
let raw_name = words.next().ok_or_else(|| {
Error::new_parse(format!(
"{}:{}: Expected a record name",
path.display(),
line_idx + 1
))
})?;
if is_consolidated {
// Check if it is an UNKNOWN override.
if let Some((name, tokens)) = try_expand_decl(raw_name) {
// TODO Check that all words have been exhausted.
return Ok((name, tokens, true));
}
}
// TODO Check that no ## is present in the type name.
// Turn the remaining words into tokens.
let tokens = words_into_tokens(&mut words);
Ok((raw_name.to_string(), tokens, false))
}
/// Processes tokens describing a type and produces its pretty-formatted version as a [`Vec`] of
/// [`String`] lines.
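///
/// For example, the tokens of `s#foo struct foo { int a ; }` are formatted as (a sketch; the
/// inner line is indented with a tab):
///
/// ```text
/// struct foo {
/// 	int a;
/// }
/// ```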
fn pretty_format_type(tokens: &Tokens) -> Vec<String> {
// Iterate over all tokens and produce the formatted output.
let mut res = Vec::new();
let mut indent: usize = 0;
let mut line = String::new();
for token in tokens {
// Handle the closing bracket and parenthesis early, they end any prior line and reduce
// indentation.
if token.as_str() == "}" || token.as_str() == ")" {
if !line.is_empty() {
res.push(line);
}
indent = indent.saturating_sub(1);
line = String::new();
}
// Insert any newline indentation.
let is_first = line.is_empty();
if is_first {
for _ in 0..indent {
line.push('\t');
}
}
// Check if the token is special and append it appropriately to the output.
match token.as_str() {
"{" | "(" => {
if !is_first {
line.push(' ');
}
line.push_str(token.as_str());
res.push(line);
indent = indent.saturating_add(1);
line = String::new();
}
"}" | ")" => {
line.push_str(token.as_str());
}
";" => {
line.push(';');
res.push(line);
line = String::new();
}
"," => {
line.push(',');
res.push(line);
line = String::new();
}
_ => {
if !is_first {
line.push(' ');
}
line.push_str(token.as_str());
}
};
}
if !line.is_empty() {
res.push(line);
}
res
}
/// Formats a unified diff between two supposedly different types and writes it to the provided
/// output stream.
fn write_type_diff<W: Write>(
tokens: &Tokens,
other_tokens: &Tokens,
writer: W,
) -> Result<(), Error> {
let pretty = pretty_format_type(tokens);
let other_pretty = pretty_format_type(other_tokens);
unified_diff(&pretty, &other_pretty, writer)
}
0707010000001B000081A40000000000000000000000016878E928000048B9000000000000000000000000000000000000003900000000suse-kabi-tools-0.5.0+git0.9ad91db/src/symtypes/tests.rs// Copyright (C) 2024 SUSE LLC <petr.pavlu@suse.com>
// SPDX-License-Identifier: GPL-2.0-or-later
use super::*;
use crate::{assert_ok, assert_parse_err, bytes};
#[test]
fn read_basic_single() {
// Check basic reading of a single file.
let mut symtypes = SymtypesCorpus::new();
let mut warnings = Vec::new();
let result = symtypes.load_buffer(
"test.symtypes",
bytes!(
"s#foo struct foo { }\n",
"bar void bar ( s#foo )\n",
"baz int baz ( )\n", //
),
&mut warnings,
);
assert_ok!(result);
assert!(warnings.is_empty());
let foo_tokens_rc = Arc::new(vec![
Token::new_atom("struct"),
Token::new_atom("foo"),
Token::new_atom("{"),
Token::new_atom("}"),
]);
let bar_tokens_rc = Arc::new(vec![
Token::new_atom("void"),
Token::new_atom("bar"),
Token::new_atom("("),
Token::new_typeref("s#foo"),
Token::new_atom(")"),
]);
let baz_tokens_rc = Arc::new(vec![
Token::new_atom("int"),
Token::new_atom("baz"),
Token::new_atom("("),
Token::new_atom(")"),
]);
let mut exp_symtypes = SymtypesCorpus {
types: array::from_fn(|_| Types::new()),
exports: HashMap::from([("bar".to_string(), 0), ("baz".to_string(), 0)]),
files: vec![SymtypesFile {
path: PathBuf::from("test.symtypes"),
records: HashMap::from([
("s#foo".to_string(), Arc::clone(&foo_tokens_rc)),
("bar".to_string(), Arc::clone(&bar_tokens_rc)),
("baz".to_string(), Arc::clone(&baz_tokens_rc)),
]),
}],
};
exp_symtypes.types[type_bucket_idx("s#foo")]
.insert("s#foo".to_string(), vec![Arc::clone(&foo_tokens_rc)]);
exp_symtypes.types[type_bucket_idx("bar")]
.insert("bar".to_string(), vec![Arc::clone(&bar_tokens_rc)]);
exp_symtypes.types[type_bucket_idx("baz")]
.insert("baz".to_string(), vec![Arc::clone(&baz_tokens_rc)]);
assert_eq!(symtypes, exp_symtypes);
}
#[test]
fn read_basic_consolidated() {
// Check basic reading of a consolidated file.
let mut symtypes = SymtypesCorpus::new();
let mut warnings = Vec::new();
let result = symtypes.load_buffer(
"test_consolidated.symtypes",
bytes!(
"/* test.symtypes */\n",
"s#foo struct foo { }\n",
"bar void bar ( s#foo )\n",
"/* test2.symtypes */\n",
"baz int baz ( )\n", //
),
&mut warnings,
);
assert_ok!(result);
assert!(warnings.is_empty());
let foo_tokens_rc = Arc::new(vec![
Token::new_atom("struct"),
Token::new_atom("foo"),
Token::new_atom("{"),
Token::new_atom("}"),
]);
let bar_tokens_rc = Arc::new(vec![
Token::new_atom("void"),
Token::new_atom("bar"),
Token::new_atom("("),
Token::new_typeref("s#foo"),
Token::new_atom(")"),
]);
let baz_tokens_rc = Arc::new(vec![
Token::new_atom("int"),
Token::new_atom("baz"),
Token::new_atom("("),
Token::new_atom(")"),
]);
let mut exp_symtypes = SymtypesCorpus {
types: array::from_fn(|_| Types::new()),
exports: HashMap::from([("bar".to_string(), 0), ("baz".to_string(), 1)]),
files: vec![
SymtypesFile {
path: PathBuf::from("test.symtypes"),
records: HashMap::from([
("s#foo".to_string(), Arc::clone(&foo_tokens_rc)),
("bar".to_string(), Arc::clone(&bar_tokens_rc)),
]),
},
SymtypesFile {
path: PathBuf::from("test2.symtypes"),
records: HashMap::from([("baz".to_string(), Arc::clone(&baz_tokens_rc))]),
},
],
};
exp_symtypes.types[type_bucket_idx("s#foo")]
.insert("s#foo".to_string(), vec![Arc::clone(&foo_tokens_rc)]);
exp_symtypes.types[type_bucket_idx("bar")]
.insert("bar".to_string(), vec![Arc::clone(&bar_tokens_rc)]);
exp_symtypes.types[type_bucket_idx("baz")]
.insert("baz".to_string(), vec![Arc::clone(&baz_tokens_rc)]);
assert_eq!(symtypes, exp_symtypes);
}
#[test]
fn read_empty_record_single() {
// Check that empty records are rejected when reading a single file.
let mut symtypes = SymtypesCorpus::new();
let mut warnings = Vec::new();
let result = symtypes.load_buffer(
"test.symtypes",
bytes!(
"s#foo struct foo { }\n",
"\n",
"bar void bar ( s#foo )\n",
"baz int baz ( )\n", //
),
&mut warnings,
);
assert_parse_err!(result, "test.symtypes:2: Expected a record name");
assert!(warnings.is_empty());
}
#[test]
fn read_empty_record_consolidated() {
// Check that empty records are skipped when reading a consolidated file.
let mut symtypes = SymtypesCorpus::new();
let mut warnings = Vec::new();
let result = symtypes.load_buffer(
"test_consolidated.symtypes",
bytes!(
"/* test.symtypes */\n",
"\n",
"s#foo struct foo { }\n",
"\n",
"bar void bar ( s#foo )\n",
"\n",
"/* test2.symtypes */\n",
"\n",
"baz int baz ( )\n",
"\n", //
),
&mut warnings,
);
assert_ok!(result);
assert!(warnings.is_empty());
assert_ne!(symtypes, SymtypesCorpus::new());
}
#[test]
fn read_duplicate_type_record() {
// Check that type records with duplicate names are rejected when reading a symtypes file.
let mut symtypes = SymtypesCorpus::new();
let mut warnings = Vec::new();
let result = symtypes.load_buffer(
"test.symtypes",
bytes!(
"s#foo struct foo { int a ; }\n",
"s#foo struct foo { int b ; }\n", //
),
&mut warnings,
);
assert_parse_err!(result, "test.symtypes:2: Duplicate record 's#foo'");
assert!(warnings.is_empty());
}
/*
TODO FIXME
#[test]
fn read_duplicate_file_record() {
// Check that file records with duplicate names are rejected when reading a consolidated file.
let mut symtypes = SymtypesCorpus::new();
let mut warnings = Vec::new();
let result = symtypes.load_buffer(
"test_consolidated.symtypes",
bytes!(
"/* test.symtypes */\n",
"/* test.symtypes */\n", //
),
&mut warnings,
);
assert_parse_err!(
result,
"test.symtypes:2: Duplicate record 'F#test.symtypes'"
);
assert!(warnings.is_empty());
}
*/
#[test]
fn read_invalid_reference() {
// Check that a record referencing a symbol with a missing declaration is rejected.
let mut symtypes = SymtypesCorpus::new();
let mut warnings = Vec::new();
let result = symtypes.load_buffer(
"test.symtypes",
bytes!(
"bar void bar ( s#foo )\n", //
),
&mut warnings,
);
assert_parse_err!(result, "test.symtypes:1: Type 's#foo' is not known");
assert!(warnings.is_empty());
}
#[test]
fn read_duplicate_type_export() {
// Check that two exports with the same name in different files produce a warning.
let mut symtypes = SymtypesCorpus::new();
let mut warnings = Vec::new();
let result = symtypes.load_buffer(
"test.symtypes",
bytes!(
"foo int foo ( )\n", //
),
&mut warnings,
);
assert_ok!(result);
assert!(warnings.is_empty());
let result = symtypes.load_buffer(
"test2.symtypes",
bytes!(
"foo int foo ( )", //
),
&mut warnings,
);
assert_ok!(result);
assert_eq!(
str::from_utf8(&warnings).unwrap(),
"test2.symtypes:1: WARNING: Export 'foo' is duplicate, previous occurrence found in 'test.symtypes'\n"
);
}
#[test]
fn read_write_basic() {
// Check reading of a single file and writing the consolidated output.
let mut symtypes = SymtypesCorpus::new();
let mut warnings = Vec::new();
let result = symtypes.load_buffer(
"test.symtypes",
bytes!(
"s#foo struct foo { int a ; }\n",
"bar int bar ( s#foo )\n", //
),
&mut warnings,
);
assert_ok!(result);
assert!(warnings.is_empty());
let mut out = Vec::new();
let result = symtypes.write_consolidated_buffer(&mut out);
assert_ok!(result);
assert_eq!(
str::from_utf8(&out).unwrap(),
concat!(
"/* test.symtypes */\n",
"s#foo struct foo { int a ; }\n",
"bar int bar ( s#foo )\n", //
)
);
}
#[test]
fn read_write_shared_struct() {
// Check that a structure declaration shared by two files appears only once in the consolidated
// output.
let mut symtypes = SymtypesCorpus::new();
let mut warnings = Vec::new();
let result = symtypes.load_buffer(
"test.symtypes",
bytes!(
"s#foo struct foo { int a ; }\n",
"bar int bar ( s#foo )\n", //
),
&mut warnings,
);
assert_ok!(result);
assert!(warnings.is_empty());
let result = symtypes.load_buffer(
"test2.symtypes",
bytes!(
"s#foo struct foo { int a ; }\n",
"baz int baz ( s#foo )\n", //
),
&mut warnings,
);
assert_ok!(result);
assert!(warnings.is_empty());
let mut out = Vec::new();
let result = symtypes.write_consolidated_buffer(&mut out);
assert_ok!(result);
assert_eq!(
str::from_utf8(&out).unwrap(),
concat!(
"/* test.symtypes */\n",
"s#foo struct foo { int a ; }\n",
"bar int bar ( s#foo )\n",
"\n",
"/* test2.symtypes */\n",
"baz int baz ( s#foo )\n", //
)
);
}
#[test]
fn read_write_differing_struct() {
// Check that a structure declaration that differs between two files appears in both variants in
// the consolidated output and that each variant is referenced by the correct file entry.
let mut symtypes = SymtypesCorpus::new();
let mut warnings = Vec::new();
let result = symtypes.load_buffer(
"test.symtypes",
bytes!(
"s#foo struct foo { int a ; }\n",
"bar int bar ( s#foo )\n", //
),
&mut warnings,
);
assert_ok!(result);
assert!(warnings.is_empty());
let result = symtypes.load_buffer(
"test2.symtypes",
bytes!(
"s#foo struct foo { long a ; }\n",
"baz int baz ( s#foo )\n", //
),
&mut warnings,
);
assert_ok!(result);
assert!(warnings.is_empty());
let mut out = Vec::new();
let result = symtypes.write_consolidated_buffer(&mut out);
assert_ok!(result);
assert_eq!(
str::from_utf8(&out).unwrap(),
concat!(
"/* test.symtypes */\n",
"s#foo struct foo { int a ; }\n",
"bar int bar ( s#foo )\n",
"\n",
"/* test2.symtypes */\n",
"s#foo struct foo { long a ; }\n",
"baz int baz ( s#foo )\n", //
)
);
}
#[test]
fn write_split_basic() {
// Check basic writing of split files.
let mut symtypes = SymtypesCorpus::new();
let mut warnings = Vec::new();
let result = symtypes.load_buffer(
"consolidated.symtypes",
bytes!(
"/* test.symtypes */\n",
"s#foo struct foo { int a ; }\n",
"bar int bar ( s#foo )\n",
"\n",
"/* test2.symtypes */\n",
"s#foo struct foo { long a ; }\n",
"baz int baz ( s#foo )\n", //
),
&mut warnings,
);
assert_ok!(result);
assert!(warnings.is_empty());
let mut out = DirectoryWriter::new_buffer("split");
let result = symtypes.write_split_buffer(&mut out, 1);
assert_ok!(result);
let files = out.into_inner_map();
assert_eq!(files.len(), 2);
assert_eq!(
str::from_utf8(&files[Path::new("split/test.symtypes")]).unwrap(),
concat!(
"s#foo struct foo { int a ; }\n",
"bar int bar ( s#foo )\n", //
)
);
assert_eq!(
str::from_utf8(&files[Path::new("split/test2.symtypes")]).unwrap(),
concat!(
"s#foo struct foo { long a ; }\n",
"baz int baz ( s#foo )\n", //
)
);
}
#[test]
fn compare_identical() {
// Check that the comparison of two identical corpuses shows no differences.
let mut symtypes = SymtypesCorpus::new();
let mut warnings = Vec::new();
let result = symtypes.load_buffer(
"a/test.symtypes",
bytes!(
"bar int bar ( )\n", //
),
&mut warnings,
);
assert_ok!(result);
assert!(warnings.is_empty());
let mut symtypes2 = SymtypesCorpus::new();
let result = symtypes2.load_buffer(
"b/test.symtypes",
bytes!(
"bar int bar ( )\n", //
),
&mut warnings,
);
assert_ok!(result);
assert!(warnings.is_empty());
let mut out = Vec::new();
let result = symtypes.compare_with_buffer(&symtypes2, None, &mut out, 1);
assert_ok!(result);
assert_eq!(
str::from_utf8(&out).unwrap(),
concat!(
"", //
)
);
}
#[test]
fn compare_added_export() {
// Check that the comparison of two corpuses reports any newly added export.
let mut symtypes = SymtypesCorpus::new();
let mut warnings = Vec::new();
let result = symtypes.load_buffer(
"a/test.symtypes",
bytes!(
"foo int foo ( )\n", //
),
&mut warnings,
);
assert_ok!(result);
assert!(warnings.is_empty());
let mut symtypes2 = SymtypesCorpus::new();
let result = symtypes2.load_buffer(
"b/test.symtypes",
bytes!(
"foo int foo ( )\n",
"bar int bar ( )\n",
"baz int baz ( )\n", //
),
&mut warnings,
);
assert_ok!(result);
assert!(warnings.is_empty());
let mut out = Vec::new();
let result = symtypes.compare_with_buffer(&symtypes2, None, &mut out, 1);
assert_ok!(result);
assert_eq!(
str::from_utf8(&out).unwrap(),
concat!(
"Export 'bar' has been added\n",
"Export 'baz' has been added\n", //
)
);
}
#[test]
fn compare_removed_export() {
// Check that the comparison of two corpuses reports any removed export.
let mut symtypes = SymtypesCorpus::new();
let mut warnings = Vec::new();
let result = symtypes.load_buffer(
"a/test.symtypes",
bytes!(
"foo int foo ( )\n",
"bar int bar ( )\n",
"baz int baz ( )\n", //
),
&mut warnings,
);
assert_ok!(result);
assert!(warnings.is_empty());
let mut symtypes2 = SymtypesCorpus::new();
let result = symtypes2.load_buffer(
"b/test.symtypes",
bytes!(
"baz int baz ( )\n", //
),
&mut warnings,
);
assert_ok!(result);
assert!(warnings.is_empty());
let mut out = Vec::new();
let result = symtypes.compare_with_buffer(&symtypes2, None, &mut out, 1);
assert_ok!(result);
assert_eq!(
str::from_utf8(&out).unwrap(),
concat!(
"Export 'bar' has been removed\n",
"Export 'foo' has been removed\n", //
)
);
}
#[test]
fn compare_changed_type() {
// Check that the comparison of two corpuses reports changed types and affected exports.
let mut symtypes = SymtypesCorpus::new();
let mut warnings = Vec::new();
let result = symtypes.load_buffer(
"a/test.symtypes",
bytes!(
"s#foo struct foo { int a ; }\n",
"bar int bar ( s#foo )\n", //
),
&mut warnings,
);
assert_ok!(result);
assert!(warnings.is_empty());
let mut symtypes2 = SymtypesCorpus::new();
let result = symtypes2.load_buffer(
"b/test.symtypes",
bytes!(
"s#foo struct foo { int a ; int b ; }\n",
"bar int bar ( s#foo )\n", //
),
&mut warnings,
);
assert_ok!(result);
assert!(warnings.is_empty());
let mut out = Vec::new();
let result = symtypes.compare_with_buffer(&symtypes2, None, &mut out, 1);
assert_ok!(result);
assert_eq!(
str::from_utf8(&out).unwrap(),
concat!(
"The following '1' exports are different:\n",
" bar\n",
"\n",
"because of a changed 's#foo':\n",
"@@ -1,3 +1,4 @@\n",
" struct foo {\n",
" \tint a;\n",
"+\tint b;\n",
" }\n", //
)
);
}
#[test]
fn compare_changed_nested_type() {
// Check that the comparison of two corpuses also reports changes in subtypes even if the parent
// type itself is modified, as long as each subtype is referenced by the parent type in both
// inputs.
let mut symtypes = SymtypesCorpus::new();
let mut warnings = Vec::new();
let result = symtypes.load_buffer(
"a/test.symtypes",
bytes!(
"s#foo struct foo { int a ; }\n",
"bar int bar ( int a , s#foo )\n", //
),
&mut warnings,
);
assert_ok!(result);
assert!(warnings.is_empty());
let mut symtypes2 = SymtypesCorpus::new();
let result = symtypes2.load_buffer(
"b/test.symtypes",
bytes!(
"s#foo struct foo { int a ; int b ; }\n",
"bar int bar ( s#foo , int a )\n", //
),
&mut warnings,
);
assert_ok!(result);
assert!(warnings.is_empty());
let mut out = Vec::new();
let result = symtypes.compare_with_buffer(&symtypes2, None, &mut out, 1);
assert_ok!(result);
assert_eq!(
str::from_utf8(&out).unwrap(),
concat!(
"The following '1' exports are different:\n",
" bar\n",
"\n",
"because of a changed 'bar':\n",
"@@ -1,4 +1,4 @@\n",
" int bar (\n",
"-\tint a,\n",
"-\ts#foo\n",
"+\ts#foo,\n",
"+\tint a\n",
" )\n",
"\n",
"The following '1' exports are different:\n",
" bar\n",
"\n",
"because of a changed 's#foo':\n",
"@@ -1,3 +1,4 @@\n",
" struct foo {\n",
" \tint a;\n",
"+\tint b;\n",
" }\n", //
)
);
}
0707010000001C000081A40000000000000000000000016878E92800004093000000000000000000000000000000000000004000000000suse-kabi-tools-0.5.0+git0.9ad91db/src/symtypes/tests_format.rs// Copyright (C) 2024 SUSE LLC <petr.pavlu@suse.com>
// SPDX-License-Identifier: GPL-2.0-or-later
use super::*;
use crate::assert_ok;
#[test]
fn format_typedef() {
// Check the pretty format of a typedef declaration.
let pretty = pretty_format_type(&vec![
Token::new_atom("typedef"),
Token::new_atom("unsigned"),
Token::new_atom("long"),
Token::new_atom("long"),
Token::new_atom("u64"),
]);
assert_eq!(
pretty,
crate::string_vec!(
"typedef unsigned long long u64", //
)
);
}
#[test]
fn format_enum() {
// Check the pretty format of an enum declaration.
let pretty = pretty_format_type(&vec![
Token::new_atom("enum"),
Token::new_atom("test"),
Token::new_atom("{"),
Token::new_atom("VALUE1"),
Token::new_atom(","),
Token::new_atom("VALUE2"),
Token::new_atom(","),
Token::new_atom("VALUE3"),
Token::new_atom("}"),
]);
assert_eq!(
pretty,
crate::string_vec!(
"enum test {",
"\tVALUE1,",
"\tVALUE2,",
"\tVALUE3",
"}", //
)
);
}
#[test]
fn format_struct() {
// Check the pretty format of a struct declaration.
let pretty = pretty_format_type(&vec![
Token::new_atom("struct"),
Token::new_atom("test"),
Token::new_atom("{"),
Token::new_atom("int"),
Token::new_atom("ivalue"),
Token::new_atom(";"),
Token::new_atom("long"),
Token::new_atom("lvalue"),
Token::new_atom(";"),
Token::new_atom("}"),
]);
assert_eq!(
pretty,
crate::string_vec!(
"struct test {",
"\tint ivalue;",
"\tlong lvalue;",
"}", //
)
);
}
#[test]
fn format_union() {
// Check the pretty format of a union declaration.
let pretty = pretty_format_type(&vec![
Token::new_atom("union"),
Token::new_atom("test"),
Token::new_atom("{"),
Token::new_atom("int"),
Token::new_atom("ivalue"),
Token::new_atom(";"),
Token::new_atom("long"),
Token::new_atom("lvalue"),
Token::new_atom(";"),
Token::new_atom("}"),
]);
assert_eq!(
pretty,
crate::string_vec!(
"union test {",
"\tint ivalue;",
"\tlong lvalue;",
"}", //
)
);
}
#[test]
fn format_function() {
// Check the pretty format of a function declaration.
let pretty = pretty_format_type(&vec![
Token::new_atom("void"),
Token::new_atom("test"),
Token::new_atom("("),
Token::new_atom("int"),
Token::new_atom("ivalue"),
Token::new_atom(","),
Token::new_atom("long"),
Token::new_atom("lvalue"),
Token::new_atom(")"),
]);
assert_eq!(
pretty,
crate::string_vec!(
"void test (",
"\tint ivalue,",
"\tlong lvalue",
")", //
)
);
}
#[test]
fn format_enum_constant() {
// Check the pretty format of an enum constant declaration.
let pretty = pretty_format_type(&vec![Token::new_atom("7")]);
assert_eq!(
pretty,
crate::string_vec!(
"7", //
)
);
}
#[test]
fn format_nested() {
// Check the pretty format of a nested declaration.
let pretty = pretty_format_type(&vec![
Token::new_atom("union"),
Token::new_atom("nested"),
Token::new_atom("{"),
Token::new_atom("struct"),
Token::new_atom("{"),
Token::new_atom("int"),
Token::new_atom("ivalue1"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue2"),
Token::new_atom(";"),
Token::new_atom("}"),
Token::new_atom(";"),
Token::new_atom("long"),
Token::new_atom("lvalue"),
Token::new_atom(";"),
Token::new_atom("}"),
]);
assert_eq!(
pretty,
crate::string_vec!(
"union nested {",
"\tstruct {",
"\t\tint ivalue1;",
"\t\tint ivalue2;",
"\t};",
"\tlong lvalue;",
"}", //
)
);
}
#[test]
fn format_imbalanced() {
// Check the pretty format of a declaration with imbalanced brackets.
let pretty = pretty_format_type(&vec![
Token::new_atom("struct"),
Token::new_atom("imbalanced"),
Token::new_atom("{"),
Token::new_atom("{"),
Token::new_atom("}"),
Token::new_atom("}"),
Token::new_atom("}"),
Token::new_atom(";"),
Token::new_atom("{"),
Token::new_atom("{"),
]);
assert_eq!(
pretty,
crate::string_vec!(
"struct imbalanced {",
"\t{",
"\t}",
"}",
"};",
"{",
"\t{", //
)
);
}
#[test]
fn format_typeref() {
// Check the pretty format of a declaration with a reference to another type.
let pretty = pretty_format_type(&vec![
Token::new_atom("struct"),
Token::new_atom("typeref"),
Token::new_atom("{"),
Token::new_typeref("s#other"),
Token::new_atom("other"),
Token::new_atom(";"),
Token::new_atom("}"),
]);
assert_eq!(
pretty,
crate::string_vec!(
"struct typeref {",
"\ts#other other;",
"}", //
)
);
}
#[test]
fn format_removal() {
// Check the diff format when a struct member is removed.
let mut out = Vec::new();
let result = write_type_diff(
&vec![
Token::new_atom("struct"),
Token::new_atom("test"),
Token::new_atom("{"),
Token::new_atom("int"),
Token::new_atom("ivalue1"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue2"),
Token::new_atom(";"),
Token::new_atom("}"),
],
&vec![
Token::new_atom("struct"),
Token::new_atom("test"),
Token::new_atom("{"),
Token::new_atom("int"),
Token::new_atom("ivalue1"),
Token::new_atom(";"),
Token::new_atom("}"),
],
&mut out,
);
assert_ok!(result);
assert_eq!(
str::from_utf8(&out).unwrap(),
concat!(
"@@ -1,4 +1,3 @@\n",
" struct test {\n",
" \tint ivalue1;\n",
"-\tint ivalue2;\n",
" }\n", //
)
);
}
#[test]
fn format_removal_top() {
// Check the diff format when data is removed at the top.
let mut out = Vec::new();
let result = write_type_diff(
&vec![
Token::new_atom("int"),
Token::new_atom("ivalue1"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue2"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue3"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue4"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue5"),
Token::new_atom(";"),
],
&vec![
Token::new_atom("int"),
Token::new_atom("ivalue2"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue3"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue4"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue5"),
Token::new_atom(";"),
],
&mut out,
);
assert_ok!(result);
assert_eq!(
str::from_utf8(&out).unwrap(),
concat!(
"@@ -1,4 +1,3 @@\n",
"-int ivalue1;\n",
" int ivalue2;\n",
" int ivalue3;\n",
" int ivalue4;\n", //
)
);
}
#[test]
fn format_removal_end() {
// Check the diff format when data is removed at the end.
let mut out = Vec::new();
let result = write_type_diff(
&vec![
Token::new_atom("int"),
Token::new_atom("ivalue1"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue2"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue3"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue4"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue5"),
Token::new_atom(";"),
],
&vec![
Token::new_atom("int"),
Token::new_atom("ivalue1"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue2"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue3"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue4"),
Token::new_atom(";"),
],
&mut out,
);
assert_ok!(result);
assert_eq!(
str::from_utf8(&out).unwrap(),
concat!(
"@@ -2,4 +2,3 @@\n",
" int ivalue2;\n",
" int ivalue3;\n",
" int ivalue4;\n",
"-int ivalue5;\n", //
)
);
}
#[test]
fn format_max_context() {
// Check that the diff format shows changes separated by up to 6 lines of context as one hunk.
let mut out = Vec::new();
let result = write_type_diff(
&vec![
Token::new_atom("int"),
Token::new_atom("ivalue1"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue2"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue3"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue4"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue5"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue6"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue7"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue8"),
Token::new_atom(";"),
],
&vec![
Token::new_atom("int"),
Token::new_atom("ivalue2"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue3"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue4"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue5"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue6"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue7"),
Token::new_atom(";"),
],
&mut out,
);
assert_ok!(result);
assert_eq!(
str::from_utf8(&out).unwrap(),
concat!(
"@@ -1,8 +1,6 @@\n",
"-int ivalue1;\n",
" int ivalue2;\n",
" int ivalue3;\n",
" int ivalue4;\n",
" int ivalue5;\n",
" int ivalue6;\n",
" int ivalue7;\n",
"-int ivalue8;\n", //
)
);
}
#[test]
fn format_max_context2() {
// Check that the diff format shows changes separated by more than 6 lines of context as two hunks.
let mut out = Vec::new();
let result = write_type_diff(
&vec![
Token::new_atom("int"),
Token::new_atom("ivalue1"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue2"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue3"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue4"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue5"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue6"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue7"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue8"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue9"),
Token::new_atom(";"),
],
&vec![
Token::new_atom("int"),
Token::new_atom("ivalue2"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue3"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue4"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue5"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue6"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue7"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue8"),
Token::new_atom(";"),
],
&mut out,
);
assert_ok!(result);
assert_eq!(
str::from_utf8(&out).unwrap(),
concat!(
"@@ -1,4 +1,3 @@\n",
"-int ivalue1;\n",
" int ivalue2;\n",
" int ivalue3;\n",
" int ivalue4;\n",
"@@ -6,4 +5,3 @@\n",
" int ivalue6;\n",
" int ivalue7;\n",
" int ivalue8;\n",
"-int ivalue9;\n", //
)
);
}
#[test]
fn format_addition() {
// Check the diff format when a struct member is added.
let mut out = Vec::new();
let result = write_type_diff(
&vec![
Token::new_atom("struct"),
Token::new_atom("test"),
Token::new_atom("{"),
Token::new_atom("int"),
Token::new_atom("ivalue1"),
Token::new_atom(";"),
Token::new_atom("}"),
],
&vec![
Token::new_atom("struct"),
Token::new_atom("test"),
Token::new_atom("{"),
Token::new_atom("int"),
Token::new_atom("ivalue1"),
Token::new_atom(";"),
Token::new_atom("int"),
Token::new_atom("ivalue2"),
Token::new_atom(";"),
Token::new_atom("}"),
],
&mut out,
);
assert_ok!(result);
assert_eq!(
str::from_utf8(&out).unwrap(),
concat!(
"@@ -1,3 +1,4 @@\n",
" struct test {\n",
" \tint ivalue1;\n",
"+\tint ivalue2;\n",
" }\n", //
)
);
}
#[test]
fn format_modification() {
// Check the diff format when a struct member is modified.
let mut out = Vec::new();
let result = write_type_diff(
&vec![
Token::new_atom("struct"),
Token::new_atom("test"),
Token::new_atom("{"),
Token::new_atom("int"),
Token::new_atom("ivalue1"),
Token::new_atom(";"),
Token::new_atom("}"),
],
&vec![
Token::new_atom("struct"),
Token::new_atom("test"),
Token::new_atom("{"),
Token::new_atom("int"),
Token::new_atom("ivalue2"),
Token::new_atom(";"),
Token::new_atom("}"),
],
&mut out,
);
assert_ok!(result);
assert_eq!(
str::from_utf8(&out).unwrap(),
concat!(
"@@ -1,3 +1,3 @@\n",
" struct test {\n",
"-\tint ivalue1;\n",
"+\tint ivalue2;\n",
" }\n", //
)
);
}
0707010000001D000041ED0000000000000000000000026878E92800000000000000000000000000000000000000000000002F00000000suse-kabi-tools-0.5.0+git0.9ad91db/src/symvers0707010000001E000081A40000000000000000000000016878E9280000365C000000000000000000000000000000000000003600000000suse-kabi-tools-0.5.0+git0.9ad91db/src/symvers/mod.rs// Copyright (C) 2025 SUSE LLC <petr.pavlu@suse.com>
// SPDX-License-Identifier: GPL-2.0-or-later
use crate::rules::Rules;
use crate::text::{Writer, read_lines};
use crate::{Error, MapIOErr, PathFile, debug};
use std::collections::HashMap;
use std::io::prelude::*;
use std::path::Path;
#[cfg(test)]
mod tests;
/// The data describing a single export.
#[derive(Debug, PartialEq)]
struct ExportInfo {
crc: u32,
module: String,
is_gpl_only: bool,
namespace: Option<String>,
}
impl ExportInfo {
/// Creates a new `ExportInfo` object.
pub fn new<S: Into<String>, T: Into<String>>(
crc: u32,
module: S,
is_gpl_only: bool,
namespace: Option<T>,
) -> Self {
Self {
crc,
module: module.into(),
is_gpl_only,
namespace: namespace.map(|n| n.into()),
}
}
/// Returns the export type as a string slice.
pub fn type_as_str(&self) -> &str {
if self.is_gpl_only {
"EXPORT_SYMBOL_GPL"
} else {
"EXPORT_SYMBOL"
}
}
}
/// A collection of export records.
type Exports = HashMap<String, ExportInfo>;
/// The format of the output from [`SymversCorpus::compare_with()`].
#[derive(Clone, Copy)]
pub enum CompareFormat {
Null,
Pretty,
Symbols,
}
impl CompareFormat {
/// Obtains a [`CompareFormat`] matching the given format type, specified as a string.
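///
/// The recognized values are `"null"`, `"pretty"` and `"symbols"`.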
pub fn try_from_str(format: &str) -> Result<Self, Error> {
match format {
"null" => Ok(Self::Null),
"pretty" => Ok(Self::Pretty),
"symbols" => Ok(Self::Symbols),
_ => Err(Error::new_parse(format!(
"Unrecognized format '{}'",
format
))),
}
}
}
/// A representation of a kernel ABI, loaded from symvers files.
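///
/// Each record in a symvers file describes one exported symbol as whitespace-separated fields in
/// the form `<crc> <name> <module> <type> [<namespace>]`, for example:
///
/// ```text
/// 0x12345678 foo vmlinux EXPORT_SYMBOL
/// 0x90abcdef bar vmlinux EXPORT_SYMBOL_GPL BAR_NS
/// ```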
#[derive(Debug, Default, PartialEq)]
pub struct SymversCorpus {
exports: Exports,
}
impl SymversCorpus {
/// Creates a new empty `SymversCorpus` object.
pub fn new() -> Self {
Self {
exports: Exports::new(),
}
}
/// Loads symvers data from a specified file.
///
/// New symvers records are appended to the already present ones.
pub fn load<P: AsRef<Path>>(&mut self, path: P) -> Result<(), Error> {
let path = path.as_ref();
let file = PathFile::open(path).map_err(|err| {
Error::new_io(format!("Failed to open file '{}'", path.display()), err)
})?;
self.load_buffer(path, file)
}
/// Loads symvers data from a specified reader.
///
/// The `path` should point to the symvers file name, indicating the origin of the data. New
/// symvers records are appended to the already present ones.
pub fn load_buffer<P: AsRef<Path>, R: Read>(
&mut self,
path: P,
reader: R,
) -> Result<(), Error> {
let path = path.as_ref();
debug!("Loading symvers data from '{}'", path.display());
// Read all content from the file.
let lines = match read_lines(reader) {
Ok(lines) => lines,
Err(err) => return Err(Error::new_io("Failed to read symvers data", err)),
};
// Parse all records.
let mut new_exports = Exports::new();
for (line_idx, line) in lines.iter().enumerate() {
let (name, info) = parse_export(path, line_idx, line)?;
// Check if the record is a duplicate of another one.
if new_exports.contains_key(&name) || self.exports.contains_key(&name) {
return Err(Error::new_parse(format!(
"{}:{}: Duplicate record '{}'",
path.display(),
line_idx + 1,
name,
)));
}
new_exports.insert(name, info);
}
// Add the new exports.
self.exports.extend(new_exports);
Ok(())
}
/// Compares the symbols in `self` and `other_symvers`.
///
/// Writes reports about any found changes to the specified files, formatted as requested.
/// Returns [`Ok`] containing a `bool` indicating whether the corpuses are the same, or [`Err`]
/// on error.
pub fn compare_with<P: AsRef<Path>>(
&self,
other_symvers: &SymversCorpus,
maybe_rules: Option<&Rules>,
writers_conf: &[(CompareFormat, P)],
) -> Result<bool, Error> {
// Materialize all writers.
let mut writers = Vec::new();
for (format, path) in writers_conf {
writers.push((*format, Writer::new_file(path)?));
}
self.compare_with_buffer(other_symvers, maybe_rules, &mut writers[..])
}
/// Compares the symbols in `self` and `other_symvers`.
///
/// Writes reports about any found changes to the provided output streams, formatted as
/// requested. Returns [`Ok`] containing a `bool` indicating whether the corpuses are the same,
/// or [`Err`] on error.
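///
/// With [`CompareFormat::Pretty`], the reported changes look as follows (samples mirroring the
/// unit tests):
///
/// ```text
/// Export 'foo' has been removed
/// Export 'bar' has been added (tolerated)
/// Export 'foo' changed CRC from '0x12345678' to '0x09abcdef'
/// Export 'bar' changed type from 'EXPORT_SYMBOL' to 'EXPORT_SYMBOL_GPL'
/// ```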
pub fn compare_with_buffer<W: Write>(
&self,
other_symvers: &SymversCorpus,
maybe_rules: Option<&Rules>,
writers: &mut [(CompareFormat, W)],
) -> Result<bool, Error> {
// A helper function to handle common logic related to reporting a change. It determines if
// the change should be tolerated and updates the is_equal result.
fn process_change(
maybe_rules: Option<&Rules>,
name: &str,
info: &ExportInfo,
always_tolerated: bool,
is_equal: &mut bool,
) -> bool {
let tolerated = always_tolerated
|| match maybe_rules {
Some(rules) => {
rules.is_tolerated(name, &info.module, info.namespace.as_deref())
}
None => false,
};
if !tolerated {
*is_equal = false;
}
tolerated
}
// A helper function to obtain the "(tolerated)" suffix string.
fn tolerated_suffix(tolerated: bool) -> &'static str {
if tolerated { " (tolerated)" } else { "" }
}
let err_desc = "Failed to write a comparison result";
let mut names = self.exports.keys().collect::<Vec<_>>();
names.sort();
let mut other_names = other_symvers.exports.keys().collect::<Vec<_>>();
other_names.sort();
let mut is_equal = true;
// Check for symbols in self but not in other_symvers, and vice versa.
//
// Note that this code and all other checks below use the original symvers to consult the
// severity rules. That is, the original module and namespace values are matched against the
// rule patterns. A subtle detail is that added symbols, which lack a record in the original
// symvers, are always tolerated, so no rules come into play.
for (names_a, exports_a, exports_b, change, always_tolerated) in [
(
&names,
&self.exports,
&other_symvers.exports,
"removed",
false,
),
(
&other_names,
&other_symvers.exports,
&self.exports,
"added",
true,
),
] {
for &name in names_a {
if !exports_b.contains_key(name) {
let info = exports_a.get(name).unwrap();
let tolerated =
process_change(maybe_rules, name, info, always_tolerated, &mut is_equal);
for (format, writer) in &mut *writers {
match format {
CompareFormat::Null => {}
CompareFormat::Pretty => writeln!(
writer,
"Export '{}' has been {}{}",
name,
change,
tolerated_suffix(tolerated)
)
.map_io_err(err_desc)?,
CompareFormat::Symbols => {
if !tolerated {
writeln!(writer, "{}", name).map_io_err(err_desc)?
}
}
}
}
}
}
}
// Compare symbols that are in both symvers.
for name in names {
if let Some(other_info) = other_symvers.exports.get(name) {
let info = self.exports.get(name).unwrap();
if info.crc != other_info.crc {
let tolerated = process_change(maybe_rules, name, info, false, &mut is_equal);
for (format, writer) in &mut *writers {
match format {
CompareFormat::Null => {}
CompareFormat::Pretty => writeln!(
writer,
"Export '{}' changed CRC from '{:#010x}' to '{:#010x}'{}",
name,
info.crc,
other_info.crc,
tolerated_suffix(tolerated)
)
.map_io_err(err_desc)?,
CompareFormat::Symbols => {
if !tolerated {
writeln!(writer, "{}", name).map_io_err(err_desc)?
}
}
}
}
}
if info.is_gpl_only != other_info.is_gpl_only {
let tolerated = process_change(
maybe_rules,
name,
info,
info.is_gpl_only && !other_info.is_gpl_only,
&mut is_equal,
);
for (format, writer) in &mut *writers {
match format {
CompareFormat::Null => {}
CompareFormat::Pretty => writeln!(
writer,
"Export '{}' changed type from '{}' to '{}'{}",
name,
info.type_as_str(),
other_info.type_as_str(),
tolerated_suffix(tolerated)
)
.map_io_err(err_desc)?,
CompareFormat::Symbols => {
if !tolerated {
writeln!(writer, "{}", name).map_io_err(err_desc)?
}
}
}
}
}
}
}
for (_, writer) in &mut *writers {
writer.flush().map_io_err(err_desc)?;
}
Ok(is_equal)
}
}
/// Parses a single symvers record.
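///
/// A line is expected in the form `<crc> <name> <module> <type> [<namespace>]`, for example
/// `0x12345678 foo vmlinux EXPORT_SYMBOL_GPL FOO_NS`, and is returned as the export name paired
/// with its [`ExportInfo`].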
fn parse_export(path: &Path, line_idx: usize, line: &str) -> Result<(String, ExportInfo), Error> {
let mut words = line.split_ascii_whitespace();
// Parse the CRC value.
let crc = words.next().ok_or_else(|| {
Error::new_parse(format!(
"{}:{}: The export does not specify a CRC",
path.display(),
line_idx + 1
))
})?;
if !crc.starts_with("0x") && !crc.starts_with("0X") {
return Err(Error::new_parse(format!(
"{}:{}: Failed to parse the CRC value '{}': string does not start with 0x or 0X",
path.display(),
line_idx + 1,
crc
)));
}
let crc = u32::from_str_radix(&crc[2..], 16).map_err(|err| {
Error::new_parse(format!(
"{}:{}: Failed to parse the CRC value '{}': {}",
path.display(),
line_idx + 1,
crc,
err
))
})?;
// Parse the export name.
let name = words.next().ok_or_else(|| {
Error::new_parse(format!(
"{}:{}: The export does not specify a name",
path.display(),
line_idx + 1
))
})?;
// Parse the module name.
let module = words.next().ok_or_else(|| {
Error::new_parse(format!(
"{}:{}: The export does not specify a module",
path.display(),
line_idx + 1
))
})?;
// Parse the export type.
let export_type = words.next().ok_or_else(|| {
Error::new_parse(format!(
"{}:{}: The export does not specify a type",
path.display(),
line_idx + 1
))
})?;
let is_gpl_only = match export_type {
"EXPORT_SYMBOL" => false,
"EXPORT_SYMBOL_GPL" => true,
_ => {
return Err(Error::new_parse(format!(
"{}:{}: Invalid export type '{}', must be either EXPORT_SYMBOL or EXPORT_SYMBOL_GPL",
path.display(),
line_idx + 1,
export_type
)));
}
};
// Parse an optional namespace.
let namespace = words.next().map(String::from);
// Check that nothing else is left on the line.
if let Some(word) = words.next() {
return Err(Error::new_parse(format!(
"{}:{}: Unexpected string '{}' found at the end of the export record",
path.display(),
line_idx + 1,
word
)));
}
Ok((
name.to_string(),
ExportInfo::new(crc, module, is_gpl_only, namespace),
))
}
0707010000001F000081A40000000000000000000000016878E92800003232000000000000000000000000000000000000003800000000suse-kabi-tools-0.5.0+git0.9ad91db/src/symvers/tests.rs// Copyright (C) 2025 SUSE LLC <petr.pavlu@suse.com>
// SPDX-License-Identifier: GPL-2.0-or-later
use super::*;
use crate::text::Writer;
use crate::{assert_inexact_parse_err, assert_ok, assert_ok_eq, assert_parse_err, bytes};
#[test]
fn read_export_basic() {
// Check that basic parsing works correctly.
let mut symvers = SymversCorpus::new();
let result = symvers.load_buffer(
"test.symvers",
bytes!(
"0x12345678\tfoo\tvmlinux\tEXPORT_SYMBOL\n", //
),
);
assert_ok!(result);
assert_eq!(
symvers,
SymversCorpus {
exports: HashMap::from([(
"foo".to_string(),
ExportInfo::new(0x12345678, "vmlinux", false, None::<&str>)
)])
}
);
}
#[test]
fn read_empty_record() {
// Check that empty records are rejected when reading a symvers file.
let mut symvers = SymversCorpus::new();
let result = symvers.load_buffer(
"test.symvers",
bytes!(
"0x12345678 foo vmlinux EXPORT_SYMBOL\n",
"\n",
"0x90abcdef bar vmlinux EXPORT_SYMBOL_GPL BAR_NS\n", //
),
);
assert_parse_err!(result, "test.symvers:2: The export does not specify a CRC");
assert_eq!(symvers, SymversCorpus::new());
}
#[test]
fn read_duplicate_symbol_record() {
// Check that symbol records with duplicate names are rejected when reading a symvers file.
let mut symvers = SymversCorpus::new();
let result = symvers.load_buffer(
"test.symvers",
bytes!(
"0x12345678 foo vmlinux EXPORT_SYMBOL\n",
"0x12345678 foo vmlinux EXPORT_SYMBOL_GPL\n", //
),
);
assert_parse_err!(result, "test.symvers:2: Duplicate record 'foo'");
}
#[test]
fn read_invalid_crc() {
// Check that a CRC value not starting with 0x/0X is rejected.
let mut symvers = SymversCorpus::new();
let result = symvers.load_buffer(
"test.symvers",
bytes!(
"0 foo vmlinux EXPORT_SYMBOL\n", //
),
);
assert_parse_err!(
result,
"test.symvers:1: Failed to parse the CRC value '0': string does not start with 0x or 0X"
);
assert_eq!(symvers, SymversCorpus::new());
}
#[test]
fn read_invalid_crc2() {
// Check that a CRC value containing non-hexadecimal digits is rejected.
let mut symvers = SymversCorpus::new();
let result = symvers.load_buffer(
"test.symvers",
bytes!(
"0xabcdefgh foo vmlinux EXPORT_SYMBOL\n", //
),
);
assert_inexact_parse_err!(
result,
"test.symvers:1: Failed to parse the CRC value '0xabcdefgh': *"
);
assert_eq!(symvers, SymversCorpus::new());
}
#[test]
fn read_no_name() {
// Check that records without a name are rejected.
let mut symvers = SymversCorpus::new();
let result = symvers.load_buffer(
"test.symvers",
bytes!(
"0x12345678\n", //
),
);
assert_parse_err!(result, "test.symvers:1: The export does not specify a name");
assert_eq!(symvers, SymversCorpus::new());
}
#[test]
fn read_no_module() {
// Check that records without a module are rejected.
let mut symvers = SymversCorpus::new();
let result = symvers.load_buffer(
"test.symvers",
bytes!(
"0x12345678 foo\n", //
),
);
assert_parse_err!(
result,
"test.symvers:1: The export does not specify a module"
);
assert_eq!(symvers, SymversCorpus::new());
}
#[test]
fn read_type() {
// Check that the EXPORT_SYMBOL and EXPORT_SYMBOL_GPL types are correctly recognized.
let mut symvers = SymversCorpus::new();
let result = symvers.load_buffer(
"test.symvers",
bytes!(
"0x12345678 foo vmlinux EXPORT_SYMBOL\n",
"0x90abcdef bar vmlinux EXPORT_SYMBOL_GPL\n", //
),
);
assert_ok!(result);
assert_eq!(
symvers,
SymversCorpus {
exports: HashMap::from([
(
"foo".to_string(),
ExportInfo::new(0x12345678, "vmlinux", false, None::<&str>)
),
(
"bar".to_string(),
ExportInfo::new(0x90abcdef, "vmlinux", true, None::<&str>)
),
])
}
);
}
#[test]
fn read_no_type() {
// Check that records without a type are rejected.
let mut symvers = SymversCorpus::new();
let result = symvers.load_buffer(
"test.symvers",
bytes!(
"0x12345678 foo vmlinux\n", //
),
);
assert_parse_err!(result, "test.symvers:1: The export does not specify a type");
assert_eq!(symvers, SymversCorpus::new());
}
#[test]
fn read_invalid_type() {
// Check that an invalid type is rejected.
let mut symvers = SymversCorpus::new();
let result = symvers.load_buffer(
"test.symvers",
bytes!(
"0x12345678 foo vmlinux EXPORT_UNUSED_SYMBOL\n", //
),
);
assert_parse_err!(
result,
"test.symvers:1: Invalid export type 'EXPORT_UNUSED_SYMBOL', must be either EXPORT_SYMBOL or EXPORT_SYMBOL_GPL"
);
assert_eq!(symvers, SymversCorpus::new());
}
#[test]
fn read_namespace() {
// Check that an optional namespace is correctly accepted.
let mut symvers = SymversCorpus::new();
let result = symvers.load_buffer(
"test.symvers",
bytes!(
"0x12345678 foo vmlinux EXPORT_SYMBOL_GPL FOO_NS\n", //
),
);
assert_ok!(result);
assert_eq!(
symvers,
SymversCorpus {
exports: HashMap::from([(
"foo".to_string(),
ExportInfo::new(0x12345678, "vmlinux", true, Some("FOO_NS"))
)])
}
);
}
#[test]
fn read_extra_data() {
// Check that any extra data after the namespace is rejected.
let mut symvers = SymversCorpus::new();
let result = symvers.load_buffer(
"test.symvers",
bytes!(
"0x12345678 foo vmlinux EXPORT_SYMBOL_GPL FOO_NS garbage\n", //
),
);
assert_parse_err!(
result,
"test.symvers:1: Unexpected string 'garbage' found at the end of the export record"
);
assert_eq!(symvers, SymversCorpus::new());
}
#[test]
fn compare_identical() {
// Check that the comparison of two identical symvers shows no differences.
let mut symvers = SymversCorpus::new();
let result = symvers.load_buffer(
"a/test.symvers",
bytes!(
"0x12345678 foo vmlinux EXPORT_SYMBOL\n", //
),
);
assert_ok!(result);
let mut symvers2 = SymversCorpus::new();
let result = symvers2.load_buffer(
"b/test.symvers",
bytes!(
"0x12345678\tfoo\tvmlinux\tEXPORT_SYMBOL\n", //
),
);
assert_ok!(result);
let mut writer = Writer::new_buffer();
let result =
symvers.compare_with_buffer(&symvers2, None, &mut [(CompareFormat::Pretty, &mut writer)]);
let out = writer.into_inner_vec();
assert_ok_eq!(result, true);
assert_eq!(
str::from_utf8(&out).unwrap(),
concat!(
"", //
)
);
}
#[test]
fn compare_added_export() {
// Check that the comparison of two symvers reports any newly added export.
let mut symvers = SymversCorpus::new();
let result = symvers.load_buffer(
"a/test.symvers",
bytes!(
"0x12345678 foo vmlinux EXPORT_SYMBOL\n", //
),
);
assert_ok!(result);
let mut symvers2 = SymversCorpus::new();
let result = symvers2.load_buffer(
"b/test.symvers",
bytes!(
"0x12345678 foo vmlinux EXPORT_SYMBOL\n",
"0x90abcdef bar vmlinux EXPORT_SYMBOL_GPL BAR_NS\n", //
),
);
assert_ok!(result);
let mut writer = Writer::new_buffer();
let result =
symvers.compare_with_buffer(&symvers2, None, &mut [(CompareFormat::Pretty, &mut writer)]);
let out = writer.into_inner_vec();
assert_ok_eq!(result, true);
assert_eq!(
str::from_utf8(&out).unwrap(),
concat!(
"Export 'bar' has been added (tolerated)\n", //
)
);
}
#[test]
fn compare_removed_export() {
// Check that the comparison of two symvers reports any removed export.
let mut symvers = SymversCorpus::new();
let result = symvers.load_buffer(
"a/test.symvers",
bytes!(
"0x12345678 foo vmlinux EXPORT_SYMBOL\n",
"0x90abcdef bar vmlinux EXPORT_SYMBOL_GPL BAR_NS\n", //
),
);
assert_ok!(result);
let mut symvers2 = SymversCorpus::new();
let result = symvers2.load_buffer(
"b/test.symvers",
bytes!(
"0x90abcdef bar vmlinux EXPORT_SYMBOL_GPL BAR_NS\n", //
),
);
assert_ok!(result);
let mut writer = Writer::new_buffer();
let result =
symvers.compare_with_buffer(&symvers2, None, &mut [(CompareFormat::Pretty, &mut writer)]);
let out = writer.into_inner_vec();
assert_ok_eq!(result, false);
assert_eq!(
str::from_utf8(&out).unwrap(),
concat!(
"Export 'foo' has been removed\n", //
)
);
}
#[test]
fn compare_changed_crc() {
// Check that the comparison of two symvers reports exports with changed CRCs.
let mut symvers = SymversCorpus::new();
let result = symvers.load_buffer(
"a/test.symvers",
bytes!(
"0x12345678 foo vmlinux EXPORT_SYMBOL\n", //
),
);
assert_ok!(result);
let mut symvers2 = SymversCorpus::new();
let result = symvers2.load_buffer(
"b/test.symvers",
bytes!(
"0x09abcdef foo vmlinux EXPORT_SYMBOL\n", //
),
);
assert_ok!(result);
let mut writer = Writer::new_buffer();
let result =
symvers.compare_with_buffer(&symvers2, None, &mut [(CompareFormat::Pretty, &mut writer)]);
let out = writer.into_inner_vec();
assert_ok_eq!(result, false);
assert_eq!(
str::from_utf8(&out).unwrap(),
concat!(
"Export 'foo' changed CRC from '0x12345678' to '0x09abcdef'\n", //
)
);
}
#[test]
fn compare_changed_type() {
// Check that the comparison of two symvers reports exports with changed types.
let mut symvers = SymversCorpus::new();
let result = symvers.load_buffer(
"a/test.symvers",
bytes!(
"0x12345678 foo vmlinux EXPORT_SYMBOL\n",
"0x23456789 bar vmlinux EXPORT_SYMBOL\n",
"0x34567890 baz vmlinux EXPORT_SYMBOL_GPL\n",
"0x4567890a qux vmlinux EXPORT_SYMBOL_GPL\n", //
),
);
assert_ok!(result);
let mut symvers2 = SymversCorpus::new();
let result = symvers2.load_buffer(
"b/test.symvers",
bytes!(
"0x12345678 foo vmlinux EXPORT_SYMBOL\n",
"0x23456789 bar vmlinux EXPORT_SYMBOL_GPL\n",
"0x34567890 baz vmlinux EXPORT_SYMBOL\n",
"0x4567890a qux vmlinux EXPORT_SYMBOL_GPL\n", //
),
);
assert_ok!(result);
let mut writer = Writer::new_buffer();
let result =
symvers.compare_with_buffer(&symvers2, None, &mut [(CompareFormat::Pretty, &mut writer)]);
let out = writer.into_inner_vec();
assert_ok_eq!(result, false);
assert_eq!(
str::from_utf8(&out).unwrap(),
concat!(
"Export 'bar' changed type from 'EXPORT_SYMBOL' to 'EXPORT_SYMBOL_GPL'\n",
"Export 'baz' changed type from 'EXPORT_SYMBOL_GPL' to 'EXPORT_SYMBOL' (tolerated)\n", //
)
);
}
#[test]
fn compare_ignored_changes() {
// Check that severity rules can be used to tolerate changes.
let mut symvers = SymversCorpus::new();
let result = symvers.load_buffer(
"a/test.symvers",
bytes!(
"0x12345678 foo vmlinux EXPORT_SYMBOL\n", //
),
);
assert_ok!(result);
let mut symvers2 = SymversCorpus::new();
let result = symvers2.load_buffer(
"b/test.symvers",
bytes!(
"0x90abcdef foo vmlinux EXPORT_SYMBOL\n", //
),
);
assert_ok!(result);
let mut rules = Rules::new();
let result = rules.load_buffer(
"test.severities",
bytes!(
"vmlinux PASS\n", //
),
);
assert_ok!(result);
let mut writer = Writer::new_buffer();
let result = symvers.compare_with_buffer(
&symvers2,
Some(&rules),
&mut [(CompareFormat::Pretty, &mut writer)],
);
let out = writer.into_inner_vec();
assert_ok_eq!(result, true);
assert_eq!(
str::from_utf8(&out).unwrap(),
concat!(
"Export 'foo' changed CRC from '0x12345678' to '0x90abcdef' (tolerated)\n", //
)
);
}
07070100000020000041ED0000000000000000000000026878E92800000000000000000000000000000000000000000000002C00000000suse-kabi-tools-0.5.0+git0.9ad91db/src/text07070100000021000081A40000000000000000000000016878E92800004FEC000000000000000000000000000000000000003300000000suse-kabi-tools-0.5.0+git0.9ad91db/src/text/mod.rs// Copyright (C) 2024 SUSE LLC <petr.pavlu@suse.com>
// SPDX-License-Identifier: GPL-2.0-or-later
use crate::{Error, MapIOErr, PathFile, debug};
use std::collections::{HashMap, HashSet};
use std::fmt::Display;
use std::io::{BufReader, BufWriter, prelude::*};
use std::ops::{Index, IndexMut};
use std::path::{Path, PathBuf};
use std::{cmp, fs, io};
#[cfg(test)]
mod tests_diff;
#[cfg(test)]
mod tests_filter;
#[cfg(test)]
mod tests_wildcard;
// Implementation of the Myers diff algorithm:
// Myers, E.W. An O(ND) difference algorithm and its variations. Algorithmica 1, 251--266 (1986).
// https://doi.org/10.1007/BF01840446
/// A step in the edit script.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum Edit {
KeepA(usize),
RemoveA(usize),
InsertB(usize),
}
/// An edit script which describes how to transform `a` to `b`.
type EditScript = Vec<Edit>;
/// A limited [`Vec`] wrapper which allows indexing by `isize` in range
/// `(-self.0.len() / 2)..((self.0.len() + 1) / 2)` instead of `0..self.0.len()`.
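///
/// For example, with a backing [`Vec`] of length 5, the valid indices are `-2..=2`, and `self[0]`
/// maps to the middle element `self.0[2]`.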
struct IVec<T>(Vec<T>);
impl<T> Index<isize> for IVec<T> {
type Output = T;
fn index(&self, index: isize) -> &T {
let real_index = (self.0.len() / 2).wrapping_add_signed(index);
&self.0[real_index]
}
}
impl<T> IndexMut<isize> for IVec<T> {
fn index_mut(&mut self, index: isize) -> &mut T {
let real_index = (self.0.len() / 2).wrapping_add_signed(index);
&mut self.0[real_index]
}
}
/// An edit step, together with the index of the chain of previous steps that leads to the current
/// point during the edit graph traversal.
#[derive(Clone, Copy)]
struct EditChain {
prev: usize,
step: Edit,
}
/// A state of a diagonal during the edit graph traversal.
#[derive(Clone)]
struct DiagonalState {
x: usize,
edit_index: usize,
}
/// Compares `a` with `b` and returns an edit script describing how to transform the former to the
/// latter.
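///
/// For example, mirroring the `myers_insert_middle` unit test, transforming `["X", "Z"]` into
/// `["X", "Y", "Z"]` produces the edit script
/// `[Edit::KeepA(0), Edit::InsertB(1), Edit::KeepA(1)]`.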
fn myers<T: AsRef<str> + PartialEq>(a: &[T], b: &[T]) -> EditScript {
let max = a.len() + b.len();
let mut v = IVec(vec![
DiagonalState {
x: usize::MAX,
edit_index: usize::MAX,
};
// Minimum of 3 diagonals to allow accessing `v[1].x` when the inputs are empty.
cmp::max(2 * max + 1, 3)
]);
v[1].x = 0;
let mut edit_chains = Vec::new();
for d in 0..(max as isize + 1) {
for k in (-d..d + 1).step_by(2) {
// Determine where to progress, insert from `b` or remove from `a`.
let insert_b = k == -d || (k != d && v[k - 1].x < v[k + 1].x);
let (mut x, mut edit_index) = if insert_b {
(v[k + 1].x, v[k + 1].edit_index)
} else {
(v[k - 1].x + 1, v[k - 1].edit_index)
};
let mut y = x.wrapping_add_signed(-k);
// Record the step in the edit script. Skip the first step in the algorithm which
// initially brings the traversal to (0,0).
if d != 0 {
edit_chains.push(EditChain {
prev: edit_index,
step: if insert_b {
Edit::InsertB(y - 1)
} else {
Edit::RemoveA(x - 1)
},
});
edit_index = edit_chains.len() - 1;
}
// Look for a snake.
while x < a.len() && y < b.len() && a[x] == b[y] {
(x, y) = (x + 1, y + 1);
edit_chains.push(EditChain {
prev: edit_index,
step: Edit::KeepA(x - 1),
});
edit_index = edit_chains.len() - 1;
}
// Check if the end is reached or more steps are needed.
if x >= a.len() && y >= b.len() {
// Traverse the edit chain and turn it into a proper edit script.
let mut edit_script = EditScript::new();
while edit_index != usize::MAX {
let edit_chain = edit_chains[edit_index];
edit_script.push(edit_chain.step);
edit_index = edit_chain.prev;
}
edit_script.reverse();
return edit_script;
}
v[k] = DiagonalState { x, edit_index };
}
}
unreachable!();
}
/// Writes a single diff hunk to the provided output stream.
fn write_hunk<W: Write>(
hunk_pos_a: usize,
hunk_len_a: usize,
hunk_pos_b: usize,
hunk_len_b: usize,
hunk_data: &[String],
mut writer: W,
) -> Result<(), Error> {
let err_desc = "Failed to write a diff hunk";
writeln!(
writer,
"@@ -{},{} +{},{} @@",
hunk_pos_a, hunk_len_a, hunk_pos_b, hunk_len_b
)
.map_io_err(err_desc)?;
for hunk_str in hunk_data {
writeln!(writer, "{}", hunk_str).map_io_err(err_desc)?;
}
Ok(())
}
/// Compares `a` with `b` and writes their unified diff to the provided output stream.
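///
/// Hunks are emitted in the unified format with up to 3 lines of context, for example (mirroring
/// the `format_removal_top` unit test):
///
/// ```text
/// @@ -1,4 +1,3 @@
/// -int ivalue1;
///  int ivalue2;
///  int ivalue3;
///  int ivalue4;
/// ```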
pub fn unified_diff<T: AsRef<str> + PartialEq + Display, W: Write>(
a: &[T],
b: &[T],
mut writer: W,
) -> Result<(), Error> {
// Diff the two inputs and calculate the edit script.
let edit_script = myers(a, b);
// Turn the edit script into hunks in the unified format.
const CONTEXT_SIZE: usize = 3;
let (mut context_begin, mut context_end) = (0, 0);
let (mut pos_a, mut pos_b) = (1, 1);
let (mut hunk_pos_a, mut hunk_len_a, mut hunk_pos_b, mut hunk_len_b) = (0, 0, 0, 0);
let mut hunk_data = Vec::new();
for edit in edit_script {
match edit {
Edit::KeepA(index_a) => {
// Start recording a new context, or extend the current one.
if context_begin == context_end {
context_begin = index_a;
context_end = context_begin + 1;
} else {
context_end += 1;
}
// Update the positions.
pos_a += 1;
pos_b += 1;
// If handling a hunk, check if it should be closed off.
if !hunk_data.is_empty() && context_end - context_begin > 2 * CONTEXT_SIZE {
for line in a.iter().skip(context_begin).take(CONTEXT_SIZE) {
hunk_data.push(format!(" {}", line));
}
hunk_len_a += CONTEXT_SIZE;
hunk_len_b += CONTEXT_SIZE;
context_begin += CONTEXT_SIZE;
write_hunk(
hunk_pos_a,
hunk_len_a,
hunk_pos_b,
hunk_len_b,
&hunk_data,
writer.by_ref(),
)?;
hunk_data.clear();
}
}
Edit::RemoveA(_) | Edit::InsertB(_) => {
// Open a new hunk if not already handling one.
if hunk_data.is_empty() {
if context_end - context_begin > CONTEXT_SIZE {
context_begin = context_end - CONTEXT_SIZE;
}
hunk_pos_a = pos_a - (context_end - context_begin);
hunk_len_a = 0;
hunk_pos_b = pos_b - (context_end - context_begin);
hunk_len_b = 0;
}
// Update the positions.
if let Edit::RemoveA(_) = edit {
pos_a += 1;
} else {
pos_b += 1;
}
// Add any accumulated context.
for line in a.iter().take(context_end).skip(context_begin) {
hunk_data.push(format!(" {}", line));
}
hunk_len_a += context_end - context_begin;
hunk_len_b += context_end - context_begin;
context_begin = context_end;
// Record the removed/added string.
if let Edit::RemoveA(index_a) = edit {
hunk_data.push(format!("-{}", a[index_a]));
hunk_len_a += 1;
} else if let Edit::InsertB(index_b) = edit {
hunk_data.push(format!("+{}", b[index_b]));
hunk_len_b += 1;
}
}
}
}
// Close off the last hunk, if one is open.
if !hunk_data.is_empty() {
if context_end - context_begin > CONTEXT_SIZE {
context_end = context_begin + CONTEXT_SIZE;
}
for line in a.iter().take(context_end).skip(context_begin) {
hunk_data.push(format!(" {}", line));
}
hunk_len_a += context_end - context_begin;
hunk_len_b += context_end - context_begin;
write_hunk(
hunk_pos_a,
hunk_len_a,
hunk_pos_b,
hunk_len_b,
&hunk_data,
writer.by_ref(),
)?;
}
writer.flush().map_io_err("Failed to write a diff hunk")?;
Ok(())
}
// Rust implementation of the Salz's wildcard method:
// https://github.com/richsalz/wildmat
// Original code has been placed in the public domain.
#[derive(PartialEq)]
enum DoMatchResult {
True,
False,
Abort,
}
/// Attempts to match the given text against the specified shell wildcard pattern.
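///
/// Both `text` and `p` are expected to end with a `'\0'` sentinel, as appended by
/// [`matches_wildcard()`]; the sentinel plays the role of the C string terminator in the original
/// implementation.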
fn do_match(mut text: &[char], mut p: &[char]) -> DoMatchResult {
while p[0] != '\0' {
if text[0] == '\0' && p[0] != '*' {
return DoMatchResult::Abort;
}
match p[0] {
'\\' => {
// Literal match with following character.
p = &p[1..];
if text[0] != p[0] {
return DoMatchResult::False;
}
}
'?' => {
// Match anything.
}
'*' => {
p = &p[1..];
while p[0] == '*' {
// Consecutive stars act just like one.
p = &p[1..];
}
if p[0] == '\0' {
// Trailing star matches everything.
return DoMatchResult::True;
}
while text[0] != '\0' {
let matched = do_match(text, p);
if matched != DoMatchResult::False {
return matched;
}
text = &text[1..];
}
return DoMatchResult::Abort;
}
'[' => {
let reverse = p[1] == '^';
if reverse {
// Inverted character class.
p = &p[1..];
}
let mut matched = false;
if p[1] == ']' || p[1] == '-' {
p = &p[1..];
if p[0] == text[0] {
matched = true;
}
}
let mut last = p[0];
p = &p[1..];
while p[0] != '\0' && p[0] != ']' {
// Handle a potential character range; the condition below is a direct port of the original C code.
if if p[0] == '-' && p[1] != ']' {
p = &p[1..];
text[0] <= p[0] && text[0] >= last
} else {
text[0] == p[0]
} {
matched = true;
}
last = p[0];
p = &p[1..];
}
if matched == reverse {
return DoMatchResult::False;
}
}
_ => {
if text[0] != p[0] {
return DoMatchResult::False;
}
}
}
text = &text[1..];
p = &p[1..];
}
if text[0] == '\0' {
DoMatchResult::True
} else {
DoMatchResult::False
}
}
/// Checks whether the given text matches the specified shell wildcard pattern.
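///
/// The supported syntax is `?` for any single character, `*` for any sequence of characters,
/// `[...]`/`[^...]` for character classes, and `\` to escape the following character. For example,
/// `matches_wildcard("foo_bar", "foo_*")` returns `true`.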
pub fn matches_wildcard(text: &str, pattern: &str) -> bool {
if pattern == "*" {
return true;
}
let mut text = text.chars().collect::<Vec<_>>();
text.push('\0');
let mut pattern = pattern.chars().collect::<Vec<_>>();
pattern.push('\0');
do_match(&text, &pattern) == DoMatchResult::True
}
/// Reads data from a specified reader and returns its content as a [`Vec`] of [`String`] lines.
pub fn read_lines<R: Read>(reader: R) -> io::Result<Vec<String>> {
let reader = BufReader::new(reader);
let mut lines = Vec::new();
for maybe_line in reader.lines() {
match maybe_line {
Ok(line) => lines.push(line),
Err(err) => return Err(err),
};
}
Ok(lines)
}
/// A writer to the standard output, a file or an internal buffer.
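///
/// A buffer-backed writer can be used as follows (a minimal sketch mirroring the unit tests;
/// error handling omitted):
///
/// ```text
/// let mut writer = Writer::new_buffer();
/// writeln!(writer, "hello").unwrap();
/// let out = writer.into_inner_vec();
/// assert_eq!(out, b"hello\n");
/// ```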
pub enum Writer {
Stdout(io::Stdout),
File(BufWriter<PathFile>),
Buffer(Vec<u8>),
NamedBuffer(PathBuf, Vec<u8>),
}
impl Writer {
/// Creates a new [`Writer`] that writes to the specified file. Treats "-" as the standard
/// output.
pub fn new_file<P: AsRef<Path>>(path: P) -> Result<Self, Error> {
let path = path.as_ref();
if path == Path::new("-") {
Ok(Self::Stdout(io::stdout()))
} else {
Self::new_exact_file(path)
}
}
/// Creates a new [`Writer`] that writes to the specified file.
pub fn new_exact_file<P: AsRef<Path>>(path: P) -> Result<Self, Error> {
let path = path.as_ref();
match PathFile::create(path) {
Ok(file) => Ok(Self::File(BufWriter::new(file))),
Err(err) => Err(Error::new_io(
format!("Failed to create file '{}'", path.display()),
err,
)),
}
}
/// Creates a new [`Writer`] that writes to an internal buffer.
pub fn new_buffer() -> Self {
Self::Buffer(Vec::new())
}
/// Creates a new [`Writer`] that writes to a named internal buffer.
pub fn new_named_buffer<P: AsRef<Path>>(path: P) -> Self {
Self::NamedBuffer(path.as_ref().to_path_buf(), Vec::new())
}
/// Obtains the internal buffer if the writer is of the [`Writer::Buffer`] type.
pub fn into_inner_vec(self) -> Vec<u8> {
match self {
Self::Buffer(vec) => vec,
_ => panic!("The writer is not of type Writer::Buffer"),
}
}
/// Obtains the path and internal buffer if the writer is of the [`Writer::NamedBuffer`] type.
pub fn into_inner_path_vec(self) -> (PathBuf, Vec<u8>) {
match self {
Self::NamedBuffer(path, vec) => (path, vec),
_ => panic!("The writer is not of type Writer::NamedBuffer"),
}
}
}
impl Write for Writer {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
match self {
Self::Stdout(stdout) => stdout.write(buf),
Self::File(file) => file.write(buf),
Self::Buffer(vec) => vec.write(buf),
Self::NamedBuffer(_, vec) => vec.write(buf),
}
}
fn flush(&mut self) -> io::Result<()> {
match self {
Self::Stdout(stdout) => stdout.flush(),
Self::File(file) => file.flush(),
Self::Buffer(vec) => vec.flush(),
Self::NamedBuffer(_, vec) => vec.flush(),
}
}
}
/// A factory trait for [`Write`] objects, allowing writing to multiple files/streams.
pub trait WriteGenerator<W: Write> {
/// Opens a new writer to the specified path.
fn create<P: AsRef<Path>>(&mut self, sub_path: P) -> Result<W, Error>;
/// Closes a writer previously provided by the `create()` method.
fn close(&mut self, writer: W);
}
/// A factory for writing multiple files in a specific directory. The output can be written directly
/// to on-disk files, or stored in a set of internal buffers.
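///
/// A buffer-backed variant can be exercised as follows (a minimal sketch mirroring the
/// `write_split_basic` unit test, where `symtypes` is a previously loaded `SymtypesCorpus`):
///
/// ```text
/// let mut out = DirectoryWriter::new_buffer("split");
/// symtypes.write_split_buffer(&mut out, 1)?;
/// let files = out.into_inner_map();
/// assert!(files.contains_key(Path::new("split/test.symtypes")));
/// ```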
pub enum DirectoryWriter {
File(PathBuf),
Buffer(PathBuf, HashMap<PathBuf, Vec<u8>>),
}
impl DirectoryWriter {
/// Creates a new [`DirectoryWriter`] that writes to on-disk files in a specified directory.
pub fn new_file<P: AsRef<Path>>(root: P) -> Self {
Self::File(root.as_ref().to_path_buf())
}
/// Creates a new [`DirectoryWriter`] that writes to a set of internal buffers.
pub fn new_buffer<P: AsRef<Path>>(root: P) -> Self {
Self::Buffer(root.as_ref().to_path_buf(), HashMap::new())
}
/// Obtains the internal buffers if the writer is of the [`DirectoryWriter::Buffer`] type.
pub fn into_inner_map(self) -> HashMap<PathBuf, Vec<u8>> {
match self {
Self::Buffer(_, files) => files,
_ => panic!("The writer is not of type DirectoryWriter::Buffer"),
}
}
}
impl WriteGenerator<Writer> for &mut DirectoryWriter {
fn create<P: AsRef<Path>>(&mut self, sub_path: P) -> Result<Writer, Error> {
match self {
DirectoryWriter::File(root) => {
let path = root.join(sub_path);
if let Some(parent) = path.parent() {
fs::create_dir_all(parent).map_err(|err| {
Error::new_io(
format!("Failed to create directory '{}'", parent.display()),
err,
)
})?;
}
Writer::new_exact_file(path)
}
DirectoryWriter::Buffer(root, _) => Ok(Writer::new_named_buffer(root.join(sub_path))),
}
}
fn close(&mut self, writer: Writer) {
if let DirectoryWriter::Buffer(_, files) = self {
let (path, vec) = writer.into_inner_path_vec();
files.insert(path, vec);
}
}
}
/// A collection of shell wildcard patterns used to filter symbol or file names.
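///
/// The filter file contains one pattern per line; a line containing `\`, `?`, `*` or `[` is
/// treated as a wildcard pattern, any other line as a literal name. A minimal sketch (mirroring
/// how the loaders are used in the unit tests, with the crate's `bytes!` macro):
///
/// ```text
/// let mut filter = Filter::new();
/// filter.load_buffer("test.filter", bytes!("foo\n", "bar_*\n"))?;
/// assert!(filter.matches("foo"));
/// assert!(filter.matches("bar_baz"));
/// assert!(!filter.matches("qux"));
/// ```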
#[derive(Debug, Default, PartialEq)]
pub struct Filter {
// Literal patterns.
literals: HashSet<String>,
// Wildcard patterns.
wildcards: Vec<String>,
}
impl Filter {
/// Creates a new empty `Filter` object.
pub fn new() -> Self {
Self {
literals: HashSet::new(),
wildcards: Vec::new(),
}
}
/// Loads filter data from a specified file.
///
/// New patterns are appended to the already present ones.
pub fn load<P: AsRef<Path>>(&mut self, path: P) -> Result<(), Error> {
let path = path.as_ref();
let file = PathFile::open(path).map_err(|err| {
Error::new_io(format!("Failed to open file '{}'", path.display()), err)
})?;
self.load_buffer(path, file)
}
/// Loads filter data from a specified reader.
///
/// The `path` should point to the filter file name, indicating the origin of the data. New
/// patterns are appended to the already present ones.
pub fn load_buffer<P: AsRef<Path>, R: Read>(
&mut self,
path: P,
reader: R,
) -> Result<(), Error> {
let path = path.as_ref();
debug!("Loading filter data from '{}'", path.display());
// Read all content from the file.
let lines = match read_lines(reader) {
Ok(lines) => lines,
Err(err) => return Err(Error::new_io("Failed to read filter data", err)),
};
// Validate the patterns, reject empty ones.
for (line_idx, line) in lines.iter().enumerate() {
if line.is_empty() {
return Err(Error::new_parse(format!(
"{}:{}: Expected a pattern",
path.display(),
line_idx + 1
)));
}
}
// Insert the new patterns. A line containing any of the wildcard metacharacters
// ('\', '?', '*', '[') is treated as a wildcard pattern; any other line is a literal.
for line in lines {
if line
.chars()
.any(|x| x == '\\' || x == '?' || x == '*' || x == '[')
{
self.wildcards.push(line);
} else {
self.literals.insert(line);
}
}
Ok(())
}
/// Checks whether the given text matches any of the filter patterns.
pub fn matches(&self, name: &str) -> bool {
if self.literals.contains(name) {
return true;
}
for pattern in &self.wildcards {
if matches_wildcard(name, pattern) {
return true;
}
}
false
}
}
07070100000022000081A40000000000000000000000016878E92800001066000000000000000000000000000000000000003A00000000suse-kabi-tools-0.5.0+git0.9ad91db/src/text/tests_diff.rs// Copyright (C) 2024 SUSE LLC <petr.pavlu@suse.com>
// SPDX-License-Identifier: GPL-2.0-or-later
use super::*;
#[test]
fn myers_trivial_empty() {
// Check a situation when no operation is needed because both inputs are empty.
let a: [&str; 0] = [];
let b = [];
let edit_script = myers(&a, &b);
assert_eq!(edit_script, []);
}
#[test]
fn myers_trivial_replace() {
// Check a situation when a complete replacement is needed.
let a = ["X"];
let b = ["Y"];
let edit_script = myers(&a, &b);
assert_eq!(edit_script, [Edit::RemoveA(0), Edit::InsertB(0)]);
}
#[test]
fn myers_trivial_insert() {
// Check a situation when an insert operation from `b` is the only step needed.
let a = [];
let b = ["X"];
let edit_script = myers(&a, &b);
assert_eq!(edit_script, [Edit::InsertB(0)]);
}
#[test]
fn myers_trivial_remove() {
// Check a situation when a remove operation from `a` is the only step needed.
let a = ["X"];
let b = [];
let edit_script = myers(&a, &b);
assert_eq!(edit_script, [Edit::RemoveA(0)]);
}
#[test]
fn myers_trivial_keep() {
// Check a situation when a keep operation from `a` is the only step needed.
let a = ["X"];
let b = ["X"];
let edit_script = myers(&a, &b);
assert_eq!(edit_script, [Edit::KeepA(0)]);
}
#[test]
fn myers_insert_front() {
// Check a situation when an insert operation at the front of `a` is needed.
let a = ["X", "Y"];
let b = ["W", "X", "Y"];
let edit_script = myers(&a, &b);
assert_eq!(
edit_script,
[Edit::InsertB(0), Edit::KeepA(0), Edit::KeepA(1)]
);
}
#[test]
fn myers_insert_middle() {
// Check a situation when an insert operation in the middle of `a` is needed.
let a = ["X", "Z"];
let b = ["X", "Y", "Z"];
let edit_script = myers(&a, &b);
assert_eq!(
edit_script,
[Edit::KeepA(0), Edit::InsertB(1), Edit::KeepA(1)]
);
}
#[test]
fn myers_insert_end() {
// Check a situation when an insert operation at the end of `a` is needed.
let a = ["X", "Y"];
let b = ["X", "Y", "Z"];
let edit_script = myers(&a, &b);
assert_eq!(
edit_script,
[Edit::KeepA(0), Edit::KeepA(1), Edit::InsertB(2)]
);
}
#[test]
fn myers_insert_subsequent() {
// Check a situation when subsequent insert operations in `a` are needed.
let a = [];
let b = ["X", "Y", "Z"];
let edit_script = myers(&a, &b);
assert_eq!(
edit_script,
[Edit::InsertB(0), Edit::InsertB(1), Edit::InsertB(2)]
);
}
#[test]
fn myers_remove_front() {
// Check a situation when a remove operation from the front of `a` is needed.
let a = ["W", "X", "Y"];
let b = ["X", "Y"];
let edit_script = myers(&a, &b);
assert_eq!(
edit_script,
[Edit::RemoveA(0), Edit::KeepA(1), Edit::KeepA(2)]
);
}
#[test]
fn myers_remove_middle() {
// Check a situation when a remove operation from the middle of `a` is needed.
let a = ["X", "Y", "Z"];
let b = ["X", "Z"];
let edit_script = myers(&a, &b);
assert_eq!(
edit_script,
[Edit::KeepA(0), Edit::RemoveA(1), Edit::KeepA(2)]
);
}
#[test]
fn myers_remove_end() {
// Check a situation when a remove operation from the end of `a` is needed.
let a = ["X", "Y", "Z"];
let b = ["X", "Y"];
let edit_script = myers(&a, &b);
assert_eq!(
edit_script,
[Edit::KeepA(0), Edit::KeepA(1), Edit::RemoveA(2)]
);
}
#[test]
fn myers_remove_subsequent() {
// Check a situation when subsequent remove operations from `a` are needed.
let a = ["X", "Y", "Z"];
let b = [];
let edit_script = myers(&a, &b);
assert_eq!(
edit_script,
[Edit::RemoveA(0), Edit::RemoveA(1), Edit::RemoveA(2)]
);
}
#[test]
fn myers_keep_subsequent() {
// Check a situation when subsequent keep operations from `a` are needed.
let a = ["X", "Y", "Z"];
let b = ["W", "X", "Y"];
let edit_script = myers(&a, &b);
assert_eq!(
edit_script,
[
Edit::InsertB(0),
Edit::KeepA(0),
Edit::KeepA(1),
Edit::RemoveA(2)
]
);
}
07070100000023000081A40000000000000000000000016878E928000008EF000000000000000000000000000000000000003C00000000suse-kabi-tools-0.5.0+git0.9ad91db/src/text/tests_filter.rs// Copyright (C) 2025 SUSE LLC <petr.pavlu@suse.com>
// SPDX-License-Identifier: GPL-2.0-or-later
use super::*;
use crate::{assert_ok, assert_parse_err, bytes, string_vec};
#[test]
fn read_literal_pattern() {
// Check that patterns containing only regular characters are treated as literals.
let mut filter = Filter::new();
let result = filter.load_buffer(
"test.filter",
bytes!(
"abc\n", "ABC\n", "_09\n", //
),
);
assert_ok!(result);
assert_eq!(
filter,
Filter {
literals: HashSet::from(["abc".to_string(), "ABC".to_string(), "_09".to_string()]),
wildcards: vec![],
}
);
}
#[test]
fn read_wildcard_pattern() {
// Check that patterns containing wildcard metacharacters are treated as wildcards.
let mut filter = Filter::new();
let result = filter.load_buffer(
"test.filter",
bytes!(
"\\abc\n", "a?bc\n", "ab*c\n", "abc[\n", //
),
);
assert_ok!(result);
assert_eq!(
filter,
Filter {
literals: HashSet::new(),
wildcards: string_vec!["\\abc", "a?bc", "ab*c", "abc["],
}
);
}
#[test]
fn read_empty_record() {
// Check that empty records are rejected when reading a filter file.
let mut filter = Filter::new();
let result = filter.load_buffer(
"test.filter",
bytes!(
"foo\n", "\n", "bar\n", //
),
);
assert_parse_err!(result, "test.filter:2: Expected a pattern");
assert_eq!(filter, Filter::new());
}
#[test]
fn matches_literal_pattern() {
// Check that a filter can match a literal pattern.
let mut filter = Filter::new();
let result = filter.load_buffer(
"test.filter",
bytes!(
"abc\n" //
),
);
assert_ok!(result);
assert!(filter.matches("abc"));
assert!(!filter.matches("Xbc"));
}
#[test]
fn matches_wildcard_pattern() {
// Check that a filter can match a wildcard pattern.
let mut filter = Filter::new();
let result = filter.load_buffer(
"test.filter",
bytes!(
"a*\n" //
),
);
assert_ok!(result);
assert!(filter.matches("abc"));
assert!(!filter.matches("Xbc"));
}
07070100000024000081A40000000000000000000000016878E92800000BCE000000000000000000000000000000000000003E00000000suse-kabi-tools-0.5.0+git0.9ad91db/src/text/tests_wildcard.rs// Copyright (C) 2024 SUSE LLC <petr.pavlu@suse.com>
// SPDX-License-Identifier: GPL-2.0-or-later
use super::*;
#[test]
fn matches_plain() {
// Check wildcard matching when the pattern contains only regular characters.
assert!(matches_wildcard("", ""));
assert!(matches_wildcard("abc", "abc"));
assert!(!matches_wildcard("abc", "Xbc"));
}
#[test]
fn matches_asterisk() {
// Check wildcard matching with an asterisk as a multi-character wildcard.
assert!(matches_wildcard("abc", "*"));
assert!(matches_wildcard("abc", "a*"));
assert!(matches_wildcard("abc", "*bc"));
assert!(matches_wildcard("abc", "a*c"));
assert!(matches_wildcard("abc", "ab*"));
assert!(matches_wildcard("abc", "a****c"));
assert!(matches_wildcard("a-b-c", "a*b*c"));
assert!(!matches_wildcard("abcd", "*bc"));
}
#[test]
fn matches_question_mark() {
// Check wildcard matching with a question mark as a single-character wildcard.
assert!(!matches_wildcard("", "?"));
assert!(matches_wildcard("a", "?"));
assert!(!matches_wildcard("ab", "?"));
assert!(matches_wildcard("abc", "?bc"));
assert!(matches_wildcard("abc", "a?c"));
assert!(matches_wildcard("abc", "ab?"));
assert!(!matches_wildcard("abc", "a???c"));
assert!(matches_wildcard("abdec", "a???c"));
assert!(matches_wildcard("a-b-c", "a?b?c"));
assert!(!matches_wildcard("abcd", "?bc"));
}
#[test]
fn matches_range() {
// Check wildcard matching with a bracketed character range.
assert!(!matches_wildcard("", "[abc]"));
assert!(matches_wildcard("a", "[abc]"));
assert!(matches_wildcard("b", "[abc]"));
assert!(matches_wildcard("c", "[abc]"));
assert!(!matches_wildcard("ab", "[abc]"));
assert!(matches_wildcard("bde", "[abc]de"));
assert!(matches_wildcard("dbe", "d[abc]e"));
assert!(matches_wildcard("deb", "de[abc]"));
assert!(matches_wildcard("dabce", "d[abc][abc][abc]e"));
assert!(!matches_wildcard("abcd", "[abc]bc"));
assert!(!matches_wildcard("", "[^abc]"));
assert!(!matches_wildcard("a", "[^abc]"));
assert!(!matches_wildcard("b", "[^abc]"));
assert!(!matches_wildcard("c", "[^abc]"));
assert!(matches_wildcard("d", "[^abc]"));
assert!(!matches_wildcard("ab", "[^abc]"));
assert!(!matches_wildcard("", "[a-c]"));
assert!(matches_wildcard("a", "[a-c]"));
assert!(matches_wildcard("b", "[a-c]"));
assert!(matches_wildcard("c", "[a-c]"));
assert!(!matches_wildcard("ab", "[a-c]"));
assert!(matches_wildcard("a", "[a-cD-EF]"));
assert!(matches_wildcard("b", "[a-cD-EF]"));
assert!(matches_wildcard("c", "[a-cD-EF]"));
assert!(matches_wildcard("D", "[a-cD-EF]"));
assert!(matches_wildcard("E", "[a-cD-EF]"));
assert!(matches_wildcard("F", "[a-cD-EF]"));
assert!(matches_wildcard("]", "[]]"));
assert!(matches_wildcard("a", "[^]]"));
assert!(matches_wildcard("-", "[-]"));
assert!(matches_wildcard("a", "[^-]"));
assert!(matches_wildcard("a", "[^]-]"));
}
07070100000025000041ED0000000000000000000000026878E92800000000000000000000000000000000000000000000002900000000suse-kabi-tools-0.5.0+git0.9ad91db/tests07070100000026000041ED0000000000000000000000026878E92800000000000000000000000000000000000000000000003000000000suse-kabi-tools-0.5.0+git0.9ad91db/tests/common07070100000027000081A40000000000000000000000016878E928000002E0000000000000000000000000000000000000003700000000suse-kabi-tools-0.5.0+git0.9ad91db/tests/common/mod.rs// Copyright (C) 2025 SUSE LLC <petr.pavlu@suse.com>
// SPDX-License-Identifier: GPL-2.0-or-later
use std::ffi::OsStr;
use std::process::{Command, ExitStatus};
/// The captured exit status and output of an executed command.
pub struct RunResult {
pub status: ExitStatus,
pub stdout: String,
pub stderr: String,
}
/// Runs the given program with the specified arguments, waits for it to finish and captures its
/// exit status and UTF-8 output.
pub fn tool_run<P: AsRef<OsStr>, I: IntoIterator<Item = S>, S: AsRef<OsStr>>(
program: P,
args: I,
) -> RunResult {
let program = program.as_ref();
let output = Command::new(program)
.args(args)
.output()
.unwrap_or_else(|err| panic!("failed to execute {:?}: {}", program, err));
RunResult {
status: output.status,
stdout: String::from_utf8(output.stdout).unwrap(),
stderr: String::from_utf8(output.stderr).unwrap(),
}
}
07070100000028000041ED0000000000000000000000026878E92800000000000000000000000000000000000000000000003300000000suse-kabi-tools-0.5.0+git0.9ad91db/tests/ksymtypes07070100000029000041ED0000000000000000000000026878E92800000000000000000000000000000000000000000000003F00000000suse-kabi-tools-0.5.0+git0.9ad91db/tests/ksymtypes/compare_cmd0707010000002A000081A40000000000000000000000016878E92800000017000000000000000000000000000000000000004A00000000suse-kabi-tools-0.5.0+git0.9ad91db/tests/ksymtypes/compare_cmd/a.symtypesfoo void foo ( int a )
0707010000002B000081A40000000000000000000000016878E92800000018000000000000000000000000000000000000004A00000000suse-kabi-tools-0.5.0+git0.9ad91db/tests/ksymtypes/compare_cmd/b.symtypesfoo void foo ( long a )
0707010000002C000041ED0000000000000000000000026878E92800000000000000000000000000000000000000000000004300000000suse-kabi-tools-0.5.0+git0.9ad91db/tests/ksymtypes/consolidate_cmd0707010000002D000081A40000000000000000000000016878E92800000033000000000000000000000000000000000000004E00000000suse-kabi-tools-0.5.0+git0.9ad91db/tests/ksymtypes/consolidate_cmd/a.symtypess#foo struct foo { int a ; }
bar int bar ( s#foo )
0707010000002E000081A40000000000000000000000016878E92800000033000000000000000000000000000000000000004E00000000suse-kabi-tools-0.5.0+git0.9ad91db/tests/ksymtypes/consolidate_cmd/b.symtypess#foo struct foo { int a ; }
baz int baz ( s#foo )
0707010000002F000081A40000000000000000000000016878E928000013D4000000000000000000000000000000000000003B00000000suse-kabi-tools-0.5.0+git0.9ad91db/tests/ksymtypes/main.rs// Copyright (C) 2025 SUSE LLC <petr.pavlu@suse.com>
// SPDX-License-Identifier: GPL-2.0-or-later
use std::ffi::OsStr;
use std::fs;
use std::path::Path;
use suse_kabi_tools::assert_inexact;
#[path = "../common/mod.rs"]
mod common;
use common::*;
fn ksymtypes_run<I: IntoIterator<Item = S>, S: AsRef<OsStr>>(args: I) -> RunResult {
tool_run(env!("CARGO_BIN_EXE_ksymtypes"), args)
}
#[test]
fn consolidate_cmd() {
// Check that the consolidate command trivially works.
let output_path = Path::new(env!("CARGO_TARGET_TMPDIR")).join("consolidate_cmd.symtypes");
fs::remove_file(&output_path).ok();
let result = ksymtypes_run([
AsRef::<OsStr>::as_ref("consolidate"),
"--output".as_ref(),
output_path.as_ref(),
"tests/ksymtypes/consolidate_cmd".as_ref(),
]);
assert!(result.status.success());
assert_eq!(result.stdout, "");
assert_eq!(result.stderr, "");
let output_data = fs::read_to_string(output_path).expect("Unable to read the output file");
assert_eq!(
output_data,
concat!(
"/* a.symtypes */\n",
"s#foo struct foo { int a ; }\n",
"bar int bar ( s#foo )\n",
"\n",
"/* b.symtypes */\n",
"baz int baz ( s#foo )\n", //
)
);
}
#[test]
fn consolidate_cmd_missing_output() {
// Check that the consolidate command fails if no --output is specified.
let result = ksymtypes_run(["consolidate", "tests/ksymtypes/consolidate_cmd"]);
assert!(!result.status.success());
assert_eq!(result.stdout, "");
assert_eq!(result.stderr, "The consolidate output is missing\n");
}
#[test]
fn consolidate_cmd_invalid_input() {
// Check that the consolidate command correctly propagates inner errors and writes them to
// the standard error output.
let output_path =
Path::new(env!("CARGO_TARGET_TMPDIR")).join("consolidate_cmd_invalid_input.symtypes");
fs::remove_file(&output_path).ok();
let result = ksymtypes_run([
AsRef::<OsStr>::as_ref("consolidate"),
"--output".as_ref(),
output_path.as_ref(),
"tests/missing".as_ref(),
]);
assert!(!result.status.success());
assert_eq!(result.stdout, "");
assert_inexact!(
result.stderr,
"Failed to read symtypes from 'tests/missing': Failed to query path 'tests/missing': *"
);
}
#[test]
fn split_cmd() {
// Check that the split command trivially works.
let output_path = Path::new(env!("CARGO_TARGET_TMPDIR")).join("split_cmd_output");
fs::remove_dir_all(&output_path).ok();
let result = ksymtypes_run([
AsRef::<OsStr>::as_ref("split"),
"--output".as_ref(),
output_path.as_ref(),
"tests/ksymtypes/split_cmd/consolidated.symtypes".as_ref(),
]);
assert!(result.status.success());
assert_eq!(result.stdout, "");
assert_eq!(result.stderr, "");
assert_eq!(
fs::read_to_string(output_path.join("a.symtypes")).unwrap(),
concat!(
"s#foo struct foo { int a ; }\n",
"bar int bar ( s#foo )\n", //
)
);
assert_eq!(
fs::read_to_string(output_path.join("b.symtypes")).unwrap(),
concat!(
"s#foo struct foo { int a ; }\n",
"baz int baz ( s#foo )\n", //
)
);
}
#[test]
fn split_cmd_missing_output() {
// Check that the split command fails if no --output is specified.
let result = ksymtypes_run(["split", "tests/ksymtypes/split_cmd/consolidated.symtypes"]);
assert!(!result.status.success());
assert_eq!(result.stdout, "");
assert_eq!(result.stderr, "The split output is missing\n");
}
#[test]
fn compare_cmd() {
// Check that the compare command trivially works.
let result = ksymtypes_run([
"compare",
"tests/ksymtypes/compare_cmd/a.symtypes",
"tests/ksymtypes/compare_cmd/b.symtypes",
]);
assert!(result.status.success());
assert_eq!(
result.stdout,
concat!(
"The following '1' exports are different:\n",
" foo\n",
"\n",
"because of a changed 'foo':\n",
"@@ -1,3 +1,3 @@\n",
" void foo (\n",
"-\tint a\n",
"+\tlong a\n",
" )\n", //
)
);
assert_eq!(result.stderr, "");
}
#[test]
fn compare_cmd_dash_dash() {
// Check that operands of the compare command can be specified after '--'.
let result = ksymtypes_run([
"compare",
"--",
"tests/ksymtypes/compare_cmd/a.symtypes",
"tests/ksymtypes/compare_cmd/b.symtypes",
]);
assert!(result.status.success());
assert_eq!(
result.stdout,
concat!(
"The following '1' exports are different:\n",
" foo\n",
"\n",
"because of a changed 'foo':\n",
"@@ -1,3 +1,3 @@\n",
" void foo (\n",
"-\tint a\n",
"+\tlong a\n",
" )\n", //
)
);
assert_eq!(result.stderr, "");
}
07070100000030000041ED0000000000000000000000026878E92800000000000000000000000000000000000000000000003D00000000suse-kabi-tools-0.5.0+git0.9ad91db/tests/ksymtypes/split_cmd07070100000031000081A40000000000000000000000016878E9280000006C000000000000000000000000000000000000005300000000suse-kabi-tools-0.5.0+git0.9ad91db/tests/ksymtypes/split_cmd/consolidated.symtypes/* a.symtypes */
s#foo struct foo { int a ; }
bar int bar ( s#foo )
/* b.symtypes */
baz int baz ( s#foo )
07070100000032000041ED0000000000000000000000026878E92800000000000000000000000000000000000000000000003200000000suse-kabi-tools-0.5.0+git0.9ad91db/tests/ksymvers07070100000033000041ED0000000000000000000000026878E92800000000000000000000000000000000000000000000003E00000000suse-kabi-tools-0.5.0+git0.9ad91db/tests/ksymvers/compare_cmd07070100000034000081A40000000000000000000000016878E92800000025000000000000000000000000000000000000004800000000suse-kabi-tools-0.5.0+git0.9ad91db/tests/ksymvers/compare_cmd/a.symvers0x12345678 foo vmlinux EXPORT_SYMBOL
07070100000035000081A40000000000000000000000016878E92800000025000000000000000000000000000000000000004800000000suse-kabi-tools-0.5.0+git0.9ad91db/tests/ksymvers/compare_cmd/b.symvers0x09abcdef foo vmlinux EXPORT_SYMBOL
07070100000036000081A40000000000000000000000016878E92800000500000000000000000000000000000000000000003A00000000suse-kabi-tools-0.5.0+git0.9ad91db/tests/ksymvers/main.rs// Copyright (C) 2025 SUSE LLC <petr.pavlu@suse.com>
// SPDX-License-Identifier: GPL-2.0-or-later
use std::ffi::OsStr;
#[path = "../common/mod.rs"]
mod common;
use common::*;
fn ksymvers_run<I: IntoIterator<Item = S>, S: AsRef<OsStr>>(args: I) -> RunResult {
tool_run(env!("CARGO_BIN_EXE_ksymvers"), args)
}
#[test]
fn compare_cmd_identical() {
// Check that the comparison of two identical symvers files shows no differences.
let result = ksymvers_run([
"compare",
"tests/ksymvers/compare_cmd/a.symvers",
"tests/ksymvers/compare_cmd/a.symvers",
]);
assert!(result.status.success());
assert_eq!(result.stdout, "");
assert_eq!(result.stderr, "");
}
#[test]
fn compare_cmd_changed() {
// Check that the comparison of two different symvers files shows relevant differences and
// results in the command exiting with a non-zero status.
let result = ksymvers_run([
"compare",
"tests/ksymvers/compare_cmd/a.symvers",
"tests/ksymvers/compare_cmd/b.symvers",
]);
assert!(!result.status.success());
assert_eq!(
result.stdout,
concat!(
"Export 'foo' changed CRC from '0x12345678' to '0x09abcdef'\n", //
)
);
assert_eq!(result.stderr, "");
}
07070100000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000B00000000TRAILER!!!461 blocks