File inter-server-sync-0.3.3.obscpio of Package inter-server-sync
File: inter-server-sync/.github/workflows/github-actions-tests.yml

```
# SPDX-FileCopyrightText: 2023 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0

on: [push, pull_request]
name: Test
jobs:
  test:
    strategy:
      matrix:
        go-version: [1.20.x]
        os: [ubuntu-latest]
    runs-on: ${{ matrix.os }}
    steps:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Test
        run: go test ./... -cover -race
```

File: inter-server-sync/.github/workflows/reuse.yml

```
# SPDX-FileCopyrightText: 2022 Free Software Foundation Europe e.V. <https://fsfe.org>
#
# SPDX-License-Identifier: CC0-1.0

name: REUSE Compliance Check
on:
  push:
    branches:
      - main
  pull_request:
    types:
      - opened
      - reopened
      - synchronize
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: REUSE Compliance Check
        uses: fsfe/reuse-action@v2
```

File: inter-server-sync/.gitignore

```
# SPDX-FileCopyrightText: 2023 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0

.idea/
rhn.conf
/inter-server-sync
vendor.tar.gz
*.pyc
__pycache__
```

File: inter-server-sync/.reuse/dep5

```
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: inter-server-sync
Upstream-Contact: Uyuni Project
Source: https://github.com/uyuni-project/inter-server-sync

# Sample paragraph, commented out:
#
# Files: src/*
# Copyright: $YEAR $NAME <$CONTACT>
# License: ...

Files: go.mod
Copyright: 2023 SUSE LLC
License: Apache-2.0

Files: go.sum
Copyright: 2023 SUSE LLC
License: Apache-2.0

Files: inter-server-sync.changes*
Copyright: 2023 SUSE LLC
License: Apache-2.0

Files: inter-server-sync.spec
Copyright: 2023 SUSE LLC
License: Apache-2.0

Files: .tito/tito.props
Copyright: 2023 SUSE LLC
License: Apache-2.0

Files: .tito/packages/*
Copyright: 2023 SUSE LLC
License: Apache-2.0
```

File: inter-server-sync/.tito/custom/custom.py

```
# Copyright (c) 2018 SUSE Linux Products GmbH
# SPDX-FileCopyrightText: 2023 SUSE LLC
#
# SPDX-License-Identifier: GPL-2.0-only

"""
Code for building packages in SUSE that need generated code not tracked in git.
"""

import os

from tito.builder import Builder
from tito.common import info_out, run_command


class SuseGitExtraGenerationBuilder(Builder):

    def _setup_sources(self):
        Builder._setup_sources(self)
        setup_execution_file_name = "setup.sh"
        setup_file_dir = os.path.join(self.git_root, self.relative_project_dir)
        setup_file_path = os.path.join(setup_file_dir, setup_execution_file_name)
        if os.path.exists(setup_file_path):
            info_out("Executing %s" % setup_file_path)
            output = run_command("[[ -x %s ]] && %s" % (setup_file_path, setup_file_path), True)
            filename = output.split('\n')[-1]
            if filename and os.path.exists(os.path.join(setup_file_dir, filename)):
                info_out("Copying %s to %s" % (os.path.join(setup_file_dir, filename), self.rpmbuild_sourcedir))
                run_command("cp %s %s/" % (os.path.join(setup_file_dir, filename), self.rpmbuild_sourcedir), True)
                self.sources.append(os.path.join(self.rpmbuild_sourcedir, filename))
```

File: inter-server-sync/.tito/packages/inter-server-sync

```
0.3.3-1 ./
```

File: inter-server-sync/.tito/tito.props

```
[buildconfig]
builder = custom.SuseGitExtraGenerationBuilder
tagger = tito.tagger.SUSETagger
changelog_with_email = 0
changelog_do_not_remove_cherrypick = 0
no_default_changelog = 1
lib_dir=.tito/custom
```
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 07070100000010000081A4000003E800000064000000016613BCD000001B88000000000000000000000000000000000000002700000000inter-server-sync/LICENSES/CC0-1.0.txtCreative Commons Legal Code CC0 1.0 Universal CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED HEREUNDER. Statement of Purpose The laws of most jurisdictions throughout the world automatically confer exclusive Copyright and Related Rights (defined below) upon the creator and subsequent owner(s) (each and all, an "owner") of an original work of authorship and/or a database (each, a "Work"). 
Certain owners wish to permanently relinquish those rights to a Work for the purpose of contributing to a commons of creative, cultural and scientific works ("Commons") that the public can reliably and without fear of later claims of infringement build upon, modify, incorporate in other works, reuse and redistribute as freely as possible in any form whatsoever and for any purposes, including without limitation commercial purposes. These owners may contribute to the Commons to promote the ideal of a free culture and the further production of creative, cultural and scientific works, or to gain reputation or greater distribution for their Work in part through the use and efforts of others. For these and/or other purposes and motivations, and without any expectation of additional consideration or compensation, the person associating CC0 with a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and publicly distribute the Work under its terms, with knowledge of his or her Copyright and Related Rights in the Work and the meaning and intended legal effect of CC0 on those rights. 1. Copyright and Related Rights. A Work made available under CC0 may be protected by copyright and related or neighboring rights ("Copyright and Related Rights"). Copyright and Related Rights include, but are not limited to, the following: i. the right to reproduce, adapt, distribute, perform, display, communicate, and translate a Work; ii. moral rights retained by the original author(s) and/or performer(s); iii. publicity and privacy rights pertaining to a person's image or likeness depicted in a Work; iv. rights protecting against unfair competition in regards to a Work, subject to the limitations in paragraph 4(a), below; v. rights protecting the extraction, dissemination, use and reuse of data in a Work; vi. database rights (such as those arising under Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, and under any national implementation thereof, including any amended or successor version of such directive); and vii. other similar, equivalent or corresponding rights throughout the world based on applicable law or treaty, and any national implementations thereof. 2. Waiver. To the greatest extent permitted by, but not in contravention of, applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and unconditionally waives, abandons, and surrenders all of Affirmer's Copyright and Related Rights and associated claims and causes of action, whether now known or unknown (including existing as well as future claims and causes of action), in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each member of the public at large and to the detriment of Affirmer's heirs and successors, fully intending that such Waiver shall not be subject to revocation, rescission, cancellation, termination, or any other legal or equitable action to disrupt the quiet enjoyment of the Work by the public as contemplated by Affirmer's express Statement of Purpose. 3. Public License Fallback. 
Should any part of the Waiver for any reason be judged legally invalid or ineffective under applicable law, then the Waiver shall be preserved to the maximum extent permitted taking into account Affirmer's express Statement of Purpose. In addition, to the extent the Waiver is so judged Affirmer hereby grants to each affected person a royalty-free, non transferable, non sublicensable, non exclusive, irrevocable and unconditional license to exercise Affirmer's Copyright and Related Rights in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "License"). The License shall be deemed effective as of the date CC0 was applied by Affirmer to the Work. Should any part of the License for any reason be judged legally invalid or ineffective under applicable law, such partial invalidity or ineffectiveness shall not invalidate the remainder of the License, and in such case Affirmer hereby affirms that he or she will not (i) exercise any of his or her remaining Copyright and Related Rights in the Work or (ii) assert any associated claims and causes of action with respect to the Work, in either case contrary to Affirmer's express Statement of Purpose. 4. Limitations and Disclaimers. a. No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, licensed or otherwise affected by this document. b. Affirmer offers the Work as-is and makes no representations or warranties of any kind concerning the Work, express, implied, statutory or otherwise, including without limitation warranties of title, merchantability, fitness for a particular purpose, non infringement, or the absence of latent or other defects, accuracy, or the present or absence of errors, whether or not discoverable, all to the greatest extent permissible under applicable law. c. Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work. d. Affirmer understands and acknowledges that Creative Commons is not a party to this document and has no duty or obligation with respect to this CC0 or use of the Work. 07070100000011000081A4000003E800000064000000016613BCD0000043B9000000000000000000000000000000000000002C00000000inter-server-sync/LICENSES/GPL-2.0-only.txtGNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc. 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) 
You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. 
You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. 
(This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. 
It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. one line to give the program's name and an idea of what it does. Copyright (C) yyyy name of author This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. 
File: inter-server-sync/README.md

<!--
SPDX-FileCopyrightText: 2023 SUSE LLC

SPDX-License-Identifier: Apache-2.0
-->

[![REUSE status](https://api.reuse.software/badge/git.fsfe.org/reuse/api)](https://api.reuse.software/info/git.fsfe.org/reuse/api)

# Inter Server Sync (ISS)

[![Test](https://github.com/uyuni-project/inter-server-sync/actions/workflows/github-actions-tests.yml/badge.svg)](https://github.com/uyuni-project/inter-server-sync/actions/workflows/github-actions-tests.yml)

## Usage

Run the command for more information: `inter-server-sync -h`

## Known limitations

- Source and target servers need to be on the same version.
- Export and import organizations should have the same name.
- The export folder needs to be synced to the target server by hand.

### On the source server

- **Create the export dir**: `mkdir ~/export`
- **Run the command**: `inter-server-sync export --serverConfig=/etc/rhn/rhn.conf --outputDir=~/export --channels=channel_label,channel_label`
- **Copy the export directory to the target server**: `rsync -r ~/export root@<Target_server>:~/`

### On the target server

- **Run the command**: `inter-server-sync import --importDir ~/export/`

## Database connection configuration

The database connection configuration is loaded by default from `/etc/rhn/rhn.conf`. The file location can be overridden. For development environments one can use a sample file in this project. Steps to run it locally in development mode:

1. Copy the sample file: `cp rhn.conf.example rhn.conf`
2. Fill in all properties in `rhn.conf` with the appropriate values
3. Use this configuration file by specifying the config parameter: `go run . -config=rhn.conf`

## Extra

### Dot graph with schema metadata

`go run . dot --serverConfig=rhn.conf | dot -Tx11`

## Build and release

### 1. Create tag

- Install `uyuni-releng-tools` from [systemsmanagement:Uyuni:Utils](https://build.opensuse.org/project/show/systemsmanagement:Uyuni:Utils)
- Create a tag with the version number using `tito` and push it to GitHub

```
tito tag
git push origin inter-server-sync-x.y.z-1
```

### 2. Create a GitHub release (optional)

- On GitHub, create a new version release based on the previous tag

### 3. OBS: project preparation

- Project names:
  - Uyuni: `systemsmanagement:Uyuni:Master`
  - Head: `Devel:Galaxy:Manager:Head`
  - Manager 4.3: `Devel:Galaxy:Manager:4.3`
- Package name: `inter-server-sync`

In the checked-out git repo:

```
export OSCAPI=https://api.opensuse.org
osc -A https://api.opensuse.org branch systemsmanagement:Uyuni:Master
export OBS_PROJ=home:<your_nick>:branches:systemsmanagement:Uyuni:Master
build-packages-for-obs && push-packages-to-obs
```

### 4. OBS: create submit requests

Uyuni: `osc -A https://api.opensuse.org sr --no-cleanup <your_project> inter-server-sync systemsmanagement:Uyuni:Master`

Manager Head: `osc -A https://api.suse.de sr --no-cleanup openSUSE.org:<your_project> inter-server-sync Devel:Galaxy:Manager:Head`

For each maintained SUSE Manager version, one SR in the form: `iosc sr --no-cleanup openSUSE.org:<your_project> inter-server-sync Devel:Galaxy:Manager:X.Y`
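The "Database connection configuration" section above relies on a local `rhn.conf`. As a minimal sketch of the database properties it typically carries — the authoritative key names live in `rhn.conf.example`, which is not part of this listing, and all values below are placeholders:

```
# Hypothetical rhn.conf sketch; check key names against rhn.conf.example
db_backend = postgresql
db_name = susemanager
db_user = spacewalk
db_password = spacewalk
db_host = localhost
db_port = 5432
```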
File: inter-server-sync/cmd/dot.go

```
// SPDX-FileCopyrightText: 2023 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0

package cmd

import (
	"github.com/spf13/cobra"
	"github.com/uyuni-project/inter-server-sync/entityDumper"
	"github.com/uyuni-project/inter-server-sync/schemareader"
)

// dotCmd represents the dot command
var dotCmd = &cobra.Command{
	Use:    "dot",
	Short:  "export database schema as dot diagram",
	Hidden: true,
	Run: func(cmd *cobra.Command, args []string) {
		db := schemareader.GetDBconnection(serverConfig)
		defer db.Close()
		tables := schemareader.ReadTablesSchema(db, entityDumper.SoftwareChannelTableNames())
		schemareader.DumpToGraphviz(tables)
	},
}

func init() {
	rootCmd.AddCommand(dotCmd)
}
```

File: inter-server-sync/cmd/export.go

```
// SPDX-FileCopyrightText: 2023 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0

package cmd

import (
	"os"
	"path"

	"github.com/rs/zerolog/log"
	"github.com/spf13/cobra"
	"github.com/uyuni-project/inter-server-sync/entityDumper"
	"github.com/uyuni-project/inter-server-sync/utils"
)

var exportCmd = &cobra.Command{
	Use:   "export",
	Short: "Export server entities to be imported in other server",
	Run:   runExport,
}

var channels []string
var channelWithChildren []string
var configChannels []string
var outputDir string
var metadataOnly bool
var startingDate string
var includeImages bool
var includeContainers bool
var orgs []uint

func init() {
	exportCmd.Flags().StringSliceVar(&channels, "channels", nil, "Channels to be exported")
	exportCmd.Flags().StringSliceVar(&channelWithChildren, "channel-with-children", nil, "Channels to be exported")
	exportCmd.Flags().StringVar(&outputDir, "outputDir", ".", "Location for generated data")
	exportCmd.Flags().BoolVar(&metadataOnly, "metadataOnly", false, "export only metadata")
	exportCmd.Flags().StringVar(&startingDate, "packagesOnlyAfter", "", "Only export packages added or modified after the specified date (date format can be 'YYYY-MM-DD' or 'YYYY-MM-DD hh:mm:ss')")
	exportCmd.Flags().StringSliceVar(&configChannels, "configChannels", nil, "Configuration Channels to be exported")
	exportCmd.Flags().BoolVar(&includeImages, "images", false, "Export OS images and associated metadata")
	exportCmd.Flags().BoolVar(&includeContainers, "containers", false, "Export containers metadata")
	exportCmd.Flags().UintSliceVar(&orgs, "orgLimit", nil, "Export only for specified organizations")
	exportCmd.Args = cobra.NoArgs
	rootCmd.AddCommand(exportCmd)
}

func runExport(cmd *cobra.Command, args []string) {
	log.Info().Msg("Export started")
	// check output dir existence and create it if needed.
	// Validate data
	validatedDate, ok := utils.ValidateDate(startingDate)
	if !ok {
		log.Fatal().Msg("Unable to validate the date. Allowed formats are 'YYYY-MM-DD' or 'YYYY-MM-DD hh:mm:ss'")
	}
	options := entityDumper.DumperOptions{
		ServerConfig:              serverConfig,
		ChannelLabels:             channels,
		ConfigLabels:              configChannels,
		ChannelWithChildrenLabels: channelWithChildren,
		OutputFolder:              outputDir,
		MetadataOnly:              metadataOnly,
		StartingDate:              validatedDate,
		OSImages:                  includeImages,
		Containers:                includeContainers,
		Orgs:                      orgs,
	}
	entityDumper.DumpAllEntities(options)
	var versionfile string
	versionfile = path.Join(utils.GetAbsPath(outputDir), "version.txt")
	vf, err := os.Open(versionfile)
	defer vf.Close()
	if os.IsNotExist(err) {
		f, err := os.Create(versionfile)
		if err != nil {
			log.Panic().Msg("Unable to create version file")
		}
		vf = f
	}
	version, product := utils.GetCurrentServerVersion(serverConfig)
	vf.WriteString("product_name = " + product + "\n" + "version = " + version + "\n")
	log.Info().Msgf("Export done. Directory: %s", outputDir)
}
```
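`runExport` finishes by recording the exporting server's product and version in `version.txt` inside the output directory; the import side (cmd/import.go, next) compares these against the target server and aborts on a mismatch. Given the `WriteString` call above, the file looks like this — the product and version values here are purely illustrative:

```
product_name = Uyuni
version = 2024.03
```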
File: inter-server-sync/cmd/import.go

```
// SPDX-FileCopyrightText: 2023 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0

package cmd

import (
	"fmt"
	"io"
	"os"
	"os/exec"
	"path"
	"strings"

	"github.com/rs/zerolog/log"
	"github.com/spf13/cobra"
	"github.com/uyuni-project/inter-server-sync/dumper/pillarDumper"
	"github.com/uyuni-project/inter-server-sync/utils"
	"github.com/uyuni-project/inter-server-sync/xmlrpc"
)

var importCmd = &cobra.Command{
	Use:   "import",
	Short: "Import data to server",
	Run:   runImport,
}

var importDir string
var xmlRpcUser string
var xmlRpcPassword string

func init() {
	importCmd.Flags().StringVar(&importDir, "importDir", ".", "Location import data from")
	importCmd.Flags().StringVar(&xmlRpcUser, "xmlRpcUser", "admin", "A username to access the XML-RPC Api")
	importCmd.Flags().StringVar(&xmlRpcPassword, "xmlRpcPassword", "admin", "A password to access the XML-RPC Api")
	importCmd.Args = cobra.NoArgs
	rootCmd.AddCommand(importCmd)
}

func runImport(cmd *cobra.Command, args []string) {
	absImportDir := utils.GetAbsPath(importDir)
	log.Info().Msg(fmt.Sprintf("starting import from dir %s", absImportDir))
	fversion, fproduct := getImportVersionProduct(absImportDir)
	sversion, sproduct := utils.GetCurrentServerVersion(serverConfig)
	if fversion != sversion || fproduct != sproduct {
		log.Panic().Msgf("Wrong version detected. Fileversion = %s ; Serverversion = %s", fversion, sversion)
	}
	validateFolder(absImportDir)
	runPackageFileSync(absImportDir)
	runImageFileSync(absImportDir, serverConfig)
	runImportSql(absImportDir, serverConfig)
	log.Info().Msg("import finished")
}

func getImportVersionProduct(path string) (string, string) {
	var versionfile string
	versionfile = path + "/version.txt"
	version, err := utils.ScannerFunc(versionfile, "version")
	if err != nil {
		log.Error().Msg("Version not found.")
	}
	product, err := utils.ScannerFunc(versionfile, "product_name")
	if err != nil {
		log.Fatal().Msg("Product not found")
	}
	log.Debug().Msgf("Import Product: %s; Version: %s", product, version)
	return version, product
}

func validateFolder(absImportDir string) {
	_, err := os.Stat(fmt.Sprintf("%s/sql_statements.sql.gz", absImportDir))
	if err != nil {
		if os.IsNotExist(err) {
			_, err = os.Stat(fmt.Sprintf("%s/sql_statements.sql", absImportDir))
			if err != nil {
				log.Fatal().Err(err).Msg("No usable .sql or .gz file found in import directory")
			}
		} else {
			log.Fatal().Err(err)
		}
	}
}

func hasConfigChannels(absImportDir string) bool {
	_, err := os.Stat(fmt.Sprintf("%s/exportedConfigs.txt", absImportDir))
	log.Info().Err(err).Msg(fmt.Sprintf("no export config file found: %s/exportedConfigs.txt", absImportDir))
	return err == nil || os.IsExist(err)
}

func runPackageFileSync(absImportDir string) {
	packagesImportDir := fmt.Sprintf("%s/packages/", absImportDir)
	err := utils.FolderExists(packagesImportDir)
	if err != nil {
		if os.IsNotExist(err) {
			log.Info().Msg("no package files to import")
			return
		} else {
			log.Fatal().Err(err).Msg("Error getting import packages folder")
		}
	}
	rsyncParams := make([]string, 0)
	if log.Debug().Enabled() {
		rsyncParams = append(rsyncParams, "-v")
	}
	rsyncParams = append(rsyncParams, "-og", "--chown=wwwrun:www", "-r", packagesImportDir, "/var/spacewalk/packages/")
	cmd := exec.Command("rsync", rsyncParams...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	log.Info().Msg("starting importing package files")
	err = cmd.Run()
	if err != nil {
		log.Fatal().Err(err).Msg("error importing package files")
	}
}

func runConfigFilesSync(labels []string, user string, password string) (interface{}, error) {
	client := xmlrpc.NewClient(user, password)
	return client.SyncConfigFiles(labels)
}

func runImageFileSync(absImportDir string, serverConfig string) {
	imagesImportDir := path.Join(absImportDir, "images")
	err := utils.FolderExists(imagesImportDir)
	if err != nil {
		if os.IsNotExist(err) {
			log.Info().Msg("No image files to import")
			return
		} else {
			log.Fatal().Err(err).Msg("Error reading import folder for images")
		}
	}
	rsyncParams := make([]string, 0)
	if log.Debug().Enabled() {
		rsyncParams = append(rsyncParams, "-v")
	}
	rsyncParams = append(rsyncParams, "-og", "--chown=salt:susemanager", "--chmod=Du=rwx,Dgo=rx,Fu=rw,Fgo=r", "-r", "--exclude=pillars", imagesImportDir+"/", "/srv/www/os-images")
	cmd := exec.Command("rsync", rsyncParams...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	log.Info().Msg("Copying image files")
	err = cmd.Run()
	if err != nil {
		log.Fatal().Err(err).Msg("Error importing image files")
	}
	pillarImportDir := path.Join(absImportDir, "images", "pillars")
	err = utils.FolderExists(pillarImportDir)
	if err != nil {
		if os.IsNotExist(err) {
			log.Debug().Msg("No pillar files to import")
			return
		} else {
			log.Fatal().Err(err).Msg("Error reading import folder for pillars")
		}
	}
	log.Info().Msg("Copying image pillar files")
	pillarDumper.ImportImagePillars(pillarImportDir, utils.GetCurrentServerFQDN(serverConfig))
}

func importSqlFile(absImportDir string) {
	cmd := exec.Command("spacewalk-sql", fmt.Sprintf("%s/sql_statements.sql", absImportDir))
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	log.Info().Msg("Starting SQL import")
	err := cmd.Run()
	if err != nil {
		log.Fatal().Err(err).Msgf("Error running the SQL script")
	}
}

func importGzFile(absImportDir string) {
	cUnzip := exec.Command("gunzip", "-c", fmt.Sprintf("%s/sql_statements.sql.gz", absImportDir))
	cImport := exec.Command("spacewalk-sql", "-")
	pr, pw := io.Pipe()
	cUnzip.Stdout = pw
	cUnzip.Stderr = os.Stderr
	cImport.Stdin = pr
	cImport.Stdout = os.Stdout
	cImport.Stderr = os.Stderr
	log.Info().Msg("Starting SQL/GZ import")
	cUnzip.Start()
	cImport.Start()
	go func() {
		defer pw.Close()
		cUnzip.Wait()
	}()
	err := cImport.Wait()
	if err != nil {
		log.Fatal().Err(err).Msgf("Error running the SQL script")
	}
}

func runImportSql(absImportDir string, serverConfig string) {
	if _, err := os.Stat(fmt.Sprintf("%s/sql_statements.sql.gz", absImportDir)); err == nil {
		importGzFile(absImportDir)
	} else {
		if _, err := os.Stat(fmt.Sprintf("%s/sql_statements.sql", absImportDir)); err == nil {
			importSqlFile(absImportDir)
		}
	}
	pillarDumper.UpdateImagePillars(serverConfig)
	if hasConfigChannels(absImportDir) {
		labels := utils.ReadFileByLine(fmt.Sprintf("%s/exportedConfigs.txt", absImportDir))
		log.Debug().Msg("Will call xml-rpc API to update filesystem")
		_, err := runConfigFilesSync(labels, xmlRpcUser, xmlRpcPassword)
		if err != nil {
			log.Error().Err(err).Msgf(
				"Error recreating configuration files. Please run spacecmd api configchannel.syncSaltFilesOnDisk -A '[[%s]]'",
				strings.Join(labels, ", "),
			)
		}
	} else {
		log.Debug().Msg("No configuration channels, NO CALL to xml-rpc API")
	}
}
```
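`importGzFile` above wires the two child processes together with an `io.Pipe` instead of shelling out to a shell pipeline; the effect is equivalent to the following command, shown only for illustration — running it by hand would bypass the tool's version check:

```
gunzip -c ~/export/sql_statements.sql.gz | spacewalk-sql -
```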
File: inter-server-sync/cmd/root.go

```
// SPDX-FileCopyrightText: 2023 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0

package cmd

import (
	"fmt"
	"log/syslog"
	"os"
	"runtime/pprof"
	"strconv"
	"strings"
	"time"

	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
	"github.com/spf13/cobra"
)

var Version = "0.0.0"

var rootCmd = &cobra.Command{
	Use:     "inter-server-sync",
	Short:   "Uyuni Inter Server Sync tool",
	Version: Version,
}

func Execute() {
	cobra.CheckErr(rootCmd.Execute())
}

// var cfgFile string
var logLevel string
var serverConfig string
var cpuProfile string
var memProfile string

func init() {
	rootCmd.PersistentPreRun = func(cmd *cobra.Command, args []string) {
		logInit()
		cpuProfileInit()
		memProfileDump()
	}
	rootCmd.PersistentPostRun = func(cmd *cobra.Command, args []string) {
		cpuProfileTearDown()
	}
	rootCmd.PersistentFlags().StringVar(&logLevel, "logLevel", "error", "application log level")
	rootCmd.PersistentFlags().StringVar(&serverConfig, "serverConfig", "/etc/rhn/rhn.conf", "Server configuration file")
	rootCmd.PersistentFlags().StringVar(&cpuProfile, "cpuProfile", "", "cpuProfile export folder location")
	rootCmd.PersistentFlags().StringVar(&memProfile, "memProfile", "", "memProfile export folder location")
}

func logCallerMarshalFunction(file string, line int) string {
	paths := strings.Split(file, "/")
	callerFile := file
	foundSubDir := false
	for _, currentPath := range paths {
		if foundSubDir {
			if callerFile != "" {
				callerFile = callerFile + "/"
			}
			callerFile = callerFile + currentPath
		} else {
			if strings.Contains(currentPath, "inter-server-sync") {
				foundSubDir = true
				callerFile = ""
			}
		}
	}
	return callerFile + ":" + strconv.Itoa(line)
}

func logInit() {
	syslogger, err := syslog.New(syslog.LOG_INFO|syslog.LOG_DEBUG|syslog.LOG_WARNING|syslog.LOG_ERR, "inter-server-sync")
	syslogwriter := zerolog.SyslogLevelWriter(syslogger)
	multi := zerolog.MultiLevelWriter(syslogwriter, os.Stdout)
	log.Logger = zerolog.New(multi).With().Timestamp().Caller().Logger()
	zerolog.CallerMarshalFunc = logCallerMarshalFunction
	level, err := zerolog.ParseLevel(logLevel)
	if err != nil {
		level = zerolog.InfoLevel
	}
	zerolog.SetGlobalLevel(level)
	log.Info().Msg("Inter server sync started")
}

func cpuProfileInit() {
	if cpuProfile != "" {
		f, err := os.Create(cpuProfile + "end_cpu_profile.prof")
		if err != nil {
			log.Error().Err(err).Msg("could not create CPU profile: ")
		}
		defer f.Close() // error handling omitted for example
		if err := pprof.StartCPUProfile(f); err != nil {
			log.Panic().Err(err).Msg("could not start CPU profile: ")
		}
	}
}

func cpuProfileTearDown() {
	if cpuProfile != "" {
		pprof.StopCPUProfile()
	}
}

func memProfileDump() {
	if log.Debug().Enabled() && len(memProfile) > 0 {
		go func() {
			count := 0
			for {
				time.Sleep(30 * time.Second)
				fileName := fmt.Sprintf("%s/memory_profile_%d.prof", memProfile, count)
				f, err := os.Create(fileName)
				if err != nil {
					log.Error().Err(err).Msg(fmt.Sprintf("could not create memory profile file: %s", fileName))
					break
				}
				if err := pprof.WriteHeapProfile(f); err != nil {
					log.Error().Err(err).Msg("could not write memory profile: ")
				}
				f.Close()
				count++
			}
		}()
	}
}
```
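All four flags above are persistent, so they apply to `export`, `import`, and `dot` alike. A hypothetical invocation that raises the log level and enables both profilers — note that `cpuProfile` is concatenated directly with `end_cpu_profile.prof`, so a trailing slash matters, and `memProfileDump` only writes heap profiles when the debug level is active:

```
inter-server-sync export --channels=my_channel --logLevel=debug \
    --cpuProfile=/tmp/iss/ --memProfile=/tmp/iss
```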
07070100000018000041ED000003E800000064000000016613BCD000000000000000000000000000000000000000000000001900000000inter-server-sync/dumper07070100000019000081A4000003E800000064000000016613BCD000001899000000000000000000000000000000000000002900000000inter-server-sync/dumper/crawler_test.go// SPDX-FileCopyrightText: 2023 SUSE LLC // // SPDX-License-Identifier: Apache-2.0 package dumper import ( "reflect" "testing" "github.com/uyuni-project/inter-server-sync/schemareader" "github.com/uyuni-project/inter-server-sync/tests" ) // crawlerTestCase lays down a test scenario for the DataCrawler func type crawlerTestCase struct { repo *tests.DataRepository schemaMetadata MetaDataGraph startTable schemareader.Table startQueryFilter string expectedDataDumper DataDumper } func TestShouldCreateDataDumper(t *testing.T) { // Arrange graph := TablesGraph{ // first order "root": []string{"v31", "v32"}, "v31": []string{"v35", "v36"}, "v32": []string{"v33"}, // second order "v33": []string{"v34"}, // third order with circular dependency "v34": []string{"v35", "v36"}, "v35": []string{"v34"}, "v36": []string{}, } root := "root" testCase := createDataCrawlerTestCase(graph, root) // the data repository expect these statements in the exact same order testCase.repo.Expect("SELECT * FROM root WHERE CUSTOM ;", testCase.schemaMetadata["root"].Columns, 1) testCase.repo.Expect("SELECT id, v35_fk_id, v36_fk_id FROM v31 WHERE id = $1;", testCase.schemaMetadata["v31"].Columns, 1) testCase.repo.Expect("SELECT id, v33_fk_id FROM v32 WHERE id = $1;", testCase.schemaMetadata["v32"].Columns, 1) testCase.repo.Expect("SELECT id, v34_fk_id FROM v33 WHERE id = $1;", testCase.schemaMetadata["v33"].Columns, 1) testCase.repo.Expect("SELECT id, v35_fk_id, v36_fk_id FROM v34 WHERE id = $1;", testCase.schemaMetadata["v34"].Columns, 1) testCase.repo.Expect("SELECT id, v34_fk_id FROM v35 WHERE id = $1;", testCase.schemaMetadata["v35"].Columns, 1) testCase.repo.Expect("SELECT id FROM v36 WHERE id = $1;", testCase.schemaMetadata["v36"].Columns, 1) testCase.repo.Expect("SELECT id, v34_fk_id FROM v35 WHERE id = $1;", testCase.schemaMetadata["v35"].Columns, 1) testCase.repo.Expect("SELECT id FROM v36 WHERE id = $1;", testCase.schemaMetadata["v36"].Columns, 1) // Act dataDumper := DataCrawler( testCase.repo.DB, testCase.schemaMetadata, testCase.startTable, testCase.startQueryFilter, "2022-01-01", ) // Assert if dataDumper.TableData == nil || dataDumper.Paths == nil { t.Errorf("DataDumper was not initiated") } tableDataEqual := reflect.DeepEqual(dataDumper.TableData, testCase.expectedDataDumper.TableData) if !tableDataEqual { t.Errorf("DataDumper.TableData is not expected") } pathsEqual := reflect.DeepEqual(dataDumper.Paths, testCase.expectedDataDumper.Paths) if !pathsEqual { t.Errorf("DataDumper.Paths is not expected") } } // createTestCase is a factory method for writerTestCase func createDataCrawlerTestCase(graph TablesGraph, root string) crawlerTestCase { repo := tests.CreateDataRepository() tablesMetaData, dataDumper := initializeMetaDataGraph(graph, root) return crawlerTestCase{ repo: repo, schemaMetadata: tablesMetaData, startTable: tablesMetaData[root], startQueryFilter: "CUSTOM", expectedDataDumper: dataDumper, } } // followLinkTestCase lays down a test scenario for the shouldFollowReferenceToLink func type followLinkTestCase struct { path []string // path constructed by a recursive function so far currentTable schemareader.Table referencedTable schemareader.Table } func TestShouldFollowForcedNavigations(t *testing.T) { // Arrange var 
shouldFollow bool testCase := followLinkTestCase{ path: []string{}, currentTable: schemareader.Table{ Name: "rhnchannel", }, referencedTable: schemareader.Table{ Name: "susemddata", }, } // Act shouldFollow = shouldFollowReferenceToLink( testCase.path, testCase.currentTable, testCase.referencedTable, ) // Assert if !shouldFollow { t.Errorf("Should follow along forcedNavigations to the referencedTable") } } func TestShouldNotFollowInPath(t *testing.T) { // Arrange var shouldFollow bool testCase := followLinkTestCase{ path: []string{"target"}, currentTable: schemareader.Table{ Name: "source", }, referencedTable: schemareader.Table{ Name: "target", }, } // Act shouldFollow = shouldFollowReferenceToLink( testCase.path, testCase.currentTable, testCase.referencedTable, ) // Assert if shouldFollow { t.Errorf("Should not follow the referencedTable if it is already in the path") } } func TestShouldFollowLinkingTable(t *testing.T) { // Arrange var shouldFollow bool testCase := followLinkTestCase{ path: []string{}, currentTable: schemareader.Table{ Name: "source", }, referencedTable: schemareader.Table{ Name: "sourcetarget", References: []schemareader.Reference{{TableName: "targetreference"}}, }, } // Act shouldFollow = shouldFollowReferenceToLink( testCase.path, testCase.currentTable, testCase.referencedTable, ) // Assert if !shouldFollow { t.Errorf("Should follow the referencedTable if it is a linking table and not referenced by others") } } func TestShouldNotFollowLinkingTable(t *testing.T) { // Arrange var shouldFollow bool testCase := followLinkTestCase{ path: []string{"targetreference"}, currentTable: schemareader.Table{ Name: "source", }, referencedTable: schemareader.Table{ Name: "sourcetarget", References: []schemareader.Reference{{TableName: "targetreference"}}, }, } // Act shouldFollow = shouldFollowReferenceToLink( testCase.path, testCase.currentTable, testCase.referencedTable, ) // Assert if shouldFollow { t.Errorf("Should not follow the referencedTable if one of the tables it itself references is already in the path") } } func TestShouldNotFollowReferencedLinkingTable(t *testing.T) { // Arrange var shouldFollow bool testCase := followLinkTestCase{ path: []string{}, currentTable: schemareader.Table{ Name: "source", }, referencedTable: schemareader.Table{ Name: "sourcetarget", ReferencedBy: []schemareader.Reference{{TableName: "other"}}, }, } // Act shouldFollow = shouldFollowReferenceToLink( testCase.path, testCase.currentTable, testCase.referencedTable, ) // Assert if shouldFollow { t.Errorf("Should not follow the referencedTable if it is a linking table but also is referenced by others") } } 0707010000001A000081A4000003E800000064000000016613BCD0000027C7000000000000000000000000000000000000002800000000inter-server-sync/dumper/dataCrawler.go// SPDX-FileCopyrightText: 2023 SUSE LLC // // SPDX-License-Identifier: Apache-2.0 package dumper import ( "database/sql" "fmt" "strings" "time" "github.com/rs/zerolog/log" "github.com/uyuni-project/inter-server-sync/schemareader" "github.com/uyuni-project/inter-server-sync/sqlUtil" ) // DataCrawler will go through all the elements in the initialDataSet an extract related data // for all tables presented in the schemaMetadata by following foreign keys and references to the table row // The result will be a structure containing ID of each row which should be exported per table func DataCrawler(db *sql.DB, schemaMetadata map[string]schemareader.Table, startTable schemareader.Table, startQueryFilter string, startingDate string) DataDumper { result := 
DataDumper{make(map[string]TableDump, 0), make(map[string]bool)} itemsToProcess := initialDataSet(db, startTable, startQueryFilter) if log.Debug().Enabled() { go func() { count := 0 for { time.Sleep(30 * time.Second) if len(itemsToProcess) == 0 { break } keysSize := 0 maxSize := 0 table := "" for key, value := range result.TableData { keysSize = keysSize + len(value.KeyMap) if len(value.KeyMap) > maxSize { maxSize = len(value.KeyMap) table = key } } log.Debug().Msgf("#count: %d #rowsToProcess: #%d ; #rowsToDiscover: #%d --> Bigger export table: %s: #%d", count, len(itemsToProcess), keysSize, table, maxSize) count++ } }() } IterateItemsLoop: for len(itemsToProcess) > 0 { // LIFO instead of FIFO improves performance itemToProcess := itemsToProcess[len(itemsToProcess)-1] itemsToProcess = itemsToProcess[0 : len(itemsToProcess)-1] table, tableExists := schemaMetadata[itemToProcess.tableName] if !tableExists { continue IterateItemsLoop } keyColumnData := extractRowKeyData(table, itemToProcess) keyIdToMap := generateKeyIdToMap(keyColumnData) resultTableValues, resultExists := result.TableData[table.Name] if resultExists { _, rowProcessed := resultTableValues.KeyMap[keyIdToMap] if rowProcessed { continue IterateItemsLoop } } else { resultTableValues = TableDump{TableName: table.Name, KeyMap: make(map[string]bool), Keys: make([]TableKey, 0)} } resultTableValues.KeyMap[keyIdToMap] = true resultTableValues.Keys = append(resultTableValues.Keys, keyColumnData) result.TableData[table.Name] = resultTableValues _, okPath := result.Paths[strings.Join(itemToProcess.path, ",")] if !okPath { result.Paths[strings.Join(itemToProcess.path, ",")] = true } newItems := append(followReferencesTo(db, schemaMetadata, table, itemToProcess, startingDate), followReferencesFrom(db, schemaMetadata, table, itemToProcess, startingDate)...) itemsToProcess = append(itemsToProcess, newItems...) 
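// Both directions are explored from each row: rows it references (followReferencesFrom)
// and rows that reference it (followReferencesTo). The KeyMap lookup above guarantees a
// row is expanded at most once, which keeps circular references from looping forever.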
} return result } func initialDataSet(db *sql.DB, startTable schemareader.Table, whereFilter string) []processItem { whereClause := "" if len(whereFilter) > 0 { whereClause = fmt.Sprintf("WHERE %s", whereFilter) } sql := fmt.Sprintf(`SELECT * FROM %s %s ;`, startTable.Name, whereClause) rows := sqlUtil.ExecuteQueryWithResults(db, sql) initialDataSet := make([]processItem, 0) for _, row := range rows { initialDataSet = append(initialDataSet, processItem{startTable.Name, row, []string{startTable.Name}}) } return initialDataSet } func generateKeyIdToMap(data TableKey) string { keyValuesList := make([]string, 0) for _, value := range data.Key { valueStr := fmt.Sprintf("%s", value.Value) keyValuesList = append(keyValuesList, valueStr) } return strings.Join(keyValuesList, "$$") } func extractRowKeyData(table schemareader.Table, itemToProcess processItem) TableKey { keys := make([]RowKey, 0) if len(table.PKColumns) > 0 { for pkColumn := range table.PKColumns { keys = append(keys, RowKey{pkColumn, formatField(itemToProcess.row[table.ColumnIndexes[pkColumn]])}) } } else { for _, pkColumn := range table.UniqueIndexes[table.MainUniqueIndexName].Columns { keys = append(keys, RowKey{pkColumn, formatField(itemToProcess.row[table.ColumnIndexes[pkColumn]])}) } } return TableKey{keys} } func shouldApplyStartingDate(startingDate string, tableName string) bool { return startingDate != "" && (tableName == "rhnchannelerrata" || tableName == "rhnchannelpackage" || tableName == "susemddata" || tableName == "rhnerratafilechannel") } func followReferencesFrom(db *sql.DB, schemaMetadata map[string]schemareader.Table, table schemareader.Table, row processItem, startingDate string) []processItem { result := make([]processItem, 0) for _, reference := range table.References { foreignTable, ok := schemaMetadata[reference.TableName] if !ok { continue } targetTableVisited := false for _, p := range row.path { if strings.Compare(p, foreignTable.Name) == 0 { targetTableVisited = true break } } if targetTableVisited { continue } whereParameters := make([]string, 0) scanParameters := make([]interface{}, 0) for localColumn, foreignColumn := range reference.ColumnMapping { whereParameters = append(whereParameters, fmt.Sprintf("%s = $%d", foreignColumn, len(whereParameters)+1)) scanParameters = append(scanParameters, row.row[table.ColumnIndexes[localColumn]].Value) } if shouldApplyStartingDate(startingDate, reference.TableName) { whereParameters = append(whereParameters, fmt.Sprintf("%s >= $%d::timestamp", "modified", len(whereParameters)+1)) scanParameters = append(scanParameters, startingDate) } formattedColumns := strings.Join(foreignTable.Columns, ", ") formattedWhereParameters := strings.Join(whereParameters, " and ") sql := fmt.Sprintf(`SELECT %s FROM %s WHERE %s;`, formattedColumns, reference.TableName, formattedWhereParameters) followRows := sqlUtil.ExecuteQueryWithResults(db, sql, scanParameters...) if len(followRows) > 0 { for _, followRow := range followRows { newPath := make([]string, 0) newPath = append(newPath, row.path...) 
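// A fresh copy of the parent path is taken for every discovered row, so sibling
// work items never share (and accidentally mutate) the same backing array.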
newPath = append(newPath, foreignTable.Name) result = append(result, processItem{foreignTable.Name, followRow, newPath}) } } } return result } func shouldFollowToLinkPreOrder(path []string, currentTable schemareader.Table, referencedTable schemareader.Table) bool { forbiddenNavigations := map[string][]string{ "rhnconfigfile": {"rhnconfigrevision"}, } if tableNavigation, ok := forbiddenNavigations[currentTable.Name]; ok { for _, targetNavigationTable := range tableNavigation { if strings.Compare(targetNavigationTable, referencedTable.Name) == 0 { return false } } } return true } func shouldFollowReferenceToLink(path []string, currentTable schemareader.Table, referencedTable schemareader.Table) bool { // if we already passed through the referencedTable we don't want to follow it again for _, p := range path { if strings.Compare(p, referencedTable.Name) == 0 { return false } } forcedNavigations := map[string][]string{ "rhnchannelfamily": {"rhnpublicchannelfamily"}, "rhnchannel": {"susemddata", "suseproductchannel", "rhnreleasechannelmap", "rhndistchannelmap", "rhnerratafilechannel"}, "suseproducts": {"suseproductextension", "suseproductsccrepository"}, "rhnpackageevr": {"rhnpackagenevra"}, "rhnerrata": {"rhnerratafile"}, "rhnconfigchannel": {"rhnconfigfile"}, "rhnconfigfile": {"rhnconfigrevision"}, } if tableNavigation, ok := forcedNavigations[currentTable.Name]; ok { for _, targetNavigationTable := range tableNavigation { if strings.Compare(targetNavigationTable, referencedTable.Name) == 0 { return true } } } // If nothing references the referencedTable, and currentTable is the dominant side of the // linking table (judged by comparing table names), we should try to follow it. if len(referencedTable.ReferencedBy) == 0 && strings.HasPrefix(referencedTable.Name, currentTable.Name) { for _, ref := range referencedTable.References { // Walk through all the tables that referencedTable itself references, 
// ignoring the reference back to the currentTable, and check whether any of them is already part of the path. // If we have already passed there, we must not follow this branch again. if strings.Compare(currentTable.Name, ref.TableName) != 0 { for _, p := range path { if strings.Compare(p, ref.TableName) == 0 { return false } } } } return true } return false } func followReferencesTo(db *sql.DB, schemaMetadata map[string]schemareader.Table, table schemareader.Table, row processItem, startingDate string) []processItem { result := make([]processItem, 0) for _, reference := range table.ReferencedBy { referencedTable, ok := schemaMetadata[reference.TableName] if !ok { continue } if !shouldFollowReferenceToLink(row.path, table, referencedTable) { continue } whereParameters := make([]string, 0) scanParameters := make([]interface{}, 0) for localColumn, foreignColumn := range reference.ColumnMapping { whereParameters = append(whereParameters, fmt.Sprintf("%s = $%d", localColumn, len(whereParameters)+1)) scanParameters = append(scanParameters, row.row[table.ColumnIndexes[foreignColumn]].Value) } if shouldApplyStartingDate(startingDate, referencedTable.Name) { whereParameters = append(whereParameters, fmt.Sprintf("%s >= $%d::timestamp", "modified", len(whereParameters)+1)) scanParameters = append(scanParameters, startingDate) } formattedColumns := strings.Join(referencedTable.Columns, ", ") formattedWhereParameters := strings.Join(whereParameters, " and ") sql := fmt.Sprintf(`SELECT %s FROM %s WHERE %s;`, formattedColumns, reference.TableName, formattedWhereParameters) followRows := sqlUtil.ExecuteQueryWithResults(db, sql, scanParameters...) if len(followRows) > 0 { for _, followRow := range followRows { newPath := make([]string, 0) newPath = append(newPath, row.path...) 
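// Same pattern as in followReferencesFrom: copy the parent path, then record the
// referencing table so the cycle check in shouldFollowReferenceToLink can see it.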
newPath = append(newPath, referencedTable.Name) result = append(result, processItem{referencedTable.Name, followRow, newPath}) } } } return result } 0707010000001B000081A4000003E800000064000000016613BCD0000058B1000000000000000000000000000000000000002700000000inter-server-sync/dumper/dataWriter.go// SPDX-FileCopyrightText: 2023 SUSE LLC // // SPDX-License-Identifier: Apache-2.0 package dumper import ( "bufio" "database/sql" "encoding/json" "fmt" "strings" "time" "github.com/rs/zerolog/log" "github.com/uyuni-project/inter-server-sync/sqlUtil" "github.com/lib/pq" "github.com/uyuni-project/inter-server-sync/schemareader" "github.com/uyuni-project/inter-server-sync/utils" ) var cache = make(map[string]string) var referrencesCall = make(map[string]int) func PrintTableDataOrdered(db *sql.DB, writer *bufio.Writer, schemaMetadata map[string]schemareader.Table, startingTable schemareader.Table, data DataDumper, options PrintSqlOptions) { printCleanTables(db, writer, schemaMetadata, startingTable, make(map[string]bool), make([]string, 0), options) writer.WriteString("-- end of clean tables") writer.WriteString("\n") orderedTables := getTablesExportOrder(schemaMetadata, startingTable, make(map[string]bool), make([]string, 0)) exportTablesData(db, writer, schemaMetadata, orderedTables, data, options) // clean cache for the next channel that can be exported cache = make(map[string]string) } /* * clear tables need to be printed in reverse order, otherwise it will not work */ func printCleanTables(db *sql.DB, writer *bufio.Writer, schemaMetadata map[string]schemareader.Table, table schemareader.Table, processedTables map[string]bool, path []string, options PrintSqlOptions) { _, tableProcessed := processedTables[table.Name] // if the current table should not be export we are interrupting the crawler process for these table // not exporting other tables relations if tableProcessed || !table.Export { return } processedTables[table.Name] = true path = append(path, table.Name) // follow reference by for _, reference := range table.ReferencedBy { tableReference, ok := schemaMetadata[reference.TableName] if !ok || !tableReference.Export { continue } if !shouldFollowReferenceToLink(path, table, tableReference) { continue } printCleanTables(db, writer, schemaMetadata, tableReference, processedTables, path, options) } if utils.Contains(options.TablesToClean, table.Name) { generateClearTable(db, writer, table, path, schemaMetadata, options) } for _, reference := range table.References { tableReference, ok := schemaMetadata[reference.TableName] if !ok || !tableReference.Export { continue } printCleanTables(db, writer, schemaMetadata, tableReference, processedTables, path, options) } } func exportTablesData(db *sql.DB, writer *bufio.Writer, schemaMetadata map[string]schemareader.Table, tablesOrdered []schemareader.Table, data DataDumper, options PrintSqlOptions) { processing := true totalExportedRecords := 0 if log.Debug().Enabled() { totalRecords := 0 for _, value := range data.TableData { totalRecords = totalRecords + len(value.Keys) } go func() { count := 0 for { time.Sleep(30 * time.Second) if !processing { break } log.Debug().Msgf("#count: %d #cacheSize %d -- #writtenRows: #%d of %d", count, len(cache), totalExportedRecords, totalRecords) count++ } }() } tableCount := 1 for _, table := range tablesOrdered { // export current table data log.Debug().Msg(fmt.Sprintf("Writing data for table [%d/%d] %s", tableCount, len(tablesOrdered), table.Name)) tableCount++ totalExportedRecords += exportCurrentTableData(db, 
writer, schemaMetadata, table, data, options) } // post-processing callback for _, table := range tablesOrdered { if options.PostOrderCallback != nil { options.PostOrderCallback(db, writer, schemaMetadata, table, data) } } processing = false if log.Debug().Enabled() { valMarshal, errMarshal := json.Marshal(referrencesCall) if errMarshal == nil { log.Debug().Msgf("Reference resolution count by table: %s", string(valMarshal)) } } } func exportCurrentTableData(db *sql.DB, writer *bufio.Writer, schemaMetadata map[string]schemareader.Table, table schemareader.Table, data DataDumper, options PrintSqlOptions) int { totalExportedRecords := 0 tableData, dataOK := data.TableData[table.Name] if dataOK { exportPoint := 0 batch := 100 for len(tableData.Keys) > exportPoint { upperLimit := exportPoint + batch if upperLimit > len(tableData.Keys) { upperLimit = len(tableData.Keys) } rows := GetRowsFromKeys(db, table, tableData.Keys[exportPoint:upperLimit]) totalExportedRecords = totalExportedRecords + len(rows) for _, rowValue := range rows { rowToInsert := generateRowInsertStatement(db, rowValue, table, schemaMetadata, options.OnlyIfParentExistsTables) writer.WriteString(rowToInsert + "\n") } exportPoint = upperLimit } } return totalExportedRecords } func getTablesExportOrder(schemaMetadata map[string]schemareader.Table, table schemareader.Table, processedTables map[string]bool, path []string) []schemareader.Table { _, tableProcessed := processedTables[table.Name] // if the current table should not be exported, we stop the crawl at this table // and do not export the tables it relates to if tableProcessed || !table.Export { return make([]schemareader.Table, 0) } processedTables[table.Name] = true path = append(path, table.Name) // follow reference to tableReferences := make([]schemareader.Table, 0) for _, reference := range table.References { tableReference, ok := schemaMetadata[reference.TableName] if ok && tableReference.Export && shouldFollowToLinkPreOrder(path, table, tableReference) { tableReferences = append(tableReferences, getTablesExportOrder(schemaMetadata, tableReference, processedTables, path)...) } } // follow reference by tableReferencesBy := make([]schemareader.Table, 0) for _, reference := range table.ReferencedBy { tableReference, ok := schemaMetadata[reference.TableName] if ok && tableReference.Export && shouldFollowReferenceToLink(path, table, tableReference) { tableReferencesBy = append(tableReferencesBy, getTablesExportOrder(schemaMetadata, tableReference, processedTables, path)...) } } return append(append(tableReferences, table), tableReferencesBy...) } // GetRowsFromKeys loads the full rows for the given keys. TODO: check if we should move this to a method on the tableData type func GetRowsFromKeys(db *sql.DB, table schemareader.Table, keys []TableKey) [][]sqlUtil.RowDataStructure { if len(keys) == 0 { return make([][]sqlUtil.RowDataStructure, 0) } formattedColumns := strings.Join(table.Columns, ", ") columnsFilter := make([]string, 0) for _, value := range keys[0].Key { columnsFilter = append(columnsFilter, value.Column) } values := make([]string, 0) for _, key := range keys { row := make([]string, 0) for _, c := range columnsFilter { for _, x := range key.Key { if x.Column == c { row = append(row, x.Value) break } } } values = append(values, "("+strings.Join(row, ",")+")") } // when columnsFilter is empty, do not append any where clause to prevent a SQL syntax error // TODO: how can columnsFilter be empty when keys are already checked at the beginning? 
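// For illustration (hypothetical table and key values), the statement built below
// has the shape: SELECT id, name FROM rhnpackage WHERE (id) IN (('1001'),('1002'));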
where_clause := "" if len(columnsFilter) > 0 { where_clause = fmt.Sprintf("WHERE (%s) IN (%s)", strings.Join(columnsFilter, ", "), strings.Join(values, ",")) } sql := fmt.Sprintf(`SELECT %s FROM %s %s;`, formattedColumns, table.Name, where_clause) return sqlUtil.ExecuteQueryWithResults(db, sql) } func filterRowData(value []sqlUtil.RowDataStructure, table schemareader.Table) []sqlUtil.RowDataStructure { if table.RowModCallback != nil { value = table.RowModCallback(value, table) } if table.UnexportColumns != nil { returnValues := make([]sqlUtil.RowDataStructure, 0) for _, row := range value { _, ok := table.UnexportColumns[row.ColumnName] if !ok { returnValues = append(returnValues, row) } } return returnValues } return value } func substituteKeys(db *sql.DB, table schemareader.Table, row []sqlUtil.RowDataStructure, tableMap map[string]schemareader.Table) []sqlUtil.RowDataStructure { values := substitutePrimaryKey(table, row) values = SubstituteForeignKey(db, table, tableMap, values) return values } func substitutePrimaryKey(table schemareader.Table, row []sqlUtil.RowDataStructure) []sqlUtil.RowDataStructure { rowResult := make([]sqlUtil.RowDataStructure, 0) pkSequence := false if len(table.PKSequence) > 0 { pkSequence = true } for _, column := range row { if pkSequence && table.PKColumns[column.ColumnName] && len(table.PKColumns) == 1 { column.ColumnType = "SQL" column.Value = fmt.Sprintf("SELECT nextval('%s')", table.PKSequence) rowResult = append(rowResult, column) } else { rowResult = append(rowResult, column) } } return rowResult } func SubstituteForeignKey(db *sql.DB, table schemareader.Table, tables map[string]schemareader.Table, row []sqlUtil.RowDataStructure) []sqlUtil.RowDataStructure { for _, reference := range table.References { row = substituteForeignKeyReference(db, table, tables, reference, row) } return row } func substituteForeignKeyReference(db *sql.DB, table schemareader.Table, tables map[string]schemareader.Table, reference schemareader.Reference, row []sqlUtil.RowDataStructure) []sqlUtil.RowDataStructure { foreignTable := tables[reference.TableName] foreignMainUniqueColumns := foreignTable.UniqueIndexes[foreignTable.MainUniqueIndexName].Columns localColumns := make([]string, 0) foreignColumns := make([]string, 0) whereParameters := make([]string, 0) scanParameters := make([]interface{}, 0) for localColumn, foreignColumn := range reference.ColumnMapping { localColumns = append(localColumns, localColumn) foreignColumns = append(foreignColumns, foreignColumn) whereParameters = append(whereParameters, fmt.Sprintf("%s = $%d", foreignColumn, len(whereParameters)+1)) scanParameters = append(scanParameters, row[table.ColumnIndexes[localColumn]].Value) } formattedColumns := strings.Join(foreignTable.Columns, ", ") formattedWhereParameters := strings.Join(whereParameters, " AND ") sql := fmt.Sprintf(`SELECT %s FROM %s WHERE %s;`, formattedColumns, reference.TableName, formattedWhereParameters) key := fmt.Sprintf("%s,%s,%s", reference.TableName, formattedWhereParameters, scanParameters) cachedValue, found := cache[key] if found { //Assuming there will be one entry in reference.ColumnMapping row[table.ColumnIndexes[localColumns[0]]].Value = cachedValue row[table.ColumnIndexes[localColumns[0]]].ColumnType = "SQL" } else { rows := sqlUtil.ExecuteQueryWithResults(db, sql, scanParameters...) // we will only change for a sub query if we were able to find the target Value // other wise we keep the pre existing Value. // this can happen when the column for the reference is null. 
Example rhnchanel->org_id if len(rows) > 0 { whereParameters = make([]string, 0) countVal, findCountVal := referrencesCall[reference.TableName] if !findCountVal { countVal = 0 } countVal++ referrencesCall[reference.TableName] = countVal for _, foreignColumn := range foreignMainUniqueColumns { // produce the where clause for _, c := range rows[0] { if strings.Compare(c.ColumnName, foreignColumn) == 0 { if c.Value == nil { whereParameters = append(whereParameters, fmt.Sprintf("%s IS NULL", foreignColumn)) } else { foreignReference := foreignTable.GetFirstReferenceFromColumn(foreignColumn) if strings.Compare(foreignReference.TableName, "") == 0 { whereParameters = append(whereParameters, fmt.Sprintf("%s = %s", foreignColumn, formatField(c))) } else { //copiedrow := make([]sqlUtil.RowDataStructure, len(rows[0])) //copy(copiedrow, rows[0]) rowResultTemp := substituteForeignKeyReference(db, foreignTable, tables, foreignReference, rows[0]) fieldToUpdate := formatField(c) for _, field := range rowResultTemp { if strings.Compare(field.ColumnName, foreignColumn) == 0 { fieldToUpdate = formatField(field) break } } whereParameters = append(whereParameters, fmt.Sprintf("%s = %s", foreignColumn, fieldToUpdate)) } } break } } } for localColumn, foreignColumn := range reference.ColumnMapping { updateSql := fmt.Sprintf(`SELECT %s FROM %s WHERE %s LIMIT 1`, foreignColumn, reference.TableName, strings.Join(whereParameters, " AND ")) row[table.ColumnIndexes[localColumn]].Value = updateSql row[table.ColumnIndexes[localColumn]].ColumnType = "SQL" cache[key] = updateSql } } } return row } func formatRowValue(value []sqlUtil.RowDataStructure) string { result := make([]string, 0) for _, col := range value { result = append(result, formatField(col)) } return strings.Join(result, ",") } func formatField(col sqlUtil.RowDataStructure) string { if col.Value == nil { return "null" } val := "" switch col.ColumnType { case "NUMERIC": val = fmt.Sprintf(`%s`, col.Value) case "TIMESTAMPTZ", "TIMESTAMP": val = pq.QuoteLiteral(string(pq.FormatTimestamp(col.Value.(time.Time)))) case "SQL": val = fmt.Sprintf(`(%s)`, col.Value) default: val = pq.QuoteLiteral(fmt.Sprintf("%s", col.Value)) } return val } func formatColumnAssignment(table schemareader.Table) string { assignments := make([]string, 0) for _, column := range table.Columns { if !table.PKColumns[column] && !table.UnexportColumns[column] { assignments = append(assignments, fmt.Sprintf("%s = excluded.%s", column, column)) } } return strings.Join(assignments, ",") } func formatOnConflict(row []sqlUtil.RowDataStructure, table schemareader.Table) string { constraint := "(" + strings.Join(table.UniqueIndexes[table.MainUniqueIndexName].Columns, ", ") + ")" switch table.Name { case "rhnerrataseverity": constraint = "(id)" case "rhnconfiginfo": constraints := map[string]string{ "rhn_confinfo_ugf_se_uq": "(username, groupname, filemode, selinux_ctx) WHERE username IS NOT NULL AND groupname IS NOT NULL AND filemode IS NOT NULL AND selinux_ctx IS NOT NULL AND symlink_target_filename_id IS NULL", "rhn_confinfo_ugf_uq": "(username, groupname, filemode) WHERE username IS NOT NULL AND groupname IS NOT NULL AND filemode IS NOT NULL AND selinux_ctx IS NULL AND symlink_target_filename_id IS NULL", "rhn_confinfo_s_se_uq": "(symlink_target_filename_id, selinux_ctx) WHERE username IS NULL AND groupname IS NULL AND filemode IS NULL AND selinux_ctx IS NOT NULL AND symlink_target_filename_id IS NOT NULL", "rhn_confinfo_s_uq": "(symlink_target_filename_id) WHERE username IS NULL AND groupname IS 
NULL AND filemode IS NULL AND selinux_ctx IS NULL AND symlink_target_filename_id IS NOT NULL", } // Only username and selinux_ctx columns matter to differentiate between indexes columns := map[string]bool{ "username": false, "selinux_ctx": false, } // Go through all the columns first in case the columns come unordered for _, col := range row { if (col.ColumnName == "username" || col.ColumnName == "selinux_ctx") && col.Value != nil { columns[col.ColumnName] = true } } constraint = constraints["rhn_confinfo_s_uq"] if columns["username"] && columns["selinux_ctx"] { constraint = constraints["rhn_confinfo_ugf_se_uq"] } if !columns["username"] && columns["selinux_ctx"] { constraint = constraints["rhn_confinfo_s_se_uq"] } if columns["username"] && !columns["selinux_ctx"] { constraint = constraints["rhn_confinfo_ugf_uq"] } case "rhnerrata": // TODO rhnerrata and rhnpackageevr logic is similar, so we extract to one method on future var orgId interface{} = nil for _, field := range row { if strings.Compare(field.ColumnName, "org_id") == 0 { orgId = field.Value break } } if orgId == nil { constraint = "(advisory) WHERE org_id IS NULL" } else { constraint = "(advisory, org_id) WHERE org_id IS NOT NULL" } case "rhnpackageevr": var epoch interface{} = nil for _, field := range row { if strings.Compare(field.ColumnName, "epoch") == 0 { epoch = field.Value } } if epoch == nil { return "(version, release, ((evr).type)) WHERE epoch IS NULL DO NOTHING" } else { return "(version, release, epoch, ((evr).type)) WHERE epoch IS NOT NULL DO NOTHING" } case "rhndistchannelmap": //TODO similar to rhnerrata var orgId interface{} = nil for _, field := range row { if strings.Compare(field.ColumnName, "org_id") == 0 { orgId = field.Value break } } if orgId == nil { constraint = "(release, channel_arch_id) WHERE org_id IS NULL" } else { constraint = "(release, channel_arch_id, org_id) WHERE org_id IS NOT NULL" } } columnAssignment := formatColumnAssignment(table) return fmt.Sprintf("%s DO UPDATE SET %s", constraint, columnAssignment) } func generateClearTable(db *sql.DB, writer *bufio.Writer, table schemareader.Table, path []string, schemaMetadata map[string]schemareader.Table, options PrintSqlOptions) { // generates the delete statement for the table existingRecords := buildQueryToGetExistingRecords(path, table, schemaMetadata, options.CleanWhereClause) mainUniqueColumns := strings.Join(table.UniqueIndexes[table.MainUniqueIndexName].Columns, ",") cleanEmptyTable := fmt.Sprintf("\nDELETE FROM %s WHERE (%s) IN (%s);", table.Name, mainUniqueColumns, existingRecords) writer.WriteString(cleanEmptyTable + "\n") // repopulate all pre-existing data allTableRecordsSql := fmt.Sprintf("SELECT * FROM %s WHERE (%s) IN (%s);", table.Name, mainUniqueColumns, existingRecords) allTableRecords := sqlUtil.ExecuteQueryWithResults(db, allTableRecordsSql) for _, record := range allTableRecords { insertStatement := generateRowInsertStatement(db, record, table, schemaMetadata, []string{table.Name}) writer.WriteString(insertStatement + "\n") //fmt.Println(insertStatement) } } func buildQueryToGetExistingRecords(path []string, table schemareader.Table, schemaMetadata map[string]schemareader.Table, cleanWhereClause string) string { mainUniqueColumns := "" for _, column := range table.UniqueIndexes[table.MainUniqueIndexName].Columns { if len(mainUniqueColumns) > 0 { mainUniqueColumns = mainUniqueColumns + ", " } mainUniqueColumns = mainUniqueColumns + table.Name + "." 
+ column } joinsClause := getJoinsClause(path, schemaMetadata) return fmt.Sprintf(`SELECT %s FROM %s %s %s`, mainUniqueColumns, table.Name, joinsClause, cleanWhereClause) } func getJoinsClause(path []string, schemaMetadata map[string]schemareader.Table) string { var result strings.Builder reversePath := make([]string, len(path)) copy(reversePath, path) utils.ReverseArray(reversePath) for i := 0; i < len(reversePath)-1; i++ { firstTable := reversePath[i] secondTable := reversePath[i+1] reverseRelationLookup := false relationFound := findRelationInfo(schemaMetadata[firstTable].ReferencedBy, firstTable, secondTable) if relationFound == nil { relationFound = findRelationInfo(schemaMetadata[firstTable].References, firstTable, secondTable) reverseRelationLookup = true } for key, value := range relationFound { if reverseRelationLookup { result.WriteString(fmt.Sprintf(` INNER JOIN %s on %s.%s = %s.%s`, secondTable, secondTable, value, firstTable, key)) } else { result.WriteString(fmt.Sprintf(` INNER JOIN %s on %s.%s = %s.%s`, secondTable, secondTable, key, firstTable, value)) } } } return result.String() } func findRelationInfo(References []schemareader.Reference, sourceTable string, tableToFind string) map[string]string { for _, reference := range References { if reference.TableName == tableToFind { // we can try to generalize it by check if PK is in fact a foreign key. // for now, since is a single case, I will hardcode it if strings.Compare("rhnchannelcloned", sourceTable) == 0 { if _, ok := reference.ColumnMapping["original_id"]; ok { continue } } return reference.ColumnMapping } } return nil } func prepareColumnNames(table schemareader.Table) string { returnColumn := "" for _, column := range table.Columns { _, ignore := table.UnexportColumns[column] if !ignore { if len(returnColumn) == 0 { returnColumn = returnColumn + column } else { returnColumn = returnColumn + ", " + column } } } return returnColumn } func generateRowInsertStatement(db *sql.DB, values []sqlUtil.RowDataStructure, table schemareader.Table, schemaMetadata map[string]schemareader.Table, onlyIfParentExistsTables []string) string { tableName := table.Name columnNames := prepareColumnNames(table) rowKeysProcessed := substituteKeys(db, table, values, schemaMetadata) valueFiltered := filterRowData(rowKeysProcessed, table) if strings.Compare(table.MainUniqueIndexName, schemareader.VirtualIndexName) == 0 || utils.Contains(onlyIfParentExistsTables, table.Name) { whereClauseList := make([]string, 0) for _, indexColumn := range table.UniqueIndexes[table.MainUniqueIndexName].Columns { for _, value := range valueFiltered { if strings.Compare(indexColumn, value.ColumnName) == 0 { if value.Value == nil { whereClauseList = append(whereClauseList, fmt.Sprintf(" %s IS NULL", value.ColumnName)) } else { whereClauseList = append(whereClauseList, fmt.Sprintf(" %s = %s", value.ColumnName, formatField(value))) } } } } whereClause := strings.Join(whereClauseList, " AND ") if utils.Contains(onlyIfParentExistsTables, table.Name) { parentsRecordsCheckList := make([]string, 0) for _, reference := range table.References { for localColumn, _ := range reference.ColumnMapping { for _, value := range valueFiltered { if strings.Compare(localColumn, value.ColumnName) == 0 { if value.Value != nil && value.ColumnType == "SQL" { parentsRecordsCheckList = append(parentsRecordsCheckList, fmt.Sprintf("EXISTS %s", formatField(value))) } } } } } parentRecordsExistsClause := strings.Join(parentsRecordsCheckList, " AND ") return fmt.Sprintf(`INSERT INTO %s (%s) SELECT 
%s WHERE NOT EXISTS (SELECT 1 FROM %s WHERE %s) AND %s;`, tableName, columnNames, formatRowValue(valueFiltered), tableName, whereClause, parentRecordsExistsClause) } return fmt.Sprintf(`INSERT INTO %s (%s) SELECT %s WHERE NOT EXISTS (SELECT 1 FROM %s WHERE %s);`, tableName, columnNames, formatRowValue(valueFiltered), tableName, whereClause) } else { onConflictFormatted := formatOnConflict(valueFiltered, table) return fmt.Sprintf(`INSERT INTO %s (%s) VALUES (%s) ON CONFLICT %s;`, tableName, columnNames, formatRowValue(valueFiltered), onConflictFormatted) } } 0707010000001C000081A4000003E800000064000000016613BCD000000ED3000000000000000000000000000000000000002D00000000inter-server-sync/dumper/dumpAllTableData.go// SPDX-FileCopyrightText: 2023 SUSE LLC // // SPDX-License-Identifier: Apache-2.0 package dumper import ( "bufio" "database/sql" "fmt" "strings" "github.com/rs/zerolog/log" "github.com/uyuni-project/inter-server-sync/schemareader" "github.com/uyuni-project/inter-server-sync/sqlUtil" ) func DumpAllTablesData(db *sql.DB, writer *bufio.Writer, schemaMetadata map[string]schemareader.Table, startingTables []schemareader.Table, whereFilterClause func(table schemareader.Table) string, onlyIfParentExistsTables []string) { // exporting from the starting tables. processedTables := DumpReachableTablesData(db, writer, schemaMetadata, startingTables, whereFilterClause, onlyIfParentExistsTables, make(map[string]bool)) // Export tables not visited when exporting the starting tables for schemaTableName, schemaTable := range schemaMetadata { if !schemaTable.Export { continue } _, ok := processedTables[schemaTableName] if ok { continue } exportAllTableData(db, writer, schemaMetadata, schemaTable, whereFilterClause, onlyIfParentExistsTables) } } func DumpReachableTablesData(db *sql.DB, writer *bufio.Writer, schemaMetadata map[string]schemareader.Table, startingTables []schemareader.Table, whereFilterClause func(table schemareader.Table) string, onlyIfParentExistsTables []string, processedTables map[string]bool) map[string]bool { for _, startingTable := range startingTables { _, ok := processedTables[startingTable.Name] if ok { continue } processedTables = processTableDataWithLinks(db, writer, schemaMetadata, startingTable, whereFilterClause, processedTables, make([]string, 0), onlyIfParentExistsTables) } return processedTables } func processTableDataWithLinks(db *sql.DB, writer *bufio.Writer, schemaMetadata map[string]schemareader.Table, table schemareader.Table, whereFilterClause func(table schemareader.Table) string, processedTables map[string]bool, path []string, onlyIfParentExistsTables []string) map[string]bool { log.Trace().Msgf("Processing table: %s", table.Name) _, tableProcessed := processedTables[table.Name] currentTable := schemaMetadata[table.Name] if tableProcessed || !currentTable.Export { return processedTables } path = append(path, table.Name) processedTables[table.Name] = true for _, reference := range table.References { tableReference, ok := schemaMetadata[reference.TableName] if !ok || !tableReference.Export { continue } log.Trace().Msgf("Table processed: %s", table.Name) processTableDataWithLinks(db, writer, schemaMetadata, tableReference, whereFilterClause, processedTables, path, onlyIfParentExistsTables) } exportAllTableData(db, writer, schemaMetadata, table, whereFilterClause, onlyIfParentExistsTables) for _, reference := range table.ReferencedBy { tableReference, ok := schemaMetadata[reference.TableName] if !ok || !tableReference.Export { continue } if 
!shouldFollowReferenceToLink(path, table, tableReference) { continue } processTableDataWithLinks(db, writer, schemaMetadata, tableReference, whereFilterClause, processedTables, path, onlyIfParentExistsTables) } return processedTables } func exportAllTableData(db *sql.DB, writer *bufio.Writer, schemaMetadata map[string]schemareader.Table, table schemareader.Table, whereFilterClause func(table schemareader.Table) string, onlyIfParentExistsTables []string) { log.Trace().Msgf("Exporting data for table %s", table.Name) formattedColumns := strings.Join(table.Columns, ", ") sql := fmt.Sprintf(`SELECT %s FROM %s %s;`, formattedColumns, table.Name, whereFilterClause(table)) rows := sqlUtil.ExecuteQueryWithResults(db, sql) for _, row := range rows { writer.WriteString(generateRowInsertStatement(db, row, table, schemaMetadata, onlyIfParentExistsTables) + "\n") } } 0707010000001D000041ED000003E800000064000000016613BCD000000000000000000000000000000000000000000000002700000000inter-server-sync/dumper/osImageDumper0707010000001E000081A4000003E800000064000000016613BCD000000703000000000000000000000000000000000000003800000000inter-server-sync/dumper/osImageDumper/osImageDumper.go// SPDX-FileCopyrightText: 2023 SUSE LLC // // SPDX-License-Identifier: Apache-2.0 package osImageDumper import ( "fmt" "os" "path" "github.com/rs/zerolog/log" "github.com/uyuni-project/inter-server-sync/dumper" ) var serverDataFolder = "/srv/www/os-images/" //FIXME: we have no relation from db tables to actial data so for now copy content of serverDataFolder //func DumpOsImages(db *sql.DB, schemaMetadata map[string]schemareader.Table, data dumper.DataDumper, outputFolder string) { func DumpOsImages(outputFolder string, orgIds []uint) { log.Debug().Msg("Images data dump") imagesDir, err := os.Open(serverDataFolder) if err != nil { log.Fatal().Err(err) } defer imagesDir.Close() orgDirInfo, err := imagesDir.ReadDir(-1) if len(orgIds) == 0 { orgIds = []uint{0} } for _, org := range orgDirInfo { for _, orgId := range orgIds { if org.Type().IsDir() && (orgId == 0 || org.Name() == fmt.Sprint(orgId)) { var orgDirPath = path.Join(serverDataFolder, org.Name()) orgDir, err := os.Open(orgDirPath) if err != nil { log.Fatal().Err(err) } defer orgDir.Close() orgDirInfo, err := orgDir.ReadDir(-1) for _, image := range orgDirInfo { if image.Type().IsRegular() { DumpOsImage(path.Join(outputFolder, org.Name(), image.Name()), path.Join(orgDirPath, image.Name())) } } } } } } func DumpOsImage(outputFolder string, source string) { log.Trace().Msgf("Copying image %s to %s", source, outputFolder) _, err := dumper.Copy(source, outputFolder) if err != nil { log.Fatal().Err(err) } } func GetImagePathForImage(filepath string, org_id string, prefixOpt ...string) string { prefix := serverDataFolder if len(prefixOpt) > 0 { prefix = prefixOpt[0] } return path.Join(prefix, org_id, filepath) } 0707010000001F000041ED000003E800000064000000016613BCD000000000000000000000000000000000000000000000002700000000inter-server-sync/dumper/packageDumper07070100000020000081A4000003E800000064000000016613BCD0000006CD000000000000000000000000000000000000003800000000inter-server-sync/dumper/packageDumper/packageDumper.go// SPDX-FileCopyrightText: 2023 SUSE LLC // // SPDX-License-Identifier: Apache-2.0 package packageDumper import ( "database/sql" "fmt" "github.com/rs/zerolog/log" "time" "github.com/uyuni-project/inter-server-sync/dumper" "github.com/uyuni-project/inter-server-sync/schemareader" ) var serverDataFolder = "/var/spacewalk" func DumpPackageFiles(db *sql.DB, schemaMetadata 
map[string]schemareader.Table, data dumper.DataDumper, outputFolder string) { packageKeysData := data.TableData["rhnpackage"] table := schemaMetadata[packageKeysData.TableName] pathIndex := table.ColumnIndexes["path"] totalPackages := len(packageKeysData.Keys) log.Debug().Msgf("Total package files to copy: %d", totalPackages) exportedpackages := 0 processing := true if log.Debug().Enabled() { go func() { count := 0 for { if !processing { break } time.Sleep(30 * time.Second) log.Debug().Msgf("#count: %d -- #exportedPackageFiles: #%d of %d", count, exportedpackages, totalPackages) count++ } }() } exportPoint := 0 batchSize := 500 for len(packageKeysData.Keys) > exportPoint { upperLimit := exportPoint + batchSize if upperLimit > len(packageKeysData.Keys) { upperLimit = len(packageKeysData.Keys) } rows := dumper.GetRowsFromKeys(db, table, packageKeysData.Keys[exportPoint:upperLimit]) for _, rowPackage := range rows { path := rowPackage[pathIndex] source := fmt.Sprintf("%s/%s", serverDataFolder, path.Value) target := fmt.Sprintf("%s/%s", outputFolder, path.Value) _, error := dumper.Copy(source, target) if error != nil { log.Panic().Err(error).Msg("could not Copy File") } exportedpackages++ } exportPoint = upperLimit } processing = false } 07070100000021000041ED000003E800000064000000016613BCD000000000000000000000000000000000000000000000002600000000inter-server-sync/dumper/pillarDumper07070100000022000081A4000003E800000064000000016613BCD0000010D7000000000000000000000000000000000000003600000000inter-server-sync/dumper/pillarDumper/pillarDumper.go// SPDX-FileCopyrightText: 2023 SUSE LLC // // SPDX-License-Identifier: Apache-2.0 package pillarDumper import ( "fmt" "os" "os/exec" "path" "path/filepath" "github.com/rs/zerolog/log" "github.com/uyuni-project/inter-server-sync/dumper" "github.com/uyuni-project/inter-server-sync/schemareader" "github.com/uyuni-project/inter-server-sync/utils" ) var serverDataDir = "/srv/susemanager/pillar_data/" var replacePattern = "{SERVER_FQDN}" func DumpImagePillars(outputDir string, orgIds []uint, serverConfig string) { log.Debug().Msgf("Dumping pillars to %s", outputDir) fqdn := utils.GetCurrentServerFQDN(serverConfig) sourceDir := filepath.Join(serverDataDir, "images") orgDir, err := os.Open(sourceDir) if err != nil { log.Fatal().Err(err) } defer orgDir.Close() orgDirInfo, err := orgDir.ReadDir(-1) // If orgIds is empty, set it to 0 so all orgs would be exported if len(orgIds) == 0 { orgIds = []uint{0} } for _, org := range orgDirInfo { for _, orgId := range orgIds { if org.Type().IsDir() && (orgId == 0 || org.Name() == fmt.Sprintf("org%d", orgId)) { DumpPillars(path.Join(sourceDir, org.Name()), path.Join(outputDir, org.Name()), fqdn, replacePattern) } } } } func DumpPillars(sourceDir, outputDir, sourceFQDN, targetFQDN string) { log.Trace().Msgf("Pillar dump for %s, replacing FQDN %s", sourceDir, sourceFQDN) pillarDir, err := os.Open(sourceDir) if err != nil { log.Fatal().Err(err) } defer pillarDir.Close() pillarDirInfo, err := pillarDir.ReadDir(-1) for _, pillar := range pillarDirInfo { if pillar.Type().IsRegular() { pillarFilePath := path.Join(sourceDir, pillar.Name()) pillarTargetPath := path.Join(outputDir, pillar.Name()) log.Trace().Msgf("Parsing and copying pillar from %s to %s", pillarFilePath, pillarTargetPath) _, err := dumper.ModifyCopy(pillarFilePath, pillarTargetPath, sourceFQDN, targetFQDN) if err != nil { log.Fatal().Err(err) } os.Chmod(pillarTargetPath, 0640) cmd := exec.Command("chown", "salt:susemanager", pillarTargetPath) cmd.Stdout = os.Stdout 
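// Editor's note: ownership is set by shelling out to chown because os.Chown takes
// numeric uid/gid values, which would require extra os/user lookups here.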
cmd.Stderr = os.Stderr err = cmd.Run() if err != nil { log.Fatal().Err(err).Msg("Error processing image pillar files") } } } } // 4.2 and older stores pillars in files // image export replaces hostnames in image pillars, we need to replace them to correct SUMA on import func ImportImagePillars(sourceDir string, fqdn string) { log.Debug().Msgf("Importing image pillars from %s", sourceDir) orgDir, err := os.Open(sourceDir) if err != nil { log.Fatal().Err(err) } defer orgDir.Close() orgDirInfo, err := orgDir.ReadDir(-1) for _, org := range orgDirInfo { if org.Type().IsDir() { targetDir := path.Join(serverDataDir, "images", org.Name()) DumpPillars(path.Join(sourceDir, org.Name()), targetDir, replacePattern, fqdn) cmd := exec.Command("chown", "salt:susemanager", targetDir) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr err = cmd.Run() if err != nil { log.Fatal().Err(err).Msg("Error importing image pillar files") } } } } // 4.3 and newer stores pillars in database // image export replaces hostnames in image pillars, we need to replace them to correct SUMA on import func UpdateImagePillars(serverConfig string) { fqdn := utils.GetCurrentServerFQDN(serverConfig) checkQuery := "SELECT EXISTS (SELECT FROM pg_tables WHERE schemaname = 'public' AND tablename = 'susesaltpillar')" db := schemareader.GetDBconnection(serverConfig) rows, err := db.Query(checkQuery) if err != nil { log.Fatal().Err(err).Msgf("Error while executing '%s'", checkQuery) } if !rows.Next() { log.Fatal().Msgf("No return on pillar database table check") } var hasPillars bool err = rows.Scan(&hasPillars) if err != nil { log.Fatal().Err(err).Msgf("Unexpected query result") } if !hasPillars { log.Debug().Msgf("Pillars not backed by database") return } sqlQuery := fmt.Sprintf("UPDATE susesaltpillar SET pillar = REPLACE(pillar::text, '%s', '%s')::jsonb WHERE category LIKE 'Image%%';", replacePattern, fqdn) log.Trace().Msgf("Updating pillar files using query '%s'", sqlQuery) log.Info().Msg("Updating image pillars if needed") rows, err = db.Query(sqlQuery) if err != nil { log.Fatal().Err(err).Msgf("Error updating image pillars") } } 07070100000023000081A4000003E800000064000000016613BCD0000003AF000000000000000000000000000000000000002200000000inter-server-sync/dumper/types.go// SPDX-FileCopyrightText: 2023 SUSE LLC // // SPDX-License-Identifier: Apache-2.0 package dumper import ( "bufio" "database/sql" "github.com/uyuni-project/inter-server-sync/schemareader" "github.com/uyuni-project/inter-server-sync/sqlUtil" ) type RowKey struct { Column string Value string } type TableKey struct { Key []RowKey } type TableDump struct { TableName string KeyMap map[string]bool Keys []TableKey } type DataDumper struct { TableData map[string]TableDump Paths map[string]bool } type processItem struct { tableName string row []sqlUtil.RowDataStructure path []string } type PrintSqlOptions struct { TablesToClean []string CleanWhereClause string OnlyIfParentExistsTables []string PostOrderCallback Callback } type Callback func(db *sql.DB, writer *bufio.Writer, schemaMetadata map[string]schemareader.Table, table schemareader.Table, data DataDumper) 07070100000024000081A4000003E800000064000000016613BCD000000531000000000000000000000000000000000000002200000000inter-server-sync/dumper/utils.go// SPDX-FileCopyrightText: 2023 SUSE LLC // // SPDX-License-Identifier: Apache-2.0 package dumper import ( "fmt" "io" "os" "path/filepath" "strings" ) func Copy(src, dst string) (int64, error) { sourceFileStat, err := os.Stat(src) if err != nil { return 0, err } if 
!sourceFileStat.Mode().IsRegular() { return 0, fmt.Errorf("%s is not a regular file", src) } source, err := os.Open(src) if err != nil { return 0, err } defer source.Close() destination, err := create(dst) if err != nil { return 0, err } defer destination.Close() nBytes, err := io.Copy(destination, source) return nBytes, err } func ModifyCopy(src, dst, pattern, replace string) (int64, error) { sourceFileStat, err := os.Stat(src) if err != nil { return 0, err } if !sourceFileStat.Mode().IsRegular() { return 0, fmt.Errorf("%s is not a regular file", src) } input, err := os.ReadFile(src) if err != nil { return 0, err } output := strings.ReplaceAll(string(input), pattern, replace) destination, err := create(dst) if err != nil { return 0, err } defer destination.Close() nBytes, err := destination.Write([]byte(output)) return int64(nBytes), err } func create(p string) (*os.File, error) { if err := os.MkdirAll(filepath.Dir(p), 0770); err != nil { return nil, err } return os.Create(p) } 07070100000025000081A4000003E800000064000000016613BCD00000124A000000000000000000000000000000000000002700000000inter-server-sync/dumper/utils_test.go// SPDX-FileCopyrightText: 2023 SUSE LLC // // SPDX-License-Identifier: Apache-2.0 package dumper import ( "bufio" "database/sql" "fmt" "reflect" "strings" "github.com/uyuni-project/inter-server-sync/schemareader" ) type TablesGraph map[string][]string type MetaDataGraph map[string]schemareader.Table // initializeMetaDataGraph creates MetaDataGraph and DataDumper in two separate routines, that traverse the TablesGraph // from the given root in different orders to get the desired setup func initializeMetaDataGraph(graph TablesGraph, root string) (MetaDataGraph, DataDumper) { schemaMetadata, dataDumper := createMetaDataGraph(graph) dataDumper.Paths = allPathsPostOrder(graph, root) return schemaMetadata, dataDumper } // createMetaDataGraph iterates over each key in the map, then over each value under this key, creates a table in the // MetaDataGraph if it does not exist yet, otherwise updates. 
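// Every fixture table gets a single "id" primary-key column plus one "<child>_fk_id"
// column per outgoing edge, mimicking the shape the schemareader produces for real tables.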
func createMetaDataGraph(graph TablesGraph) (MetaDataGraph, DataDumper) { schemaMetadata := MetaDataGraph{} dataDumper := DataDumper{ TableData: map[string]TableDump{}, Paths: map[string]bool{}, } var getOrCreateTable = func(name string) schemareader.Table { if _, ok := schemaMetadata[name]; !ok { // create a table and add a referer if there is any indexName := schemareader.VirtualIndexName return schemareader.Table{ Name: name, Export: true, Columns: []string{"id"}, PKColumns: map[string]bool{"id": true}, ColumnIndexes: map[string]int{"id": 0}, MainUniqueIndexName: indexName, UniqueIndexes: map[string]schemareader.UniqueIndex{indexName: {indexName, []string{"id"}}}, References: []schemareader.Reference{}, ReferencedBy: []schemareader.Reference{}, } } else { return schemaMetadata[name] } } for parent, children := range graph { var parentTable = getOrCreateTable(parent) for _, child := range children { columnKey := child + "_fk_id" parentTable.Columns = append(parentTable.Columns, columnKey) parentTable.ColumnIndexes[columnKey] = len(parentTable.Columns) - 1 parentTable.References = append( parentTable.References, schemareader.Reference{TableName: child, ColumnMapping: map[string]string{columnKey: "id"}}, ) childTable := getOrCreateTable(child) childTable.ReferencedBy = append( childTable.ReferencedBy, schemareader.Reference{TableName: parent, ColumnMapping: map[string]string{columnKey: "id"}}, ) schemaMetadata[child] = childTable k := []RowKey{{"id", fmt.Sprintf("'%04d'", 1)}} dataDumper.TableData[child] = TableDump{ TableName: child, KeyMap: map[string]bool{fmt.Sprintf("'%04d'", 1): true}, Keys: []TableKey{{Key: k}}, } } schemaMetadata[parent] = parentTable k := []RowKey{{"id", fmt.Sprintf("'%04d'", 1)}} dataDumper.TableData[parent] = TableDump{ TableName: parent, KeyMap: map[string]bool{fmt.Sprintf("'%04d'", 1): true}, Keys: []TableKey{{Key: k}}, } } return schemaMetadata, dataDumper } func allPathsPostOrder(graph TablesGraph, root string) map[string]bool { var node string var path []string stack := []string{root} visited := map[string]bool{} result := map[string]bool{} for len(stack) > 0 { // pop the next node from the stack node, stack = stack[0], stack[1:] // there are circular dependencies, so we need to check if we've been there yet if _, ok := visited[node]; ok { // rewind from the current depth path = path[:len(stack)] continue } visited[node] = true path = append(path, node) result[strings.Join(path, ",")] = true children := graph[node] // if reached a leaf if len(children) == 0 { // make one step back path = path[:len(path)-1] } reverse(children) stack = append(children, stack...) 
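// Children are prepended to the stack so the walk stays depth-first; the path
// slice is trimmed on leaves and on already-visited nodes to backtrack.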
} return result } // setNumberOfRecordsForTable takes a generic mocked DataDumper object and simulates a case where a table X has Y records func setNumberOfRecordsForTable(tc *writerTestCase, tableName string, num int) { var keys []TableKey for i := 0; i < num; i++ { k := []RowKey{{"id", fmt.Sprintf("%04d", i+1)}} keys = append(keys, TableKey{Key: k}) } tableData := tc.dumper.TableData[tableName] tableData.Keys = keys tc.dumper.TableData[tableName] = tableData } func reverse(s interface{}) { n := reflect.ValueOf(s).Len() swap := reflect.Swapper(s) for i, j := 0, n-1; i < j; i, j = i+1, j-1 { swap(i, j) } } func createCallback() Callback { return func(db *sql.DB, writer *bufio.Writer, schemaMetadata map[string]schemareader.Table, table schemareader.Table, data DataDumper) { } } 07070100000026000081A4000003E800000064000000016613BCD0000040E3000000000000000000000000000000000000002800000000inter-server-sync/dumper/writer_test.go// SPDX-FileCopyrightText: 2023 SUSE LLC // // SPDX-License-Identifier: Apache-2.0 package dumper import ( "fmt" "reflect" "strings" "testing" "github.com/uyuni-project/inter-server-sync/schemareader" "github.com/uyuni-project/inter-server-sync/sqlUtil" "github.com/uyuni-project/inter-server-sync/tests" ) // writerTestCase is a general object for each dumper's recursive method type writerTestCase struct { repo *tests.DataRepository schemaMetadata MetaDataGraph startingTable schemareader.Table dumper DataDumper whereFilterClause func(table schemareader.Table) string processedTables map[string]bool path []string onlyIfParentExistsTables []string options PrintSqlOptions } func TestPrintAllTableData(t *testing.T) { // 01 Arrange graph := TablesGraph{ // first order "root": []string{"v01", "v02"}, "v01": []string{"v05"}, "v02": []string{"v03"}, // second order "v03": []string{"v04"}, // third order with circular dependency "v04": []string{"v05"}, "v05": []string{"v04"}, } root := "root" testCase := createTestCase(graph, root, PrintSqlOptions{}) // the data repository expect these statements in the exact same order testCase.repo.Expect("SELECT id, v05_fk_id FROM v04 ;", testCase.schemaMetadata["v04"].Columns, 1) testCase.repo.Expect("SELECT id, v04_fk_id FROM v05 WHERE id = $1;", testCase.schemaMetadata["v05"].Columns, 1) testCase.repo.Expect("SELECT id, v04_fk_id FROM v05 ;", testCase.schemaMetadata["v05"].Columns, 1) testCase.repo.Expect("SELECT id, v05_fk_id FROM v04 WHERE id = $1;", testCase.schemaMetadata["v04"].Columns, 1) testCase.repo.Expect("SELECT id, v05_fk_id FROM v01 ;", testCase.schemaMetadata["v01"].Columns, 1) testCase.repo.Expect("SELECT id, v04_fk_id FROM v03 ;", testCase.schemaMetadata["v03"].Columns, 1) testCase.repo.Expect("SELECT id, v03_fk_id FROM v02 ;", testCase.schemaMetadata["v02"].Columns, 1) testCase.repo.Expect("SELECT id, v04_fk_id FROM v03 WHERE id = $1;", testCase.schemaMetadata["v03"].Columns, 1) testCase.repo.Expect("SELECT id, v01_fk_id, v02_fk_id FROM root ;", testCase.schemaMetadata["root"].Columns, 1) testCase.repo.Expect("SELECT id, v05_fk_id FROM v01 WHERE id = $1;", testCase.schemaMetadata["v01"].Columns, 1) testCase.repo.Expect("SELECT id, v03_fk_id FROM v02 WHERE id = $1;", testCase.schemaMetadata["v02"].Columns, 1) // 02 Act result := processTableDataWithLinks( testCase.repo.DB, testCase.repo.Writer, testCase.schemaMetadata, testCase.startingTable, testCase.whereFilterClause, testCase.processedTables, testCase.path, testCase.onlyIfParentExistsTables, ) // 03 Assert if result == nil { t.Errorf("processedTables is nil") } for node, 
isExported := range result { if !isExported { t.Errorf(fmt.Sprintf("Node %v is not exported!", node)) } } // checks if all expected statements were indeed executed against the db if err := testCase.repo.ExpectationsWereMet(); err != nil { t.Errorf("Some nodes left unexported. Error message: %s", err) } } func TestPrintCleanTables(t *testing.T) { // 01 Arrange graph := TablesGraph{ // first order "root": []string{"v11", "v12"}, "v11": []string{"v15", "v16"}, "v12": []string{"v13"}, // second order "v13": []string{"v14"}, // third order with circular dependency "v14": []string{"v15", "v16"}, "v15": []string{"v14"}, "v16": []string{}, } keys := make([]string, 0, len(graph)) for k := range graph { keys = append(keys, k) } root := "root" testCase := createTestCase( graph, root, PrintSqlOptions{TablesToClean: keys}, ) testCase.repo.Expect("SELECT * FROM root WHERE (id) IN (SELECT root.id FROM root );", testCase.schemaMetadata["root"].Columns, 1) testCase.repo.Expect("SELECT id, v15_fk_id, v16_fk_id FROM v11 WHERE id = $1;", testCase.schemaMetadata["v11"].Columns, 1) testCase.repo.Expect("SELECT id, v13_fk_id FROM v12 WHERE id = $1;", testCase.schemaMetadata["v12"].Columns, 1) testCase.repo.Expect("SELECT * FROM v11 WHERE (id) IN (SELECT v11.id FROM v11 "+ "INNER JOIN root on root.v11_fk_id = v11.id );", testCase.schemaMetadata["v11"].Columns, 1) testCase.repo.Expect("SELECT id, v14_fk_id FROM v15 WHERE id = $1;", testCase.schemaMetadata["v15"].Columns, 1) testCase.repo.Expect("SELECT id FROM v16 WHERE id = $1;", testCase.schemaMetadata["v16"].Columns, 1) testCase.repo.Expect("SELECT * FROM v15 WHERE (id) IN (SELECT v15.id FROM v15 "+ "INNER JOIN v11 on v11.v15_fk_id = v15.id "+ "INNER JOIN root on root.v11_fk_id = v11.id );", testCase.schemaMetadata["v15"].Columns, 1) testCase.repo.Expect("SELECT id, v15_fk_id, v16_fk_id FROM v14 WHERE id = $1;", testCase.schemaMetadata["v14"].Columns, 1) testCase.repo.Expect("SELECT * FROM v14 WHERE (id) IN (SELECT v14.id FROM v14 "+ "INNER JOIN v15 on v15.v14_fk_id = v14.id "+ "INNER JOIN v11 on v11.v15_fk_id = v15.id "+ "INNER JOIN root on root.v11_fk_id = v11.id );", testCase.schemaMetadata["v14"].Columns, 1) testCase.repo.Expect("SELECT * FROM v16 WHERE (id) IN (SELECT v16.id FROM v16 "+ "INNER JOIN v14 on v14.v16_fk_id = v16.id "+ "INNER JOIN v15 on v15.v14_fk_id = v14.id "+ "INNER JOIN v11 on v11.v15_fk_id = v15.id "+ "INNER JOIN root on root.v11_fk_id = v11.id );", testCase.schemaMetadata["v16"].Columns, 1) testCase.repo.Expect("SELECT * FROM v12 WHERE (id) IN (SELECT v12.id FROM v12 "+ "INNER JOIN root on root.v12_fk_id = v12.id );", testCase.schemaMetadata["v12"].Columns, 1) testCase.repo.Expect("SELECT id, v14_fk_id FROM v13 WHERE id = $1;", testCase.schemaMetadata["v13"].Columns, 1) testCase.repo.Expect("SELECT * FROM v13 WHERE (id) IN (SELECT v13.id FROM v13 "+ "INNER JOIN v12 on v12.v13_fk_id = v13.id "+ "INNER JOIN root on root.v12_fk_id = v12.id );", testCase.schemaMetadata["v13"].Columns, 1) expectedWrittenBuffer := []string{ "" + "\n" + "DELETE FROM root WHERE (id) IN (SELECT root.id FROM root );" + "\n" + "INSERT INTO root (id, v11_fk_id, v12_fk_id)\t" + "SELECT '0001',(SELECT id FROM v11 WHERE id = '0001' LIMIT 1),(SELECT id FROM v12 WHERE id = '0001' LIMIT 1) " + "WHERE NOT EXISTS (SELECT 1 FROM root WHERE id = '0001')" + " AND EXISTS (SELECT id FROM v11 WHERE id = '0001' LIMIT 1) AND EXISTS (SELECT id FROM v12 WHERE id = '0001' LIMIT 1);" + "\n" + "\n" + "DELETE FROM v11 WHERE (id) IN (SELECT v11.id FROM v11 INNER JOIN root on 
root.v11_fk_id = v11.id );" + "\n" + "INSERT INTO v11 (id, v15_fk_id, v16_fk_id)\t" + "SELECT '0001',(SELECT id FROM v15 WHERE id = '0001' LIMIT 1),(SELECT id FROM v16 WHERE id = '0001' LIMIT 1) " + "WHERE NOT EXISTS (SELECT 1 FROM v11 WHERE id = '0001') " + "AND EXISTS (SELECT id FROM v15 WHERE id = '0001' LIMIT 1) AND EXISTS (SELECT id FROM v16 WHERE id = '0001' LIMIT 1);" + "\n" + "\n" + "DELETE FROM v15 WHERE (id) IN " + "(SELECT v15.id FROM v15 INNER JOIN v11 on v11.v15_fk_id = v15.id INNER JOIN root on root.v11_fk_id = v11.id );" + "\n" + "INSERT INTO v15 (id, v14_fk_id)\t" + "SELECT '0001',(SELECT id FROM v14 WHERE id = '0001' LIMIT 1) " + "WHERE NOT EXISTS (SELECT 1 FROM v15 WHERE id = '0001') " + "AND EXISTS (SELECT id FROM v14 WHERE id = '0001' LIMIT 1);" + "\n" + "\n" + "DELETE FROM v14 WHERE (id) IN " + "(SELECT v14.id FROM v14 " + "INNER JOIN v15 on v15.v14_fk_id = v14.id " + "INNER JOIN v11 on v11.v15_fk_id = v15.id " + "INNER JOIN root on root.v11_fk_id = v11.id );" + "\n" + "INSERT INTO v14 (id, v15_fk_id, v16_fk_id)\t" + "SELECT '0001',(SELECT id FROM v15 WHERE id = '0001' LIMIT 1),(SELECT id FROM v16 WHERE id = '0001' LIMIT 1) " + "WHERE NOT EXISTS (SELECT 1 FROM v14 WHERE id = '0001') " + "AND EXISTS (SELECT id FROM v15 WHERE id = '0001' LIMIT 1) AND EXISTS (SELECT id FROM v16 WHERE id = '0001' LIMIT 1);" + "\n" + "\n" + "DELETE FROM v16 WHERE (id) IN " + "(SELECT v16.id FROM v16 " + "INNER JOIN v14 on v14.v16_fk_id = v16.id " + "INNER JOIN v15 on v15.v14_fk_id = v14.id " + "INNER JOIN v11 on v11.v15_fk_id = v15.id " + "INNER JOIN root on root.v11_fk_id = v11.id );" + "\n" + "INSERT INTO v16 (id)\t" + "SELECT '0001' " + "WHERE NOT EXISTS (SELECT 1 FROM v16 WHERE id = '0001') " + "AND ;" + "\n" + "\n" + "DELETE FROM v12 WHERE (id) IN " + "(SELECT v12.id FROM v12 " + "INNER JOIN root on root.v12_fk_id = v12.id );" + "\n" + "INSERT INTO v12 (id, v13_fk_id)\t" + "SELECT '0001',(SELECT id FROM v13 WHERE id = '0001' LIMIT 1) " + "WHERE NOT EXISTS (SELECT 1 FROM v12 WHERE id = '0001') " + "AND EXISTS (SELECT id FROM v13 WHERE id = '0001' LIMIT 1);" + "\n" + "\n" + "DELETE FROM v13 WHERE (id) IN " + "(SELECT v13.id FROM v13 " + "INNER JOIN v12 on v12.v13_fk_id = v13.id " + "INNER JOIN root on root.v12_fk_id = v12.id );" + "\n" + "INSERT INTO v13 (id, v14_fk_id)\t" + "SELECT '0001',(SELECT id FROM v14 WHERE id = '0001' LIMIT 1) " + "WHERE NOT EXISTS (SELECT 1 FROM v13 WHERE id = '0001') " + "AND EXISTS (SELECT id FROM v14 WHERE id = '0001' LIMIT 1);" + "\n", } // 02 Act printCleanTables( testCase.repo.DB, testCase.repo.Writer, testCase.schemaMetadata, testCase.startingTable, testCase.processedTables, testCase.path, testCase.options, ) writtenBuffer := testCase.repo.GetWriterBuffer() // 03 Assert if testCase.processedTables == nil { t.Errorf("processedTables is nil") } buffersEqual := reflect.DeepEqual(writtenBuffer, expectedWrittenBuffer) if !buffersEqual { t.Errorf("Buffers are not equal") } for node, isExported := range testCase.processedTables { if !isExported { t.Errorf(fmt.Sprintf("Node %v is not exported!", node)) } } // checks if all expected statements were indeed executed against the db if err := testCase.repo.ExpectationsWereMet(); err != nil { t.Errorf("Some nodes left unexported. 
Error message: %s", err) } } func TestPrintTableData(t *testing.T) { // 01 Arrange graph := TablesGraph{ // first order "root": []string{"v21", "v22"}, "v21": []string{"v25", "v26"}, "v22": []string{"v23"}, // second order "v23": []string{"v24"}, // third order with circular dependency "v24": []string{"v25", "v26"}, "v25": []string{"v24"}, "v26": []string{}, } root := "root" testCase := createTestCase(graph, root, PrintSqlOptions{PostOrderCallback: createCallback()}) // the data repository expects these statements in exactly this order testCase.repo.Expect("SELECT id FROM v26 WHERE (id) IN (('0001'));", testCase.schemaMetadata["v26"].Columns, 1) testCase.repo.Expect("SELECT id, v25_fk_id, v26_fk_id FROM v24 WHERE (id) IN (('0001'));", testCase.schemaMetadata["v24"].Columns, 1) testCase.repo.Expect("SELECT id, v24_fk_id FROM v25 WHERE id = $1;", testCase.schemaMetadata["v25"].Columns, 1) testCase.repo.Expect("SELECT id FROM v26 WHERE id = $1;", testCase.schemaMetadata["v26"].Columns, 1) testCase.repo.Expect("SELECT id, v24_fk_id FROM v25 WHERE (id) IN (('0001'));", testCase.schemaMetadata["v25"].Columns, 1) testCase.repo.Expect("SELECT id, v25_fk_id, v26_fk_id FROM v24 WHERE id = $1;", testCase.schemaMetadata["v24"].Columns, 1) testCase.repo.Expect("SELECT id, v25_fk_id, v26_fk_id FROM v21 WHERE (id) IN (('0001'));", testCase.schemaMetadata["v21"].Columns, 1) testCase.repo.Expect("SELECT id, v24_fk_id FROM v23 WHERE (id) IN (('0001'));", testCase.schemaMetadata["v23"].Columns, 1) testCase.repo.Expect("SELECT id, v23_fk_id FROM v22 WHERE (id) IN (('0001'));", testCase.schemaMetadata["v22"].Columns, 1) testCase.repo.Expect("SELECT id, v24_fk_id FROM v23 WHERE id = $1;", testCase.schemaMetadata["v23"].Columns, 1) testCase.repo.Expect("SELECT id, v21_fk_id, v22_fk_id FROM root WHERE (id) IN (('0001'));", testCase.schemaMetadata["root"].Columns, 1) testCase.repo.Expect("SELECT id, v25_fk_id, v26_fk_id FROM v21 WHERE id = $1;", testCase.schemaMetadata["v21"].Columns, 1) testCase.repo.Expect("SELECT id, v23_fk_id FROM v22 WHERE id = $1;", testCase.schemaMetadata["v22"].Columns, 1) // 02 Act orderedTables := getTablesExportOrder(testCase.schemaMetadata, testCase.startingTable, testCase.processedTables, testCase.path) exportTablesData( testCase.repo.DB, testCase.repo.Writer, testCase.schemaMetadata, orderedTables, testCase.dumper, testCase.options, ) // 03 Assert if testCase.processedTables == nil { t.Errorf("processedTables is nil") } for node, isExported := range testCase.processedTables { if !isExported { t.Errorf(fmt.Sprintf("Node %v is not exported!", node)) } } // checks if all expected statements were indeed executed against the db if err := testCase.repo.ExpectationsWereMet(); err != nil { t.Errorf("Some nodes left unexported.
Error message: %s", err) } } func TestFormatOnConflict(t *testing.T) { // 01 Arrange row := []sqlUtil.RowDataStructure{ {ColumnName: "org_id", Value: TableDump{}}, } table := schemareader.Table{Name: "rhnerrata"} expectedResult := "(advisory, org_id) WHERE org_id IS NOT NULL DO UPDATE SET " // 02 Act result := formatOnConflict(row, table) // 03 Assert if strings.Compare(result, expectedResult) != 0 { t.Errorf(fmt.Sprintf("Expected %s, but got %s", expectedResult, result)) } } func TestFormatOnConflictRhnConfigInfo(t *testing.T) { // 01 Arrange table := schemareader.Table{Name: "rhnconfiginfo"} type Case struct { row []sqlUtil.RowDataStructure constraint string } testCases := []Case{ { row: []sqlUtil.RowDataStructure{ {ColumnName: "username", Value: nil}, {ColumnName: "groupname", Value: nil}, {ColumnName: "filemode", Value: nil}, {ColumnName: "selinux_ctx", Value: TableDump{}}, {ColumnName: "symlink_target_filename_id", Value: TableDump{}}, }, // rhn_confinfo_s_se_uq constraint: "(symlink_target_filename_id, selinux_ctx) " + "WHERE username IS NULL " + "AND groupname IS NULL " + "AND filemode IS NULL " + "AND selinux_ctx IS NOT NULL " + "AND symlink_target_filename_id IS NOT NULL", }, { row: []sqlUtil.RowDataStructure{ {ColumnName: "username", Value: nil}, {ColumnName: "groupname", Value: nil}, {ColumnName: "filemode", Value: nil}, {ColumnName: "selinux_ctx", Value: nil}, {ColumnName: "symlink_target_filename_id", Value: TableDump{}}, }, // rhn_confinfo_s_uq constraint: "(symlink_target_filename_id) " + "WHERE username IS NULL " + "AND groupname IS NULL " + "AND filemode IS NULL " + "AND selinux_ctx IS NULL " + "AND symlink_target_filename_id IS NOT NULL", }, { row: []sqlUtil.RowDataStructure{ {ColumnName: "username", Value: TableDump{}}, {ColumnName: "groupname", Value: TableDump{}}, {ColumnName: "filemode", Value: TableDump{}}, {ColumnName: "selinux_ctx", Value: TableDump{}}, {ColumnName: "symlink_target_filename_id", Value: nil}, }, // rhn_confinfo_ugf_se_uq constraint: "(username, groupname, filemode, selinux_ctx) " + "WHERE username IS NOT NULL " + "AND groupname IS NOT NULL " + "AND filemode IS NOT NULL " + "AND selinux_ctx IS NOT NULL " + "AND symlink_target_filename_id IS NULL", }, { row: []sqlUtil.RowDataStructure{ {ColumnName: "username", Value: TableDump{}}, {ColumnName: "groupname", Value: TableDump{}}, {ColumnName: "filemode", Value: TableDump{}}, {ColumnName: "selinux_ctx", Value: nil}, {ColumnName: "symlink_target_filename_id", Value: nil}, }, // rhn_confinfo_ugf_uq constraint: "(username, groupname, filemode) " + "WHERE username IS NOT NULL " + "AND groupname IS NOT NULL " + "AND filemode IS NOT NULL " + "AND selinux_ctx IS NULL " + "AND symlink_target_filename_id IS NULL", }, } for i, c := range testCases { // 02 Act result := formatOnConflict(c.row, table) // 03 Assert expected := c.constraint + " DO UPDATE SET " if strings.Compare(result, expected) != 0 { t.Errorf(fmt.Sprintf("Case # %d: expected %s, but got %s", i, expected, result)) } } } // createTestCase is a factory method for writerTestCase func createTestCase(graph TablesGraph, root string, options PrintSqlOptions) writerTestCase { repo := tests.CreateDataRepository() tablesMetaData, dataDumper := initializeMetaDataGraph(graph, root) return writerTestCase{ repo, tablesMetaData, tablesMetaData[root], dataDumper, func(table schemareader.Table) string { return "" }, map[string]bool{}, []string{}, []string{}, options, } } 
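The mocked repository used throughout these tests (tests.DataRepository with its Expect and ExpectationsWereMet helpers) follows the pattern of DATA-DOG/go-sqlmock, which go.mod lists as a dependency. Below is a minimal, self-contained sketch of that pattern; it is illustrative only, not part of the archive, and assumes nothing beyond the public go-sqlmock API:

package main

import (
	"fmt"

	sqlmock "github.com/DATA-DOG/go-sqlmock"
)

func main() {
	// Open a mock database; no real PostgreSQL connection is involved.
	db, mock, err := sqlmock.New()
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Register the statement the code under test is expected to run,
	// together with the single row it should receive back.
	mock.ExpectQuery("SELECT id, v05_fk_id FROM v04").
		WillReturnRows(sqlmock.NewRows([]string{"id", "v05_fk_id"}).AddRow("0001", "0001"))

	// The code under test would issue this query through the *sql.DB handle.
	rows, err := db.Query("SELECT id, v05_fk_id FROM v04 ;")
	if err != nil {
		panic(err)
	}
	rows.Close()

	// Fails if any registered expectation was not met, mirroring
	// testCase.repo.ExpectationsWereMet() in the tests above.
	if err := mock.ExpectationsWereMet(); err != nil {
		fmt.Println("unmet expectations:", err)
	}
}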
07070100000027000041ED000003E800000064000000016613BCD000000000000000000000000000000000000000000000001F00000000inter-server-sync/entityDumper07070100000028000081A4000003E800000064000000016613BCD000002551000000000000000000000000000000000000003400000000inter-server-sync/entityDumper/channelDataDumper.go// SPDX-FileCopyrightText: 2023 SUSE LLC // // SPDX-License-Identifier: Apache-2.0 package entityDumper import ( "bufio" "database/sql" "fmt" "io" "os" "strings" "github.com/rs/zerolog/log" "github.com/uyuni-project/inter-server-sync/dumper" "github.com/uyuni-project/inter-server-sync/dumper/packageDumper" "github.com/uyuni-project/inter-server-sync/schemareader" "github.com/uyuni-project/inter-server-sync/sqlUtil" "github.com/uyuni-project/inter-server-sync/utils" ) // tablesToClean represents tables which need to be cleaned in case there is a record on the client side that no longer exists on the master side var tablesToClean = []string{"rhnreleasechannelmap", "rhndistchannelmap", "rhnchannelerrata", "rhnchannelpackage", "rhnerratapackage", "rhnerratafile", "rhnerratafilechannel", "rhnerratafilepackage", "rhnerratafilepackagesource", "rhnerratabuglist", "rhnerratacve", "rhnerratakeyword", "susemddata", "suseproductchannel", "rhnchannelcloned", "rhnpackageextratag"} // onlyIfParentExistsTables represents tables whose records are inserted only if the parent record exists var onlyIfParentExistsTables = []string{"rhnchannelcloned", "rhnerratacloned", "suseproductchannel"} // SoftwareChannelTableNames is the list of names of tables relevant for exporting software channels func SoftwareChannelTableNames() []string { return []string{ // software channel data tables "rhnchannel", "rhnchannelcloned", // add only if there are corresponding rows in rhnchannel "suseproductchannel", // add only if there are corresponding rows in rhnchannel // clean "rhnproductname", "rhnchannelproduct", "rhnreleasechannelmap", // clean "rhndistchannelmap", // clean "rhnchannelcomps", "rhnchannelfamilymembers", "rhnerrata", "rhnerratacloned", // add only if there are corresponding rows in rhnerrata "rhnchannelerrata", // clean "rhnpackagenevra", "rhnpackagename", "rhnpackagegroup", "rhnpackageevr", "rhnchecksum", "rhnpackage", "rhnchannelpackage", // clean "rhnerratapackage", // clean "rhnerratafile", // clean "rhnerratafilechannel", // clean "rhnerratafilepackage", // clean "rhnerratafilepackagesource", // clean "rhnpackagekeyassociation", "rhnpackagekey", "rhnerratabuglist", // clean "rhncve", "rhnerratacve", // clean "rhnerratakeyword", // clean "rhnpackagecapability", "rhnpackagebreaks", "rhnpackagechangelogdata", "rhnpackagechangelogrec", "rhnpackageconflicts", "rhnpackageenhances", "rhnpackageextratag", "rhnpackageextratagkey", "rhnpackagefile", "rhnpackageobsoletes", "rhnpackagepredepends", "rhnpackageprovides", "rhnpackagerecommends", "rhnpackagerequires", "rhnsourcerpm", "rhnpackagesource", "rhnpackagesuggests", "rhnpackagesupplements", "susemddata", // clean "susemdkeyword", // clean } } func ProductsTableNames() []string { return []string{ // product data tables "suseproducts", // clean "suseproductextension", // clean "suseproductsccrepository", // clean "susesccrepository", // clean "suseupgradepath", // clean // product data tables "rhnchannelfamily", "rhnpublicchannelfamily", } } func validateExportFolder(outputFolderAbs string) { err := utils.FolderExists(outputFolderAbs) if err != nil { if os.IsNotExist(err) { err := os.MkdirAll(outputFolderAbs, 0755) if err != nil {
log.Fatal().Err(err).Msg("Error creating directory") } } else { log.Fatal().Err(err).Msg("Error getting output folder") } } outputFolder, _ := os.Open(outputFolderAbs) defer outputFolder.Close() _, errEmpty := outputFolder.Readdirnames(1) // Or f.Readdir(1) if errEmpty != io.EOF { log.Fatal().Msg(fmt.Sprintf("export location is not empty: %s", outputFolderAbs)) } } var childChannelSql = "select label from rhnchannel " + "where parent_channel = (select id from rhnchannel where label = $1)" var singleChannelSql = "select label from rhnchannel " + "where label = $1" func loadChannelsToProcess(db *sql.DB, options DumperOptions) []string { log.Trace().Msg("Loading channel list") channels := channelsProcess{make(map[string]bool), make([]string, 0)} for _, singleChannel := range options.ChannelLabels { if _, ok := channels.channelsMap[singleChannel]; !ok { dbChannel := sqlUtil.ExecuteQueryWithResults(db, singleChannelSql, singleChannel) if len(dbChannel) == 0 { log.Fatal().Msgf("Channel not found: %s", singleChannel) } channels.addChannelLabel(singleChannel) } } for _, channelChildren := range options.ChannelWithChildrenLabels { if _, ok := channels.channelsMap[channelChildren]; !ok { dbChannel := sqlUtil.ExecuteQueryWithResults(db, singleChannelSql, channelChildren) if len(dbChannel) == 0 { log.Fatal().Msgf("Channel not found: %s", channelChildren) } channels.addChannelLabel(channelChildren) childrenChannels := sqlUtil.ExecuteQueryWithResults(db, childChannelSql, channelChildren) for _, cChannel := range childrenChannels { cLabel := fmt.Sprintf("%v", cChannel[0].Value) if _, okC := channels.channelsMap[cLabel]; !okC { channels.addChannelLabel(cLabel) } } } } log.Debug().Msgf("Channels to export: %s", strings.Join(channels.channels, ",")) return channels.channels } func processAndInsertProducts(db *sql.DB, writer *bufio.Writer) { log.Trace().Msg("Processing product tables") schemaMetadata := schemareader.ReadTablesSchema(db, ProductsTableNames()) startingTables := []schemareader.Table{schemaMetadata["suseproducts"]} var whereFilterClause = func(table schemareader.Table) string { filterOrg := "" if _, ok := table.ColumnIndexes["org_id"]; ok { filterOrg = " where org_id is null" } return filterOrg } dumper.DumpAllTablesData(db, writer, schemaMetadata, startingTables, whereFilterClause, onlyIfParentExistsTables) writer.WriteString("-- end of product tables") writer.WriteString("\n") log.Debug().Msg("products export done") } func processAndInsertChannels(db *sql.DB, writer *bufio.Writer, options DumperOptions) { channels := loadChannelsToProcess(db, options) log.Info().Msg(fmt.Sprintf("%d channels to process", len(channels))) schemaMetadata := schemareader.ReadTablesSchema(db, SoftwareChannelTableNames()) log.Debug().Msg("channel schema metadata loaded") fileChannels, err := os.Create(options.GetOutputFolderAbsPath() + "/exportedChannels.txt") if err != nil { log.Panic().Err(err).Msg("error creating sql file") } defer fileChannels.Close() bufferWriterChannels := bufio.NewWriter(fileChannels) defer bufferWriterChannels.Flush() count := 0 for _, channelLabel := range channels { count++ log.Info().Msg(fmt.Sprintf("Processing channel [%d/%d] %s", count, len(channels), channelLabel)) processChannel(db, writer, channelLabel, schemaMetadata, options) writer.Flush() bufferWriterChannels.WriteString(fmt.Sprintf("%s\n", channelLabel)) } } func processChannel(db *sql.DB, writer *bufio.Writer, channelLabel string, schemaMetadata map[string]schemareader.Table, options DumperOptions) { whereFilter := 
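/* Illustrative walk-through (hypothetical labels, not from the archive): with options.ChannelWithChildrenLabels = []string{"sles15-pool-x86_64"}, loadChannelsToProcess above first verifies the parent label via singleChannelSql (and fails fast if it does not exist), then childChannelSql resolves its children, e.g. "sles15-updates-x86_64", and addChannelLabel records each label exactly once thanks to channelsMap, so the export later processes the parent followed by its children without duplicates. */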
fmt.Sprintf("label = '%s'", channelLabel) tableData := dumper.DataCrawler(db, schemaMetadata, schemaMetadata["rhnchannel"], whereFilter, options.StartingDate) if log.Debug().Enabled() { totalRows := 0 for _, value := range tableData.TableData { totalRows = totalRows + len(value.KeyMap) } log.Debug().Msgf("finished table data crawler. Total database rows to export: %d", totalRows) } cleanWhereClause := fmt.Sprintf(`WHERE rhnchannel.id = (SELECT id FROM rhnchannel WHERE label = '%s')`, channelLabel) printOptions := dumper.PrintSqlOptions{ TablesToClean: tablesToClean, CleanWhereClause: cleanWhereClause, OnlyIfParentExistsTables: onlyIfParentExistsTables} dumper.PrintTableDataOrdered(db, writer, schemaMetadata, schemaMetadata["rhnchannel"], tableData, printOptions) log.Debug().Msg("finished print table order") generateCacheCalculation(channelLabel, writer) if !options.MetadataOnly { log.Debug().Msg("dumping all package files") packageDumper.DumpPackageFiles(db, schemaMetadata, tableData, options.GetOutputFolderAbsPath()) } log.Debug().Msg("channel export finished") } func generateCacheCalculation(channelLabel string, writer *bufio.Writer) { // need to update channel modify since it's use to run repo metadata generation updateChannelModifyDate := fmt.Sprintf("update rhnchannel set modified = current_timestamp where label = '%s';", channelLabel) writer.WriteString(updateChannelModifyDate + "\n") // force system updates packages/patches for system using the channel serverErrataCache := fmt.Sprintf("select rhn_channel.update_needed_cache((select id from rhnchannel where label ='%s'));", channelLabel) writer.WriteString(serverErrataCache + "\n") // refreshes the package newest page channelNewPackages := fmt.Sprintf("select rhn_channel.refresh_newest_package((select id from rhnchannel where label ='%s'), 'inter-server-sync');", channelLabel) writer.WriteString(channelNewPackages + "\n") // generates the repository metadata on disk repoMetadata := fmt.Sprintf(` INSERT INTO rhnRepoRegenQueue (id, channel_label, client, reason, force, bypass_filters, next_action, created, modified) VALUES (null, '%s', 'inter server sync v2', 'channel sync', 'N', 'N', current_timestamp, current_timestamp, current_timestamp); `, channelLabel) writer.WriteString(repoMetadata + "\n") } 07070100000029000081A4000003E800000064000000016613BCD0000011FD000000000000000000000000000000000000003600000000inter-server-sync/entityDumper/configurationDumper.go// SPDX-FileCopyrightText: 2023 SUSE LLC // // SPDX-License-Identifier: Apache-2.0 package entityDumper import ( "bufio" "database/sql" "fmt" "os" "strings" "github.com/rs/zerolog/log" "github.com/uyuni-project/inter-server-sync/dumper" "github.com/uyuni-project/inter-server-sync/schemareader" "github.com/uyuni-project/inter-server-sync/sqlUtil" ) func ConfigTableNames() []string { return []string{ "rhnconfigfile", "rhnconfigfilename", "rhnconfigrevision", "rhnconfigcontent", "rhnconfigchannel", "rhnregtokenconfigchannels", "rhnserverconfigchannel", "rhnsnapshotconfigchannel", "susestaterevisionconfigchannel", "rhnconfiginfo", "rhnconfigfilefailure", "rhnchecksum", } } func loadConfigsToProcess(db *sql.DB, options DumperOptions) []string { labels := channelsProcess{make(map[string]bool), make([]string, 0)} for _, singleChannel := range options.ConfigLabels { if _, ok := labels.channelsMap[singleChannel]; !ok { labels.addChannelLabel(singleChannel) } } return labels.channels } func processConfigs(db *sql.DB, writer *bufio.Writer, options DumperOptions) { configs := 
loadConfigsToProcess(db, options) log.Info().Msg(fmt.Sprintf("%d configuration channels to process", len(configs))) schemaMetadata := schemareader.ReadTablesSchema(db, ConfigTableNames()) log.Debug().Msg("channel schema metadata loaded") configLabels, err := os.Create(options.GetOutputFolderAbsPath() + "/exportedConfigs.txt") if err != nil { log.Panic().Err(err).Msg("error creating exportedConfigChannel file") } defer configLabels.Close() bufferWriterChannels := bufio.NewWriter(configLabels) defer bufferWriterChannels.Flush() count := 0 for _, l := range configs { count++ log.Debug().Msg(fmt.Sprintf("Processing channel [%d/%d] %s", count, len(configs), l)) processConfigChannel(db, writer, l, schemaMetadata, options) writer.Flush() bufferWriterChannels.WriteString(fmt.Sprintf("%s\n", l)) } } func processConfigChannel(db *sql.DB, writer *bufio.Writer, channelLabel string, schemaMetadata map[string]schemareader.Table, options DumperOptions) { whereFilter := fmt.Sprintf("label = '%s'", channelLabel) tableData := dumper.DataCrawler(db, schemaMetadata, schemaMetadata["rhnconfigchannel"], whereFilter, options.StartingDate) log.Debug().Msg("finished table data crawler") cleanWhereClause := fmt.Sprintf(`WHERE rhnconfigchannel.id = (SELECT id FROM rhnconfigchannel WHERE label = '%s')`, channelLabel) printOptions := dumper.PrintSqlOptions{ TablesToClean: tablesToClean, CleanWhereClause: cleanWhereClause, OnlyIfParentExistsTables: onlyIfParentExistsTables, PostOrderCallback: createPostOrderCallback(), } dumper.PrintTableDataOrdered(db, writer, schemaMetadata, schemaMetadata["rhnconfigchannel"], tableData, printOptions) log.Debug().Msg("finished print table order") log.Info().Msg("config channel export finished") } func createPostOrderCallback() dumper.Callback { return func(db *sql.DB, writer *bufio.Writer, schemaMetadata map[string]schemareader.Table, table schemareader.Table, data dumper.DataDumper) { tableData, dataOK := data.TableData[table.Name] if strings.Compare(table.Name, "rhnconfigfile") == 0 { if dataOK { exportPoint := 0 batch := 100 for len(tableData.Keys) > exportPoint { upperLimit := exportPoint + batch if upperLimit > len(tableData.Keys) { upperLimit = len(tableData.Keys) } rows := dumper.GetRowsFromKeys(db, table, tableData.Keys[exportPoint:upperLimit]) for _, rowValue := range rows { rowValue = dumper.SubstituteForeignKey(db, table, schemaMetadata, rowValue) updateString := genUpdateForReference(rowValue) writer.WriteString(updateString + "\n") } exportPoint = upperLimit } } } } } func genUpdateForReference(value []sqlUtil.RowDataStructure) string { var updateString string var latestConfigRevisionId, configFileNameId, configChannelId interface{} for _, field := range value { if strings.Compare(field.ColumnName, "latest_config_revision_id") == 0 { latestConfigRevisionId = field.Value } if strings.Compare(field.ColumnName, "config_file_name_id") == 0 { configFileNameId = field.Value } if strings.Compare(field.ColumnName, "config_channel_id") == 0 { configChannelId = field.Value } } updateString = fmt.Sprintf("update rhnconfigfile set latest_config_revision_id = (%s) where config_file_name_id = (%s) and config_channel_id = (%s);", latestConfigRevisionId, configFileNameId, configChannelId) return updateString } 0707010000002A000081A4000003E800000064000000016613BCD0000004D1000000000000000000000000000000000000002900000000inter-server-sync/entityDumper/dumper.go// SPDX-FileCopyrightText: 2023 SUSE LLC // // SPDX-License-Identifier: Apache-2.0 package entityDumper import ( "bufio" 
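/* Example of the statement genUpdateForReference in configurationDumper.go above emits for one exported rhnconfigfile row, with hypothetical already-resolved ids 10/20/30: update rhnconfigfile set latest_config_revision_id = (10) where config_file_name_id = (20) and config_channel_id = (30); after dumper.SubstituteForeignKey each id may instead be a (SELECT ...) subquery, which is why the template parenthesizes the values. Illustrative only. */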
"compress/gzip" "os" "github.com/rs/zerolog/log" "github.com/uyuni-project/inter-server-sync/schemareader" ) func DumpAllEntities(options DumperOptions) { var outputFolderAbs = options.GetOutputFolderAbsPath() validateExportFolder(outputFolderAbs) file, err := os.OpenFile(outputFolderAbs+"/sql_statements.sql.gz", os.O_WRONLY|os.O_CREATE, 0600) if err != nil { log.Panic().Err(err).Msg("error creating sql file") } defer file.Close() gzipFile := gzip.NewWriter(file) defer gzipFile.Close() bufferWriter := bufio.NewWriterSize(gzipFile, 32768) defer bufferWriter.Flush() db := schemareader.GetDBconnection(options.ServerConfig) defer db.Close() bufferWriter.WriteString("BEGIN;\n") if len(options.ChannelLabels) > 0 || len(options.ChannelWithChildrenLabels) > 0 { processAndInsertProducts(db, bufferWriter) processAndInsertChannels(db, bufferWriter, options) } if len(options.ConfigLabels) > 0 { processConfigs(db, bufferWriter, options) } if options.OSImages || options.Containers { dumpImageData(db, bufferWriter, options) } bufferWriter.WriteString("COMMIT;\n") } 0707010000002B000081A4000003E800000064000000016613BCD000002FBE000000000000000000000000000000000000003200000000inter-server-sync/entityDumper/imageDataDumper.go// SPDX-FileCopyrightText: 2023 SUSE LLC // // SPDX-License-Identifier: Apache-2.0 package entityDumper import ( "bufio" "database/sql" "fmt" "path/filepath" "strings" "github.com/rs/zerolog/log" "github.com/uyuni-project/inter-server-sync/dumper" "github.com/uyuni-project/inter-server-sync/dumper/osImageDumper" "github.com/uyuni-project/inter-server-sync/dumper/pillarDumper" "github.com/uyuni-project/inter-server-sync/schemareader" "github.com/uyuni-project/inter-server-sync/sqlUtil" ) // TablesToClean represents Tables which needs to be cleaned in case on client side there is a record that doesn't exist anymore on master side var tablesToClean_images = []string{ "suseimageinfochannel", } // Activation keys are not exported - they are managed by uyuni formulas and/or XMLRPC/salt calls // If correct activation key is not present, OS images, particularly saltboot images, may not finish bootstrap correctly var imagesTableNames = []string{ // stores "suseImageStore", "suseImageStoreType", "suseCredentials", // profiles "suseImageProfile", "suseKiwiProfile", "suseDockerfileProfile", "rhnRegToken", // images "rhnchecksum", "suseImageFile", "suseImageInfo", "suseImageInfoChannel", "suseImageInfoPackage", "suseimageinfoinstalledproduct", "suseImageOverview", "susecveimagechannel", "suseImageCustomDataValue", // packages in image - this is needed because of custom rpm with SSL certificate "rhnpackageevr", "rhnpackagearch", "rhnpackagename", // generic table for pillars "suseSaltPillar", } func markAsExported(schema map[string]schemareader.Table, tables []string) { for _, table := range tables { tmp := schema[table] tmp.Export = false schema[table] = tmp } } func markAsUnexported(schema map[string]schemareader.Table, tables []string) { for _, table := range tables { tmp := schema[table] tmp.Export = true schema[table] = tmp } } func isColumnInTable(schema map[string]schemareader.Table, table string, column string) bool { columns := schema[table].Columns for _, c := range columns { if strings.Compare(c, column) == 0 { return true } } return false } func dumpImageStores(db *sql.DB, writer *bufio.Writer, schemaMetadata map[string]schemareader.Table, options DumperOptions, store_label string) { sqlForExistingStores := fmt.Sprintf( "SELECT sis.id from suseimagestore AS sis JOIN suseimagestoretype AS 
sist ON sis.store_type_id = sist.id WHERE sist.label = '%s'", store_label) for _, org := range options.Orgs { sqlForExistingStores = fmt.Sprintf("%s AND sis.org_id = %d", sqlForExistingStores, org) } if options.StartingDate != "" { sqlForExistingStores = fmt.Sprintf("%s AND sis.modified > '%s'::timestamp", sqlForExistingStores, options.StartingDate) } stores := sqlUtil.ExecuteQueryWithResults(db, sqlForExistingStores) if len(stores) > 0 { log.Debug().Msgf("Dumping ImageStores tables for label %s", store_label) writer.WriteString(fmt.Sprintf("-- %s Image Stores\n", store_label)) for _, store := range stores { log.Trace().Msgf("Exporting store id %s", store[0].Value) whereClause := fmt.Sprintf("id = '%s'", store[0].Value) tableProfilesData := dumper.DataCrawler(db, schemaMetadata, schemaMetadata["suseimagestore"], whereClause, options.StartingDate) dumper.PrintTableDataOrdered(db, writer, schemaMetadata, schemaMetadata["suseimagestore"], tableProfilesData, dumper.PrintSqlOptions{}) } // Mark tables as exported so they are not transitively exported by profiles markAsExported(schemaMetadata, []string{"suseimagestore"}) } else { log.Info().Msg("No image stores found to export") } } /* * Dump OS image tables, return true if additional data (pillars, images) also needs to be dumped */ func dumpOSImageTables(db *sql.DB, writer *bufio.Writer, schemaMetadata map[string]schemareader.Table, options DumperOptions, outputFolderImagesAbs string) bool { // Image profiles sqlForExistingProfiles := "SELECT profile_id FROM suseimageprofile WHERE image_type = 'kiwi'" for _, org := range options.Orgs { sqlForExistingProfiles = fmt.Sprintf("%s AND org_id = %d", sqlForExistingProfiles, org) } if options.StartingDate != "" { sqlForExistingProfiles = fmt.Sprintf("%s AND modified > '%s'::timestamp", sqlForExistingProfiles, options.StartingDate) } profiles := sqlUtil.ExecuteQueryWithResults(db, sqlForExistingProfiles) if len(profiles) > 0 { log.Debug().Msg("Dumping ImageProfile tables") writer.WriteString("-- OS Image Profiles\n") for _, profile := range profiles { log.Trace().Msgf("Exporting profile id %s", profile[0].Value) whereClause := fmt.Sprintf("profile_id = '%s'", profile[0].Value) tableProfilesData := dumper.DataCrawler(db, schemaMetadata, schemaMetadata["susekiwiprofile"], whereClause, options.StartingDate) dumper.PrintTableDataOrdered(db, writer, schemaMetadata, schemaMetadata["susekiwiprofile"], tableProfilesData, dumper.PrintSqlOptions{}) } // Mark tables as exported so they are not transitively exported by images markAsExported(schemaMetadata, []string{"suseimageprofile"}) } else { log.Info().Msg("No Kiwi profiles found to export") } // Images needExtraExport := false sqlForExistingImages := "SELECT id FROM suseimageinfo WHERE image_type = 'kiwi'" if isColumnInTable(schemaMetadata, "suseimageinfo", "built") { // For 4.3 and newer, export only successfully built images sqlForExistingImages = fmt.Sprintf("%s AND built = 'Y'", sqlForExistingImages) } for _, org := range options.Orgs { sqlForExistingImages = fmt.Sprintf("%s AND org_id = %d", sqlForExistingImages, org) } if options.StartingDate != "" { sqlForExistingImages = fmt.Sprintf("%s AND modified > '%s'::timestamp", sqlForExistingImages, options.StartingDate) } images := sqlUtil.ExecuteQueryWithResults(db, sqlForExistingImages) if len(images) > 0 { dumperOptions := dumper.PrintSqlOptions{ OnlyIfParentExistsTables: []string{"suseimageinfochannel"}, } log.Debug().Msg("Dumping Image tables") writer.WriteString("-- OS Images\n") for _, image := range images
{ log.Trace().Msgf("Exporting image id %s", image[0].Value) whereClause := fmt.Sprintf("id = '%s'", image[0].Value) tableImageData := dumper.DataCrawler(db, schemaMetadata, schemaMetadata["suseimageinfo"], whereClause, options.StartingDate) dumper.PrintTableDataOrdered(db, writer, schemaMetadata, schemaMetadata["suseimageinfo"], tableImageData, dumperOptions) // Check if pillars are already in database if _, ok := tableImageData.TableData["susesaltpillar"]; ok && !options.MetadataOnly { // pillars in database, files must be as well // export all metadata about images, but skip linked suseimageinfo markAsExported(schemaMetadata, []string{"suseimageinfo"}) whereClauseImageFiles := fmt.Sprintf("image_info_id = '%s'", image[0].Value) tableImageFilesData := dumper.DataCrawler(db, schemaMetadata, schemaMetadata["suseimagefile"], whereClauseImageFiles, options.StartingDate) dumper.PrintTableDataOrdered(db, writer, schemaMetadata, schemaMetadata["suseimagefile"], tableImageFilesData, dumper.PrintSqlOptions{}) // find all local (not-external) image files for the image and export their files sqlForExistingLocalImageFiles := fmt.Sprintf("SELECT file, org_id FROM suseimagefile AS sif JOIN suseimageinfo AS sii "+ "ON sif.image_info_id = sii.id WHERE sii.id = '%s' AND external = 'N'", image[0].Value) imageFiles := sqlUtil.ExecuteQueryWithResults(db, sqlForExistingLocalImageFiles) for _, imageFile := range imageFiles { // source is taken from basedir + org + filename from db // output should be base abs dir + org + filename from db file := (imageFile[0].Value).(string) org := fmt.Sprintf("%s", imageFile[1].Value) source := osImageDumper.GetImagePathForImage(file, org) target := osImageDumper.GetImagePathForImage(file, org, outputFolderImagesAbs) osImageDumper.DumpOsImage(target, source) } // we marked this as exported for image files, now we need to unexport for the rest of the images markAsUnexported(schemaMetadata, []string{"suseimageinfo"}) } else { // pillars and thus image files are not in database, need extra export step needExtraExport = true } } } log.Info().Msg("Kiwi image export done") return needExtraExport } func dumpContainerImageTables(db *sql.DB, writer *bufio.Writer, schemaMetadata map[string]schemareader.Table, options DumperOptions) { // Image profiles sqlForExistingProfiles := "SELECT profile_id FROM suseimageprofile WHERE image_type = 'dockerfile'" for _, org := range options.Orgs { sqlForExistingProfiles = fmt.Sprintf("%s AND org_id = %d", sqlForExistingProfiles, org) } if options.StartingDate != "" { sqlForExistingProfiles = fmt.Sprintf("%s AND modified > '%s'::timestamp", sqlForExistingProfiles, options.StartingDate) } profiles := sqlUtil.ExecuteQueryWithResults(db, sqlForExistingProfiles) if len(profiles) > 0 { log.Debug().Msg("Dumping ImageProfile tables") writer.WriteString("-- Dockerfile Profiles\n") for _, profile := range profiles { log.Trace().Msgf("Exporting profile id %s", profile[0].Value) whereClause := fmt.Sprintf("profile_id = '%s'", profile[0].Value) tableProfilesData := dumper.DataCrawler(db, schemaMetadata, schemaMetadata["susedockerfileprofile"], whereClause, options.StartingDate) dumper.PrintTableDataOrdered(db, writer, schemaMetadata, schemaMetadata["susedockerfileprofile"], tableProfilesData, dumper.PrintSqlOptions{}) } markAsExported(schemaMetadata, []string{"suseimageprofile"}) } else { log.Info().Msg("No profiles found to export") } // Images sqlForExistingImages := "SELECT id FROM suseimageinfo WHERE image_type = 'dockerfile'" if 
isColumnInTable(schemaMetadata, "suseimageinfo", "built") { // For 4.3 and newer, export only successfully built images sqlForExistingImages = fmt.Sprintf("%s AND built = 'Y'", sqlForExistingImages) } for _, org := range options.Orgs { sqlForExistingImages = fmt.Sprintf("%s AND org_id = %d", sqlForExistingImages, org) } if options.StartingDate != "" { sqlForExistingImages = fmt.Sprintf("%s AND modified > '%s'::timestamp", sqlForExistingImages, options.StartingDate) } images := sqlUtil.ExecuteQueryWithResults(db, sqlForExistingImages) if len(images) > 0 { log.Debug().Msg("Dumping Image tables") writer.WriteString("-- Dockerfile Images\n") for _, image := range images { log.Trace().Msgf("Exporting image id %s", image[0].Value) whereClause := fmt.Sprintf("id = '%s'", image[0].Value) tableImageData := dumper.DataCrawler(db, schemaMetadata, schemaMetadata["suseimageinfo"], whereClause, options.StartingDate) dumper.PrintTableDataOrdered(db, writer, schemaMetadata, schemaMetadata["suseimageinfo"], tableImageData, dumper.PrintSqlOptions{}) } } log.Info().Msg("Dockerfile image export done") } // Main entry point func dumpImageData(db *sql.DB, writer *bufio.Writer, options DumperOptions) { log.Debug().Msg("Starting image metadata dump") var outputFolderAbs = options.GetOutputFolderAbsPath() // export DB data about images log.Trace().Msg("Loading table schema") schemaMetadata := schemareader.ReadTablesSchema(db, imagesTableNames) if options.OSImages { var outputFolderImagesAbs = filepath.Join(outputFolderAbs, "images") ValidateExportFolder(outputFolderImagesAbs) dumpImageStores(db, writer, schemaMetadata, options, "os_image") if dumpOSImageTables(db, writer, schemaMetadata, options, outputFolderImagesAbs) { var outputFolderPillarAbs = filepath.Join(outputFolderAbs, "images", "pillars") ValidateExportFolder(outputFolderPillarAbs) pillarDumper.DumpImagePillars(outputFolderPillarAbs, options.Orgs, options.ServerConfig) if !options.MetadataOnly { osImageDumper.DumpOsImages(outputFolderImagesAbs, options.Orgs) } } // This is needed for containers to be able to export their respective tables markAsUnexported(schemaMetadata, []string{"suseimagestore", "suseimageprofile"}) } if options.Containers { dumpImageStores(db, writer, schemaMetadata, options, "registry") dumpContainerImageTables(db, writer, schemaMetadata, options) } } 0707010000002C000081A4000003E800000064000000016613BCD0000003D9000000000000000000000000000000000000002800000000inter-server-sync/entityDumper/types.go// SPDX-FileCopyrightText: 2023 SUSE LLC // // SPDX-License-Identifier: Apache-2.0 package entityDumper import ( "github.com/uyuni-project/inter-server-sync/utils" ) type DumperOptions struct { ServerConfig string ChannelLabels []string ConfigLabels []string ChannelWithChildrenLabels []string OutputFolder string outputFolderAbsPath string MetadataOnly bool StartingDate string Containers bool OSImages bool Orgs []uint } func (opt *DumperOptions) GetOutputFolderAbsPath() string { if "" == opt.outputFolderAbsPath { opt.outputFolderAbsPath = utils.GetAbsPath(opt.OutputFolder) } return opt.outputFolderAbsPath } type channelsProcess struct { channelsMap map[string]bool channels []string } func (c *channelsProcess) addChannelLabel(label string) { c.channelsMap[label] = true c.channels = append(c.channels, label) } 0707010000002D000081A4000003E800000064000000016613BCD00000038E000000000000000000000000000000000000002800000000inter-server-sync/entityDumper/utils.go// SPDX-FileCopyrightText: 2023 SUSE LLC // // SPDX-License-Identifier: Apache-2.0 package
entityDumper import ( "fmt" "io" "os" "github.com/rs/zerolog/log" "github.com/uyuni-project/inter-server-sync/utils" ) func ValidateExportFolder(outputFolderAbs string) { ValidateExistingFolder(outputFolderAbs) outputFolder, _ := os.Open(outputFolderAbs) defer outputFolder.Close() _, errEmpty := outputFolder.Readdirnames(1) // Or f.Readdir(1) if errEmpty != io.EOF { log.Fatal().Msg(fmt.Sprintf("Export location is not empty: %s", outputFolderAbs)) } } func ValidateExistingFolder(outputFolderAbs string) { err := utils.FolderExists(outputFolderAbs) if err != nil { if os.IsNotExist(err) { err := os.MkdirAll(outputFolderAbs, 0755) if err != nil { log.Fatal().Err(err).Msg("Error creating directory") } } else { log.Fatal().Err(err).Msg("Error getting output folder") } } } 0707010000002E000081A4000003E800000064000000016613BCD00000018B000000000000000000000000000000000000001900000000inter-server-sync/go.modmodule github.com/uyuni-project/inter-server-sync go 1.20 require ( github.com/DATA-DOG/go-sqlmock v1.5.0 github.com/lib/pq v1.8.0 github.com/rs/zerolog v1.21.0 github.com/spf13/cobra v1.1.3 github.com/uyuni-project/xmlrpc-public-methods v0.0.0-20200805144514-2ca831c526d1 ) require ( github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect ) 0707010000002F000081A4000003E800000064000000016613BCD00000781F000000000000000000000000000000000000001900000000inter-server-sync/go.sumcloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix 
v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= 
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= 
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/lib/pq v1.8.0 h1:9xohqzkUwzR4Ga4ivdTcawVS89YSDVxXMa3xJX3cGzg= github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= 
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.21.0 h1:Q3vdXlfLNT+OftyBHsU0Y445MD+8m8axjKgf2si0QcM= github.com/rs/zerolog v1.21.0/go.mod h1:ZPhntP/xmq1nnND05hhpAh2QMhSsA4UN3MGZ6O2J3hM= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tmc/grpc-websocket-proxy 
v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/uyuni-project/xmlrpc-public-methods v0.0.0-20200805144514-2ca831c526d1 h1:sFoztHlAAdPjwujDzYeuKQ9RWAmyHAt4xdma+NHNCA8= github.com/uyuni-project/xmlrpc-public-methods v0.0.0-20200805144514-2ca831c526d1/go.mod h1:hS7daAcI0sq8g9GWR4cffika5MG3ClsWjvc/XongvCU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= 
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.0/go.mod 
h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= 07070100000030000081A4000003E800000064000000016613BCD000001672000000000000000000000000000000000000002C00000000inter-server-sync/inter-server-sync.changes------------------------------------------------------------------- Mon Apr 08 10:45:50 WEST 2024 - rmateus@suse.com - version 0.3.3-1 * Correct primary key export for table suseproductsccrepository (bsc#1220169) ------------------------------------------------------------------- Mon Jan 08 15:23:19 CET 2024 - michele.bussolotto@suse.com - version 0.3.2-1 * Use tito to push to OBS and help releasing * Fix conflict in rhndistchannelmap (bsc#1216114) ------------------------------------------------------------------- Thu Nov 2 12:01:52 UTC 2023 - Ricardo Mateus <rmateus@suse.com> - version 0.3.1 * Correct index sequence name for table rhnPackageExtraTagKey * Require at least Go 1.20 for building SUSE packages ------------------------------------------------------------------- Wed Aug 9 12:56:27 UTC 2023 - Witek Bedyk <witold.bedyk@suse.com> - version 0.3.0 * Require at least Go 1.19 for building due to CVE-2023-29409 * Require at least Go 1.18 for building Red Hat packages * CVE-2023-29409: Restrict RSA keys in certificates to less than or equal to 8192 bits to avoid DoSing client/server while validating signatures for extremely large RSA keys. (bsc#1213880) ------------------------------------------------------------------- Wed May 24 10:07:35 UTC 2023 - Cédric Bosdonnat <cbosdonnat@suse.com> - version 0.2.8 * Correctly detect product name and product version number * Import image channel data only when related software channel is available (bsc#1211330) ------------------------------------------------------------------- Wed Feb 8 10:41:54 UTC 2023 - Cédric Bosdonnat <cbosdonnat@suse.com> - version 0.2.7 * Do not update the pillars table if it does not exist, like in 4.2 ------------------------------------------------------------------- Mon Jan 9 14:33:35 UTC 2023 - Cédric Bosdonnat <cbosdonnat@suse.com> - version 0.2.6 * Export package extra tags for complete Debian repo metadata (bsc#1206375) * Replace URLs in OS Images pillars when exporting and importing images ------------------------------------------------------------------- Thu Nov 10 10:34:51 UTC 2022 - Ricardo Mateus <rmateus@suse.com> - version 0.2.5 * Correct error when importing without debug log level (bsc#1204699) ------------------------------------------------------------------- Wed Oct 12 10:31:36 UTC 2022 - Ricardo Mateus <rmateus@suse.com> - version 0.2.4 * Improve memory usage and log information #17193 * Conditional insert check for FK reference exists (bsc#1202785) * Correct navigation path for table rhnerratafilechannel (bsc#1202785) ------------------------------------------------------------------- Thu Jul 21 11:21:25 UTC 2022 - Artem Shiliaev <artem.shiliaev@suse.com> - version 0.2.3 * Compress exported SQL data and decompress during import #16631 * Add gzip dependency to decompress data file during import process ------------------------------------------------------------------- Tue May 17 14:24:28 UTC 2022 - Ricardo Mateus <rmateus@suse.com> - version 0.2.2 * Parameter --channel-with-children didn't export data (bsc#1199089) * Clean
rhnchannelcloned table to rebuild hierarchy (bsc#1197400) ------------------------------------------------------------------- Fri Apr 22 09:21:52 UTC 2022 - Ricardo Mateus <rmateus@suse.com> - version 0.2.1 * Correct sequence in use for table rhnpackagekey (bsc#1197400) * Make Docker image export compatible with SUSE Manager 4.2 ------------------------------------------------------------------- Wed Apr 6 17:06:47 UTC 2022 - Ricardo Mateus <rmateus@suse.com> - version 0.2.0 * Allow image export and import (OS-based and Docker) ------------------------------------------------------------------- Thu Mar 24 17:15:00 UTC 2022 - Ricardo Mateus <rmateus@suse.com> - version 0.1.0 * Allow export and import of configuration channels * Clean lookup cache after processing a channel (bsc#1195750) * Improve lookup method for generating the foreign key export ------------------------------------------------------------------- Sun Feb 14 11:36:09 UTC 2022 - Stefan Bluhm <stefan.bluhm@clacee.eu> - Adapted for build on Enterprise Linux 8. ------------------------------------------------------------------- Mon Jan 31 14:00:15 UTC 2022 - Ricardo Mateus <rmateus@suse.com> - version 0.0.7 * Correct database sequence name used for table rhnChecksum * Add support for partial exports based on a date (bsc#1195008) * Export table rhnpackagekey (bsc#1194764) ------------------------------------------------------------------- Tue Dec 21 15:38:27 UTC 2021 - Ricardo Mateus <rmateus@suse.com> - version 0.0.6 * Fix bug when exporting rhnpackagecapability table ------------------------------------------------------------------- Tue Sep 7 16:24:21 UTC 2021 - Ricardo Mateus <rmateus@suse.com> - Use systemd rpm macro instead of direct call to systemctl ------------------------------------------------------------------- Thu Sep 2 10:20:21 UTC 2021 - Jordi Massaguer <jmassaguerpla@suse.com> - Fix post section: use try-restart instead of restart, so we do not start rsyslog if it was not running before. ------------------------------------------------------------------- Wed Jul 28 14:56:36 UTC 2021 - Ricardo Mateus <rmateus@suse.com> - version 0.0.5 - Correctly export package changelog data ------------------------------------------------------------------- Tue Jul 23 17:24:50 UTC 2021 - Ricardo Mateus <rmateus@suse.com> - version 0.0.4 - Hidden dot sub-command ------------------------------------------------------------------- Tue May 25 06:11:50 UTC 2021 - Abid Mehmood <amehmood@suse.com> - version 0.0.1 - Import and Export tools for ISS v2 07070100000031000081A4000003E800000064000000016613BCD000000AFC000000000000000000000000000000000000002900000000inter-server-sync/inter-server-sync.spec# # spec file for package uyuni inter server sync # # Copyright (c) 2023 SUSE LLC # # All modifications and additions to the file contributed by third parties # remain the property of their copyright owners, unless otherwise agreed # upon. The license for this file, and modifications and additions to the # file, is the same license as for the pristine package itself (unless the # license for the pristine package is not an Open Source License, in which # case the license is the MIT License). An "Open Source License" is a # license that conforms to the Open Source Definition (Version 1.9) # published by the Open Source Initiative.
# Please submit bugfixes or comments via https://bugs.opensuse.org/ # %if 0%{?rhel} == 8 %global debug_package %{nil} %endif %if 0%{?rhel} # Fix ERROR: No build ID note found in %undefine _missing_build_ids_terminate_build %endif %global provider github %global provider_tld com %global org uyuni-project %global project inter-server-sync %global provider_prefix %{provider}.%{provider_tld}/%{org}/%{project} Name: %{project} Version: 0.3.3 Release: 1 Summary: Export/import data on a Uyuni server License: Apache-2.0 Group: System/Management URL: https://%{provider_prefix} Source0: %{name}-%{version}.tar.gz Source1: vendor.tar.gz BuildRequires: golang-packaging %if 0%{?rhel} BuildRequires: golang >= 1.18 %else BuildRequires: golang(API) >= 1.20 %endif BuildRequires: rsyslog Requires: gzip Requires: logrotate Requires: rsyslog Requires: systemd %description Uyuni inter server sync tool Used to export content from one server and import it into a target server. %prep %autosetup tar -zxf %{SOURCE1} %build export GOFLAGS=-mod=vendor %goprep %{provider_prefix} %gobuild -ldflags "-X github.com/uyuni-project/inter-server-sync/cmd.Version=%{version}" ... %install %goinstall %gosrc %gofilelist # Add config files for hub install -d -m 0750 %{buildroot}%{_var}/log/hub # Add syslog config to redirect logs to /var/log/hub/iss2.log install -D -m 0644 release/hub-iss-syslogs.conf %{buildroot}%{_sysconfdir}/rsyslog.d/hub-iss.conf # logrotate config install -D -m 0644 release/hub-iss-logrotate.conf %{buildroot}%{_sysconfdir}/logrotate.d/inter-server-sync %check %if 0%{?rhel} # Fix OBS debug_package execution. rm -f %{buildroot}/usr/lib/debug/%{_bindir}/%{name}-%{version}-*.debug %endif %post %if 0%{?rhel} %systemd_postun rsyslog.service %else %service_del_postun rsyslog.service %endif %files -f file.lst %defattr(-,root,root) %doc README.md %license LICENSES %{_bindir}/inter-server-sync %config(noreplace) %{_sysconfdir}/rsyslog.d/hub-iss.conf %config(noreplace) %{_sysconfdir}/logrotate.d/inter-server-sync %changelog 07070100000032000081A4000003E800000064000000016613BCD0000000BF000000000000000000000000000000000000001A00000000inter-server-sync/main.go// SPDX-FileCopyrightText: 2023 SUSE LLC // // SPDX-License-Identifier: Apache-2.0 package main import ( "github.com/uyuni-project/inter-server-sync/cmd" ) func main() { cmd.Execute() } 07070100000033000081A4000003E800000064000000016613BCD000000158000000000000000000000000000000000000001F00000000inter-server-sync/main_test.go// SPDX-FileCopyrightText: 2023 SUSE LLC // // SPDX-License-Identifier: Apache-2.0 package main import ( "os" "testing" ) func TestAppStartsWithArguments(t *testing.T) { // Arrange // set command line args name := os.Args[0] os.Args = []string{ name, "help", } // Act main() // Assert // checks for exit code implicitly } 07070100000034000041ED000003E800000064000000016613BCD000000000000000000000000000000000000000000000001A00000000inter-server-sync/release07070100000035000081A4000003E800000064000000016613BCD000000151000000000000000000000000000000000000003100000000inter-server-sync/release/hub-iss-logrotate.conf# SPDX-FileCopyrightText: 2023 SUSE LLC # # SPDX-License-Identifier: Apache-2.0 # logrotate configuration for inter-server-sync # /var/log/hub/iss2.log { weekly rotate 5 copytruncate compress notifempty missingok size=10M postrotate /usr/bin/systemctl reload rsyslog.service > /dev/null endscript }07070100000036000081A4000003E800000064000000016613BCD000000099000000000000000000000000000000000000002F00000000inter-server-sync/release/hub-iss-syslogs.conf#
SPDX-FileCopyrightText: 2023 SUSE LLC # # SPDX-License-Identifier: Apache-2.0 if $programname == 'inter-server-sync' then /var/log/hub/iss2.log & stop07070100000037000081A4000003E800000064000000016613BCD0000000BC000000000000000000000000000000000000002300000000inter-server-sync/rhn.conf.example# SPDX-FileCopyrightText: 2023 SUSE LLC # # SPDX-License-Identifier: Apache-2.0 db_backend = postgresql db_user = ... db_password = ... db_name = susemanager db_host = ... db_port = 5432 07070100000038000041ED000003E800000064000000016613BCD000000000000000000000000000000000000000000000001F00000000inter-server-sync/schemareader07070100000039000081A4000003E800000064000000016613BCD000000D0D000000000000000000000000000000000000002C00000000inter-server-sync/schemareader/constants.go// SPDX-FileCopyrightText: 2023 SUSE LLC // // SPDX-License-Identifier: Apache-2.0 package schemareader const ( ReadTableNames = `SELECT table_name FROM information_schema.tables WHERE table_schema = 'public' AND table_type = 'BASE TABLE';` ReadColumnNames = `SELECT column_name FROM information_schema.columns WHERE table_schema = 'public' AND table_name = $1 ORDER BY ordinal_position;` ReadPkColumnNames = `SELECT a.attname FROM pg_index i JOIN pg_attribute a ON a.attrelid = i.indrelid AND a.attnum = ANY(i.indkey) WHERE i.indrelid = $1::regclass AND i.indisprimary;` ReadUniqueIndexNames = `SELECT DISTINCT indexrelid::regclass FROM pg_index i JOIN pg_attribute a ON a.attrelid = i.indrelid AND a.attnum = ANY(i.indkey) WHERE i.indrelid = $1::regclass AND i.indisunique AND NOT i.indisprimary;` ReadIndexColumns = `SELECT DISTINCT a.attname FROM pg_index i JOIN pg_attribute a ON a.attrelid = i.indrelid AND a.attnum = ANY(i.indkey) WHERE indexrelid::regclass = $1::regclass;` ReadReferenceConstraintNames = `SELECT DISTINCT tc.constraint_name FROM information_schema.table_constraints AS tc JOIN information_schema.constraint_column_usage AS ccu ON ccu.constraint_name = tc.constraint_name AND ccu.table_schema = tc.table_schema WHERE tc.constraint_type = 'FOREIGN KEY' AND tc.table_name = $1;` ReadReferencedByConstraintNames = `SELECT DISTINCT tc.constraint_name FROM information_schema.table_constraints AS tc JOIN information_schema.constraint_column_usage AS ccu ON ccu.constraint_name = tc.constraint_name AND ccu.table_schema = tc.table_schema WHERE tc.constraint_type = 'FOREIGN KEY' AND ccu.table_name = $1;` ReadReferencedTable = `SELECT DISTINCT ccu.table_name FROM information_schema.constraint_column_usage AS ccu WHERE ccu.constraint_name = $1;` ReadReferencedByTable = `SELECT DISTINCT table_name FROM information_schema.table_constraints as tc WHERE tc.constraint_name = $1;` ReadReferenceConstraints = `SELECT DISTINCT kcu.column_name, ccu.column_name AS foreign_column_name FROM information_schema.table_constraints AS tc JOIN information_schema.key_column_usage AS kcu ON tc.constraint_name = kcu.constraint_name AND tc.table_schema = kcu.table_schema AND tc.table_name = kcu.table_name JOIN information_schema.constraint_column_usage AS ccu ON ccu.constraint_name = tc.constraint_name AND tc.table_schema = ccu.table_schema WHERE tc.constraint_type = 'FOREIGN KEY' AND tc.table_name = $1 AND tc.constraint_name = $2;` ReadPkSequence = `WITH sequences AS ( SELECT sequence_name FROM information_schema.sequences WHERE sequence_schema = 'public' ), id_constraints AS ( SELECT tc.constraint_name, tc.table_name, kcu.column_name FROM information_schema.table_constraints AS tc JOIN information_schema.key_column_usage AS kcu ON tc.constraint_name = 
kcu.constraint_name WHERE tc.constraint_schema = 'public' AND constraint_type = 'PRIMARY KEY' AND kcu.ordinal_position = 1 AND column_name = 'id' AND tc.table_name = $1 ) SELECT sequence_name FROM id_constraints JOIN sequences ON replace(regexp_replace(constraint_name, '(_id)?_pk(ey)?', ''), '_', '') = replace(regexp_replace(sequence_name, '(_id)?_seq', ''), '_', '')` ) 0707010000003A000081A4000003E800000064000000016613BCD000000692000000000000000000000000000000000000002D00000000inter-server-sync/schemareader/datasource.go// SPDX-FileCopyrightText: 2023 SUSE LLC // // SPDX-License-Identifier: Apache-2.0 package schemareader import ( "bufio" "database/sql" "fmt" "os" "strings" "github.com/rs/zerolog/log" ) type dataSource struct { host string port string dbname string user string password string } // GetConnectionString returns the connection string for the database after reading the configuration file func GetConnectionString(configFilePath string) string { file, err := os.Open(configFilePath) if err != nil { log.Panic().Err(err).Msg("error loading configuration file") } defer file.Close() dataSource := &dataSource{} scanner := bufio.NewScanner(file) for scanner.Scan() { line := scanner.Text() if equal := strings.Index(line, "="); equal >= 0 { value := "" if len(line) > equal { value = strings.TrimSpace(line[equal+1:]) } if key := strings.TrimSpace(line[:equal]); len(key) > 0 { switch key { case "db_host": dataSource.host = value case "db_port": dataSource.port = value case "db_name": dataSource.dbname = value case "db_user": dataSource.user = value case "db_password": dataSource.password = value } } } } return fmt.Sprintf("user='%s' password='%s' dbname='%s' host='%s' port='%s' sslmode=disable", dataSource.user, dataSource.password, dataSource.dbname, dataSource.host, dataSource.port) } // GetDBconnection returns the database connection func GetDBconnection(configFilePath string) *sql.DB { db, err := sql.Open("postgres", GetConnectionString(configFilePath)) if err != nil { log.Panic().Err(err).Msg("error getting connection to the database") } return db } 0707010000003B000081A4000003E800000064000000016613BCD0000007DA000000000000000000000000000000000000002600000000inter-server-sync/schemareader/dot.go// SPDX-FileCopyrightText: 2023 SUSE LLC // // SPDX-License-Identifier: Apache-2.0 package schemareader import ( "fmt" "strings" ) // DumpToGraphviz outputs a dot representation of a schema. Use: // go run . 
| dot -Tx11 func DumpToGraphviz(tables map[string]Table) { fmt.Printf("graph schema {\n") fmt.Printf(" layout=fdp;\n") fmt.Printf(" K=0.15;\n") fmt.Printf(" maxiter=1000;\n") fmt.Printf(" start=0;\n\n") for _, table := range tables { fmt.Printf("\"%s\" [shape=box];\n", table.Name) for _, column := range table.Columns { _, primary := table.PKColumns[column] color := "transparent" if primary { color = "gainsboro" } fmt.Printf("\"%s-%s\" [label=\"\" xlabel=\"%s\" style=filled fillcolor=\"%s\"];\n", table.Name, column, column, color) fmt.Printf("\"%s\" -- \"%s-%s\";\n", table.Name, table.Name, column) } if len(table.PKSequence) > 0 { fmt.Printf("\"%s-id-%s\" [label=\"%s\" shape=note];\n", table.Name, table.PKSequence, table.PKSequence) fmt.Printf("\"%s-id\" -- \"%s-id-%s\" [style=dashed];\n", table.Name, table.Name, table.PKSequence) } for _, index := range table.UniqueIndexes { label := "unique" if len(table.MainUniqueIndexName) > 0 { if strings.Compare(index.Name, table.MainUniqueIndexName) == 0 { label = "unique main" } } fmt.Printf("\"%s\" [label=\"%s\" shape=tab];\n", index.Name, label) for _, indexColumn := range index.Columns { fmt.Printf("\"%s-%s\" -- \"%s\" [style=dashed];\n", table.Name, indexColumn, index.Name) } } for i, reference := range table.References { fmt.Printf("\"%s-%s-%d\" [label=\"\" shape=diamond];\n", table.Name, reference.TableName, i) for column, foreignColumn := range reference.ColumnMapping { fmt.Printf("\"%s-%s-%d\" -- \"%s-%s\";\n", table.Name, reference.TableName, i, table.Name, column) fmt.Printf("\"%s-%s-%d\" -- \"%s-%s\";\n", table.Name, reference.TableName, i, reference.TableName, foreignColumn) } } } fmt.Printf("}") } 0707010000003C000081A4000003E800000064000000016613BCD000002DED000000000000000000000000000000000000002900000000inter-server-sync/schemareader/reader.go// SPDX-FileCopyrightText: 2023 SUSE LLC // // SPDX-License-Identifier: Apache-2.0 package schemareader import ( "database/sql" "strings" "github.com/rs/zerolog/log" ) func readTableNames(db *sql.DB) []string { sql := `SELECT table_name FROM information_schema.tables WHERE table_schema = 'public' AND table_type = 'BASE TABLE';` rows, err := db.Query(sql) if err != nil { log.Panic().Err(err).Msg("error executing database query") } result := make([]string, 0) for rows.Next() { var tableName string err := rows.Scan(&tableName) if err != nil { log.Panic().Err(err).Msg("error extracting row") } result = append(result, tableName) } return result } func readColumnNames(db *sql.DB, tableName string) []string { sql := `SELECT column_name FROM information_schema.columns WHERE table_schema = 'public' AND table_name = $1 ORDER BY ordinal_position;` rows, err := db.Query(sql, tableName) if err != nil { log.Panic().Err(err).Msg("error accessing the database") } defer rows.Close() result := make([]string, 0) for rows.Next() { var columnName string err := rows.Scan(&columnName) if err != nil { log.Panic().Err(err).Msg("error extracting row") } result = append(result, columnName) } return result } func readPKColumnNames(db *sql.DB, tableName string) []string { // https://wiki.postgresql.org/wiki/Retrieve_primary_key_columns sql := `SELECT a.attname FROM pg_index i JOIN pg_attribute a ON a.attrelid = i.indrelid AND a.attnum = ANY(i.indkey) WHERE i.indrelid = $1::regclass AND i.indisprimary;` rows, err := db.Query(sql, tableName) if err != nil { log.Panic().Err(err).Msg("error executing query") } defer rows.Close() result := make([]string, 0) for rows.Next() { var columnName string err := rows.Scan(&columnName) if 
err != nil { log.Panic().Err(err).Msg("error getting row data") } result = append(result, columnName) } return result } func readUniqueIndexNames(db *sql.DB, tableName string) []string { sql := `SELECT DISTINCT indexrelid::regclass FROM pg_index i JOIN pg_attribute a ON a.attrelid = i.indrelid AND a.attnum = ANY(i.indkey) WHERE i.indrelid = $1::regclass AND i.indisunique AND NOT i.indisprimary;` rows, err := db.Query(sql, tableName) if err != nil { log.Panic().Err(err).Msg("error executing query") } defer rows.Close() result := make([]string, 0) for rows.Next() { var name string err := rows.Scan(&name) if err != nil { log.Panic().Err(err).Msg("error getting column data") } result = append(result, name) } return result } func readIndexColumns(db *sql.DB, indexName string) []string { sql := `SELECT DISTINCT a.attname FROM pg_index i JOIN pg_attribute a ON a.attrelid = i.indrelid AND a.attnum = ANY(i.indkey) WHERE indexrelid::regclass = $1::regclass;` rows, err := db.Query(sql, indexName) if err != nil { log.Panic().Err(err).Msg("error executing query") } defer rows.Close() result := make([]string, 0) for rows.Next() { var name string err := rows.Scan(&name) if err != nil { log.Panic().Err(err).Msg("error getting column data") } result = append(result, name) } return result } func readReferenceConstraintNames(db *sql.DB, tableName string) []string { sql := `SELECT DISTINCT tc.constraint_name FROM information_schema.table_constraints AS tc JOIN information_schema.constraint_column_usage AS ccu ON ccu.constraint_name = tc.constraint_name AND ccu.table_schema = tc.table_schema WHERE tc.constraint_type = 'FOREIGN KEY' AND tc.table_name = $1;` rows, err := db.Query(sql, tableName) if err != nil { log.Panic().Err(err).Msg("error executing query") } defer rows.Close() result := make([]string, 0) for rows.Next() { var name string err := rows.Scan(&name) if err != nil { log.Panic().Err(err).Msg("error getting column data") } result = append(result, name) } return result } func readReferencedByConstraintNames(db *sql.DB, tableName string) []string { sql := `SELECT DISTINCT tc.constraint_name FROM information_schema.table_constraints AS tc JOIN information_schema.constraint_column_usage AS ccu ON ccu.constraint_name = tc.constraint_name AND ccu.table_schema = tc.table_schema WHERE tc.constraint_type = 'FOREIGN KEY' AND ccu.table_name = $1;` rows, err := db.Query(sql, tableName) if err != nil { log.Panic().Err(err).Msg("error executing query") } defer rows.Close() result := make([]string, 0) for rows.Next() { var name string err := rows.Scan(&name) if err != nil { log.Panic().Err(err).Msg("error getting column data") } result = append(result, name) } return result } func readReferencedTable(db *sql.DB, referenceConstraintName string) string { sql := `SELECT DISTINCT ccu.table_name FROM information_schema.constraint_column_usage AS ccu WHERE ccu.constraint_name = $1;` rows, err := db.Query(sql, referenceConstraintName) if err != nil { log.Panic().Err(err).Msg("error executing query") } defer rows.Close() var name string rows.Next() rows.Scan(&name) return name } func readReferencedByTable(db *sql.DB, referenceConstraintName string) string { sql := `SELECT DISTINCT table_name FROM information_schema.table_constraints as tc WHERE tc.constraint_name = $1;` rows, err := db.Query(sql, referenceConstraintName) if err != nil { log.Panic().Err(err).Msg("error executing query") } defer rows.Close() var name string rows.Next() rows.Scan(&name) return name } func readReferenceConstraints(db *sql.DB, tableName 
string, referenceConstraintName string) map[string]string { sql := `SELECT DISTINCT kcu.column_name, ccu.column_name AS foreign_column_name FROM information_schema.table_constraints AS tc JOIN information_schema.key_column_usage AS kcu ON tc.constraint_name = kcu.constraint_name AND tc.table_schema = kcu.table_schema AND tc.table_name = kcu.table_name JOIN information_schema.constraint_column_usage AS ccu ON ccu.constraint_name = tc.constraint_name AND tc.table_schema = ccu.table_schema WHERE tc.constraint_type = 'FOREIGN KEY' AND tc.table_name = $1 AND tc.constraint_name = $2;` rows, err := db.Query(sql, tableName, referenceConstraintName) if err != nil { log.Panic().Err(err).Msg("error executing query") } defer rows.Close() result := make(map[string]string) for rows.Next() { var columnName string var foreignColumnName string err := rows.Scan(&columnName, &foreignColumnName) if err != nil { log.Panic().Err(err).Msg("error getting column data") } result[columnName] = foreignColumnName } return result } func findIndex(indexes map[string]UniqueIndex, columnName string) string { for name, index := range indexes { for _, column := range index.Columns { if strings.Compare(column, columnName) == 0 { return name } } } return "" } func findIndexMostColumns(indexes map[string]UniqueIndex) string { mostCols := 0 result := "" for name, index := range indexes { numCols := len(index.Columns) if numCols > mostCols { result = name mostCols = numCols } } return result } func readPKSequence(db *sql.DB, tableName string) string { sql := `WITH sequences AS ( SELECT sequence_name FROM information_schema.sequences WHERE sequence_schema = 'public' ), id_constraints AS ( SELECT tc.constraint_name, tc.table_name, kcu.column_name FROM information_schema.table_constraints AS tc JOIN information_schema.key_column_usage AS kcu ON tc.constraint_name = kcu.constraint_name WHERE tc.constraint_schema = 'public' AND constraint_type = 'PRIMARY KEY' AND kcu.ordinal_position = 1 AND column_name = 'id' AND tc.table_name = $1 ) SELECT sequence_name FROM id_constraints JOIN sequences ON replace(regexp_replace(constraint_name, '(_id)?_pk(ey)?', ''), '_', '') = replace(regexp_replace(sequence_name, '(_id)?_seq', ''), '_', '')` rows, err := db.Query(sql, tableName) if err != nil { log.Panic().Err(err).Msg("error executing query") } defer rows.Close() var name string rows.Next() rows.Scan(&name) return name } // ReadAllTablesSchema inspects the DB and returns the schema of all tables func ReadAllTablesSchema(db *sql.DB) map[string]Table { return ReadTablesSchema(db, readTableNames(db)) } func ReadTablesSchema(db *sql.DB, tableNames []string) map[string]Table { result := make(map[string]Table, 0) for _, tableName := range tableNames { table, err := processTable(db, strings.ToLower(tableName), true) if err { continue } result[table.Name] = table } // Load all reference tables not loaded yet for _, table := range result { result = processReferenceTables(db, table, result) } return result } func processReferenceTables(db *sql.DB, table Table, currentTables map[string]Table) map[string]Table { for _, reference := range table.References { _, ok := currentTables[reference.TableName] if ok { continue } tableProcessed, _ := processTable(db, reference.TableName, false) currentTables[reference.TableName] = tableProcessed currentTables = processReferenceTables(db, tableProcessed, currentTables) } return currentTables } func processTable(db *sql.DB, tableName string, exportable bool) (Table, bool) { columns := readColumnNames(db, tableName) if len(columns)
== 0 { log.Info().Msgf("Ignoring nonexistent table %s", tableName) return Table{}, true } columnIndexes := make(map[string]int) for i, columnName := range columns { columnIndexes[columnName] = i } pkColumns := readPKColumnNames(db, tableName) pkColumnMap := make(map[string]bool) for _, column := range pkColumns { pkColumnMap[column] = true } pkSequence := readPKSequence(db, tableName) indexNames := readUniqueIndexNames(db, tableName) indexes := make(map[string]UniqueIndex) for _, indexName := range indexNames { indexColumns := readIndexColumns(db, indexName) indexes[indexName] = UniqueIndex{Name: indexName, Columns: indexColumns} } mainUniqueIndexName := "" if len(indexNames) == 1 { mainUniqueIndexName = indexNames[0] } else if len(indexNames) > 1 { mainUniqueIndexName = findIndex(indexes, "label") if len(mainUniqueIndexName) == 0 { mainUniqueIndexName = findIndex(indexes, "name") if len(mainUniqueIndexName) == 0 { mainUniqueIndexName = findIndex(indexes, "token") if len(mainUniqueIndexName) == 0 { mainUniqueIndexName = findIndexMostColumns(indexes) } } } } constraintNames := readReferenceConstraintNames(db, tableName) references := make([]Reference, 0) for _, constraintName := range constraintNames { columnMap := readReferenceConstraints(db, tableName, constraintName) referencedTable := readReferencedTable(db, constraintName) references = append(references, Reference{TableName: referencedTable, ColumnMapping: columnMap}) } referencedByConstraintNames := readReferencedByConstraintNames(db, tableName) referencedBy := make([]Reference, 0) for _, constraintName := range referencedByConstraintNames { referencedTable := readReferencedByTable(db, constraintName) columnMap := readReferenceConstraints(db, referencedTable, constraintName) referencedBy = append(referencedBy, Reference{TableName: referencedTable, ColumnMapping: columnMap}) } table := Table{ Name: tableName, Export: exportable, Columns: columns, ColumnIndexes: columnIndexes, PKColumns: pkColumnMap, PKSequence: pkSequence, UniqueIndexes: indexes, MainUniqueIndexName: mainUniqueIndexName, References: references, ReferencedBy: referencedBy} table = applyTableFilters(table) return table, false } 0707010000003D000081A4000003E800000064000000016613BCD0000009C3000000000000000000000000000000000000002E00000000inter-server-sync/schemareader/reader_test.go// SPDX-FileCopyrightText: 2023 SUSE LLC // // SPDX-License-Identifier: Apache-2.0 package schemareader import ( "reflect" "testing" "github.com/DATA-DOG/go-sqlmock" "github.com/uyuni-project/inter-server-sync/tests" ) const ( TableName = "TableName" PKColumnName = "PKColumnName" UniqueIndexName01 = "UniqueIndexName01" UniqueIndexName02 = "UniqueIndexName02" UniqueIndexName03 = "UniqueIndexName03" IndexColumnName01 = "IndexColumnName01" IndexColumnName02 = "IndexColumnName02" ) func TestProcessTable(t *testing.T) { // Arrange repo := tests.CreateDataRepository() UniqueIndexMostColumnsCase(repo) // Act table, _ := processTable(repo.DB, TableName, true) // Assert indexesEqual := reflect.DeepEqual(table.MainUniqueIndexName, UniqueIndexName03) if !indexesEqual { t.Errorf("UniqueIndexes do not match: expected %s, got %s", UniqueIndexName03, table.MainUniqueIndexName) } } func UniqueIndexMostColumnsCase(repo *tests.DataRepository) { repo.ExpectWithRecords(ReadColumnNames, sqlmock.NewRows([]string{"column_name"}).AddRow(""), TableName) repo.ExpectWithRecords(ReadPkColumnNames, sqlmock.NewRows([]string{"attname"}).AddRow(""), TableName) repo.ExpectWithRecords(ReadPkSequence,
sqlmock.NewRows([]string{"sequence_name"}).AddRow(""), TableName) // Read indexes information to get three indexes repo.ExpectWithRecords( ReadUniqueIndexNames, sqlmock.NewRows([]string{"indexrelid"}). AddRow(UniqueIndexName01). AddRow(UniqueIndexName02). AddRow(UniqueIndexName03), TableName, ) // Read columns for index UniqueIndexName01 repo.ExpectWithRecords( ReadIndexColumns, sqlmock.NewRows([]string{"indexrelid"}). // One column in the index AddRow(PKColumnName), UniqueIndexName01, ) // Read columns for index UniqueIndexName02 repo.ExpectWithRecords( ReadIndexColumns, sqlmock.NewRows([]string{"indexrelid"}). // Two columns in the index AddRow(PKColumnName). AddRow(IndexColumnName01), UniqueIndexName02, ) // Read columns for index UniqueIndexName03 repo.ExpectWithRecords( ReadIndexColumns, sqlmock.NewRows([]string{"indexrelid"}). // Three columns in the index AddRow(PKColumnName). AddRow(IndexColumnName01). AddRow(IndexColumnName02), UniqueIndexName03, ) repo.ExpectWithRecords(ReadReferenceConstraintNames, sqlmock.NewRows([]string{"constraint_name"}), TableName) repo.ExpectWithRecords(ReadReferencedByConstraintNames, sqlmock.NewRows([]string{"constraint_name"}), TableName) } 0707010000003E000081A4000003E800000064000000016613BCD000001E13000000000000000000000000000000000000002F00000000inter-server-sync/schemareader/tableFilters.go// SPDX-FileCopyrightText: 2023 SUSE LLC // // SPDX-License-Identifier: Apache-2.0 package schemareader import ( "regexp" "strings" "github.com/rs/zerolog/log" "github.com/uyuni-project/inter-server-sync/sqlUtil" ) const ( VirtualIndexName = "virtual_main_unique_index" ) func applyTableFilters(table Table) Table { switch table.Name { case "suseproductsccrepository": table.PKSequence = "suse_prdrepo_id_seq" case "rhnchecksumtype": table.PKSequence = "rhn_checksum_id_seq" case "rhnchecksum": table.PKSequence = "rhnchecksum_seq" case "rhnpackagearch": table.PKSequence = "rhn_package_arch_id_seq" case "rhnchannelarch": table.PKSequence = "rhn_channel_arch_id_seq" case "rhnpackagename": // constraint: rhn_pn_id_pk table.PKSequence = "RHN_PKG_NAME_SEQ" case "rhnpackagenevra": table.PKSequence = "rhn_pkgnevra_id_seq" case "rhnpackagesource": table.PKSequence = "rhn_package_source_id_seq" case "rhnpackagekey": table.PKSequence = "rhn_pkey_id_seq" case "rhnpackageextratag": virtualIndexColumns := []string{"package_id", "key_id"} table.UniqueIndexes[VirtualIndexName] = UniqueIndex{Name: VirtualIndexName, Columns: virtualIndexColumns} table.MainUniqueIndexName = VirtualIndexName case "rhnpackageevr": // constraint: rhn_pe_id_pk table.PKSequence = "rhn_pkg_evr_seq" unexportColumns := make(map[string]bool) unexportColumns["type"] = true table.UnexportColumns = unexportColumns table.UniqueIndexes["rhn_pe_v_r_e_uq"] = UniqueIndex{Name: "rhn_pe_v_r_e_uq", Columns: append(table.UniqueIndexes["rhn_pe_v_r_e_uq"].Columns, "type")} table.UniqueIndexes["rhn_pe_v_r_uq"] = UniqueIndex{Name: "rhn_pe_v_r_uq", Columns: append(table.UniqueIndexes["rhn_pe_v_r_uq"].Columns, "type")} case "rhnpackage": // We need to add a virtual unique constraint table.PKSequence = "RHN_PACKAGE_ID_SEQ" virtualIndexColumns := []string{"name_id", "evr_id", "package_arch_id", "checksum_id", "org_id"} table.UniqueIndexes[VirtualIndexName] = UniqueIndex{Name: VirtualIndexName, Columns: virtualIndexColumns} table.MainUniqueIndexName = VirtualIndexName case "rhnpackagechangelogdata": // We need to add a virtual unique constraint table.PKSequence = "rhn_pkg_cld_id_seq" virtualIndexColumns := []string{"name", "text", 
"time"} table.UniqueIndexes[VirtualIndexName] = UniqueIndex{Name: VirtualIndexName, Columns: virtualIndexColumns} table.MainUniqueIndexName = VirtualIndexName case "rhnpackagechangelogrec": table.PKSequence = "rhn_pkg_cl_id_seq" case "rhnpackagecapability": // pkid: rhn_pkg_capability_id_pk table.PKSequence = "RHN_PKG_CAPABILITY_ID_SEQ" // table has real unique index, but they are complex and useless, since we do nothing in the conflict // to simplify the code we can create a virtual index that will insure all data exists as supposed virtualIndexColumns := []string{"name", "version"} table.UniqueIndexes[VirtualIndexName] = UniqueIndex{Name: VirtualIndexName, Columns: virtualIndexColumns} table.MainUniqueIndexName = VirtualIndexName case "rhnconfigfiletype": virtualIndexColumns := []string{"label"} table.UniqueIndexes[VirtualIndexName] = UniqueIndex{Name: VirtualIndexName, Columns: virtualIndexColumns} table.MainUniqueIndexName = VirtualIndexName case "rhnconfigfile": unexportColumns := make(map[string]bool) unexportColumns["latest_config_revision_id"] = true table.UnexportColumns = unexportColumns case "rhnconfigcontent": virtualIndexColumns := []string{"contents", "file_size", "checksum_id", "is_binary", "delim_start", "delim_end", "created"} table.UniqueIndexes[VirtualIndexName] = UniqueIndex{Name: VirtualIndexName, Columns: virtualIndexColumns} table.MainUniqueIndexName = VirtualIndexName case "suseimageinfo": unexportColumns := make(map[string]bool) // Ignore actions relevant only to source server unexportColumns["build_action_id"] = true unexportColumns["inspect_action_id"] = true unexportColumns["build_server_id"] = true unexportColumns["log"] = true table.UnexportColumns = unexportColumns // Unfortunately images have only ID unique and that is not enough for our guessing game. 
// Create a virtual compound index instead, as close as we can get virtualIndexColumns := []string{"name", "version", "image_type", "image_arch_id", "org_id", "curr_revision_num"} table.UniqueIndexes[VirtualIndexName] = UniqueIndex{Name: VirtualIndexName, Columns: virtualIndexColumns} table.MainUniqueIndexName = VirtualIndexName case "suseimageinfochannel": virtualIndexColumns := []string{"channel_id", "image_info_id"} table.UniqueIndexes[VirtualIndexName] = UniqueIndex{Name: VirtualIndexName, Columns: virtualIndexColumns} table.MainUniqueIndexName = VirtualIndexName case "suseimageprofile": table.PKSequence = "suse_imgprof_prid_seq" // rhnregtoken is completely non-unique standalone; use rhnactivationkey instead as a reference to the same id references := make([]Reference, 0) for _, r := range table.References { if strings.Compare(r.TableName, "rhnregtoken") == 0 { ref := Reference{} ref.TableName = "rhnactivationkey" ref.ColumnMapping = map[string]string{ "token_id": "reg_token_id", } references = append(references, ref) } else { references = append(references, r) } } table.References = references case "susekiwiprofile": virtualIndexColumns := []string{"profile_id"} table.UniqueIndexes[VirtualIndexName] = UniqueIndex{Name: VirtualIndexName, Columns: virtualIndexColumns} table.MainUniqueIndexName = VirtualIndexName case "susedockerfileprofile": virtualIndexColumns := []string{"profile_id", "path"} table.UniqueIndexes[VirtualIndexName] = UniqueIndex{Name: VirtualIndexName, Columns: virtualIndexColumns} table.MainUniqueIndexName = VirtualIndexName case "rhnerrata": // this table has two unique indexes of the same size, either of which could be used // we fix the usage to one of them to make it deterministic table.MainUniqueIndexName = "rhn_errata_adv_org_uq" table.RowModCallback = func(value []sqlUtil.RowDataStructure, table Table) []sqlUtil.RowDataStructure { for i, row := range value { if strings.Compare(row.ColumnName, "severity_id") == 0 { value[i].Value = value[i].GetInitialValue() } } return value } case "susesaltpillar": table.RowModCallback = func(value []sqlUtil.RowDataStructure, table Table) []sqlUtil.RowDataStructure { isImagePillar := false pillarColumn := 0 for i, column := range value { if strings.Compare(column.ColumnName, "category") == 0 && strings.HasPrefix(column.Value.(string), "Image") { log.Trace().Msgf("Updating pillar URLs of %s", column.Value) isImagePillar = true } else if strings.Compare(column.ColumnName, "pillar") == 0 { pillarColumn = i } } if isImagePillar { re := regexp.MustCompile(`https://[^/]+/os-images/`) repl := []byte("https://{SERVER_FQDN}/os-images/") value[pillarColumn].Value = re.ReplaceAll(value[pillarColumn].Value.([]byte), repl) } return value } virtualIndexColumns := []string{"server_id", "group_id", "org_id", "category"} table.UniqueIndexes[VirtualIndexName] = UniqueIndex{Name: VirtualIndexName, Columns: virtualIndexColumns} table.MainUniqueIndexName = VirtualIndexName case "suseimagefile": table.PKSequence = "suse_image_file_id_seq" virtualIndexColumns := []string{"image_info_id", "file"} table.UniqueIndexes[VirtualIndexName] = UniqueIndex{Name: VirtualIndexName, Columns: virtualIndexColumns} table.MainUniqueIndexName = VirtualIndexName case "rhnpackageextratagkey": table.PKSequence = "rhn_package_extra_tags_keys_id_seq" } return table } 0707010000003F000081A4000003E800000064000000016613BCD000000561000000000000000000000000000000000000002800000000inter-server-sync/schemareader/types.go// SPDX-FileCopyrightText: 2023 SUSE LLC // // SPDX-License-Identifier:
Apache-2.0 package schemareader import "github.com/uyuni-project/inter-server-sync/sqlUtil" // Table represents a DB table to dump type Table struct { Name string Export bool Columns []string UnexportColumns map[string]bool ColumnIndexes map[string]int PKColumns map[string]bool PKSequence string UniqueIndexes map[string]UniqueIndex // a unique index is main when it is the preferred "natural" key MainUniqueIndexName string References []Reference ReferencedBy []Reference RowModCallback TableCallback } // UniqueIndex represents an index among columns of a Table type UniqueIndex struct { Name string Columns []string } // Reference represents a foreign key relationship to a Table type Reference struct { TableName string ColumnMapping map[string]string } // TableCallback is a row modification callback function type TableCallback func(value []sqlUtil.RowDataStructure, table Table) []sqlUtil.RowDataStructure // GetFirstReferenceFromColumn returns just one reference, the first one which uses the given column func (table *Table) GetFirstReferenceFromColumn(columnName string) Reference { for _, reference := range table.References { _, ok := reference.ColumnMapping[columnName] if ok { return reference } } return Reference{} } 07070100000040000081ED000003E800000064000000016613BCD0000000C6000000000000000000000000000000000000001B00000000inter-server-sync/setup.sh# SPDX-FileCopyrightText: 2023 SUSE LLC # # SPDX-License-Identifier: Apache-2.0 set -euxo pipefail go mod vendor && tar czvf vendor.tar.gz vendor >/dev/null && rm -rf vendor echo "vendor.tar.gz" 07070100000041000041ED000003E800000064000000016613BCD000000000000000000000000000000000000000000000001A00000000inter-server-sync/sqlUtil07070100000042000081A4000003E800000064000000016613BCD0000009AE000000000000000000000000000000000000002B00000000inter-server-sync/sqlUtil/queryExecuter.go// SPDX-FileCopyrightText: 2023 SUSE LLC // // SPDX-License-Identifier: Apache-2.0 package sqlUtil import ( "database/sql" "reflect" "github.com/rs/zerolog/log" ) type RowDataStructure struct { ColumnName string ColumnType string initialValue interface{} Value interface{} } func (row RowDataStructure) GetInitialValue() interface{} { return row.initialValue } func ExecuteQueryWithResults(db *sql.DB, sql string, scanParameters ...interface{}) [][]RowDataStructure { rows, err := db.Query(sql, scanParameters...)
if err != nil { log.Printf("error while executing '%s' with parameters %v", sql, scanParameters) log.Panic().Err(err).Msg("error executing query") } defer rows.Close() // get column type info columnTypes, err := rows.ColumnTypes() if err != nil { log.Panic().Err(err).Msg("error getting column types") } // used for allocation & dereferencing rowValues := make([]reflect.Value, len(columnTypes)) for i := 0; i < len(columnTypes); i++ { // allocate reflect.Value representing a **T Value rowValues[i] = reflect.New(reflect.PtrTo(columnTypes[i].ScanType())) } computedValues := make([][]RowDataStructure, 0) for rows.Next() { // initially will hold pointers for Scan, after scanning the // pointers will be dereferenced so that the slice holds actual values rowResult := make([]interface{}, len(columnTypes)) for i := 0; i < len(columnTypes); i++ { // get the **T Value from the reflect.Value rowResult[i] = rowValues[i].Interface() } // scan each column Value into the corresponding **T Value if err := rows.Scan(rowResult...); err != nil { log.Panic().Err(err).Msg("error getting rows") } // dereference pointers rowComputedValues := make([]RowDataStructure, 0) for i := 0; i < len(rowValues); i++ { // first pointer deref to get reflect.Value representing a *T Value, // if rv.IsNil it means column Value was NULL if rv := rowValues[i].Elem(); rv.IsNil() { rowResult[i] = nil } else { // second deref to get reflect.Value representing the T Value // and call Interface to get T Value from the reflect.Value rowResult[i] = rv.Elem().Interface() } rowComputedValues = append(rowComputedValues, RowDataStructure{ColumnType: columnTypes[i].DatabaseTypeName(), initialValue: rowResult[i], Value: rowResult[i], ColumnName: columnTypes[i].Name()}) } computedValues = append(computedValues, rowComputedValues) } return computedValues } 07070100000043000041ED000003E800000064000000016613BCD000000000000000000000000000000000000000000000001D00000000inter-server-sync/sql_helper07070100000044000081A4000003E800000064000000016613BCD00000161E000000000000000000000000000000000000003400000000inter-server-sync/sql_helper/check_channel_data.sql-- SPDX-FileCopyrightText: 2023 SUSE LLC -- -- SPDX-License-Identifier: Apache-2.0 ---------- ----OK -- Initial catalogs select * from rhnproductname where id in (select product_name_id from rhnchannel where id = 118); select * from rhnchannelproduct where id in (select channel_product_id from rhnchannel where id = 118); select * from rhnarchtype where id in (select arch_type_id from rhnchannelarch where id in (select channel_arch_id from rhnchannel where id = 118)); -- can be incomplete select * from rhnchecksumtype where id in (select checksum_type_id from rhnchannel where id = 118); -- can be incomplete select * from rhnpackagearch where id in (select package_arch_id from rhnpackage where id in (select package_id from rhnchannelpackage where channel_id = 118)); select * from web_customer where id in (select org_id from rhnchannel where id = 118); select * from rhnchannelarch where id in (select channel_arch_id from rhnchannel where id = 118); select * from rhnerrataseverity where id in (select severity_id from rhnerrata where id in (select errata_id from rhnchannelerrata where channel_id = 118)) --- step 2 select * from rhnchannel where id = 118; select * from rhnchannelfamilymembers where channel_id = 118; select * from rhnchannelfamily where id in (select channel_family_id from rhnchannelfamilymembers where channel_id = 118); select * from rhnerrata where id in (select errata_id from
inter-server-sync/sql_helper/check_channel_data.sql

-- SPDX-FileCopyrightText: 2023 SUSE LLC
--
-- SPDX-License-Identifier: Apache-2.0

----------
---- OK
-- Initial catalogs
select * from rhnproductname where id in (select product_name_id from rhnchannel where id = 118);
select * from rhnchannelproduct where id in (select channel_product_id from rhnchannel where id = 118);
select * from rhnarchtype where id in (select arch_type_id from rhnchannelarch where id in (select channel_arch_id from rhnchannel where id = 118));
-- can be incomplete
select * from rhnchecksumtype where id in (select checksum_type_id from rhnchannel where id = 118);
-- can be incomplete
select * from rhnpackagearch where id in (select package_arch_id from rhnpackage where id in (select package_id from rhnchannelpackage where channel_id = 118));
select * from web_customer where id in (select org_id from rhnchannel where id = 118);
select * from rhnchannelarch where id in (select channel_arch_id from rhnchannel where id = 118);
select * from rhnerrataseverity where id in (select severity_id from rhnerrata where id in (select errata_id from rhnchannelerrata where channel_id = 118));

--- step 2
select * from rhnchannel where id = 118;
select * from rhnchannelfamilymembers where channel_id = 118;
select * from rhnchannelfamily where id in (select channel_family_id from rhnchannelfamilymembers where channel_id = 118);
select * from rhnerrata where id in (select errata_id from rhnchannelerrata where channel_id = 118);
select * from rhnchannelerrata where channel_id = 118;

--- step 3
select * from rhnpackagename where id in (select name_id from rhnpackage where id in (select package_id from rhnchannelpackage where channel_id = 118));
select * from rhnpackagegroup where id in (select package_group from rhnpackage where id in (select package_id from rhnchannelpackage where channel_id = 118));
select * from rhnsourcerpm where id in (select source_rpm_id from rhnpackage where id in (select package_id from rhnchannelpackage where channel_id = 118));
select * from rhnpackageevr where id in (select evr_id from rhnpackage where id in (select package_id from rhnchannelpackage where channel_id = 118));
select * from rhnpackage where id in (select package_id from rhnchannelpackage where channel_id = 118);
select * from rhnchannelpackage where channel_id = 118;
select * from rhnerratapackage where errata_id in (select errata_id from rhnchannelerrata where channel_id = 118);
select * from rhnpackageprovider where id in (select provider_id from rhnpackagekey where id in (select rhnpackagekeyassociation.key_id from rhnpackagekeyassociation where package_id in (select package_id from rhnchannelpackage where channel_id = 118)));
select * from rhnpackagekeytype where id in (select key_type_id from rhnpackagekey where id in (select rhnpackagekeyassociation.key_id from rhnpackagekeyassociation where package_id in (select package_id from rhnchannelpackage where channel_id = 118)));
select * from rhnpackagekey where id in (select rhnpackagekeyassociation.key_id from rhnpackagekeyassociation where package_id in (select package_id from rhnchannelpackage where channel_id = 118));
select * from rhnpackagekeyassociation where package_id in (select package_id from rhnchannelpackage where channel_id = 118);
select * from rhnerratabuglist where errata_id in (select errata_id from rhnchannelerrata where channel_id = 118);
select * from rhnpackagecapability where id in (select capability_id from rhnpackagebreaks where package_id in (select package_id from rhnchannelpackage where channel_id = 118));
select * from rhnpackagebreaks where package_id in (select package_id from rhnchannelpackage where channel_id = 118);
select * from rhnpackagechangelogdata where id in (select changelog_data_id from rhnpackagechangelogrec where package_id in (select package_id from rhnchannelpackage where channel_id = 118));
select * from rhnpackagechangelogrec where package_id in (select package_id from rhnchannelpackage where channel_id = 118);
select * from rhnpackageconflicts where package_id in (select package_id from rhnchannelpackage where channel_id = 118);
select * from rhnpackageenhances where package_id in (select package_id from rhnchannelpackage where channel_id = 118);
select * from rhnpackagefile where package_id in (select package_id from rhnchannelpackage where channel_id = 118);
select * from rhnpackageobsoletes where package_id in (select package_id from rhnchannelpackage where channel_id = 118);
select * from rhnpackagepredepends where package_id in (select package_id from rhnchannelpackage where channel_id = 118);
select * from rhnpackageprovides where package_id in (select package_id from rhnchannelpackage where channel_id = 118);
select * from rhnpackagerequires where package_id in (select package_id from rhnchannelpackage where channel_id = 118);
select * from rhnpackagesuggests where package_id in (select package_id from rhnchannelpackage where channel_id = 118);
select * from rhnsourcerpm where id in (
    select source_rpm_id from rhnpackage where id in (select package_id from rhnchannelpackage where channel_id = 118)
    union all
    select source_rpm_id from rhnpackagesource
);
select * from rhnpackagerecommends where package_id in (select package_id from rhnchannelpackage where channel_id = 118);
select * from rhnchecksum where id in (
    select checksum_id from rhnpackage where id in (select package_id from rhnchannelpackage where channel_id = 118)
    union all
    select checksum_id from rhnpackagefile where package_id in (select package_id from rhnchannelpackage where channel_id = 118)
);

------
---- NOT tested
-- rhnpackagesource
-- rhnpackagesuggests
inter-server-sync/tests/README.md

<!--
SPDX-FileCopyrightText: 2023 SUSE LLC

SPDX-License-Identifier: Apache-2.0
-->

# Testing

## Quick start

The following command runs all available tests:

```shell
go test ./... -cover -race
```

We keep the **race detector** enabled by default. There is no concurrency implemented just yet, but the flag costs little and will already be in place once asynchronous I/O arrives. The same command runs in the GitHub Actions workflow.

## Structure

### General

All test files carry the `_test` suffix in their names, and a test function has a signature of the following form: `func Test<your-name>(t *testing.T) {...}`. Following the common Go convention, we also keep the test files next to the functionality they cover. These rules let the toolchain, and anyone contributing to the test suite, pick up the tests automatically without having to specify any paths. Test code is likewise excluded from the build automatically.

### Code reuse

A few preparation steps repeat across tests, so we moved the common code up into the `./tests/` package. Whenever you need to mock an object external to the functionality under test, consider putting it into this common package: chances are it will be reused elsewhere in the test suite.

## Common cases

### Testing and/or mocking the I/O operations

You can create a mocked data repository object by calling

```go
var repo *tests.DataRepository = tests.CreateDataRepository()
```

The repository simulates reading from a SQL database and writing the results into a buffer. On the data repository we define which SQL statements we expect from our code. `Expect` adds data to the repository, which can then be retrieved by the tested function; the second argument lists the result columns, the third the number of generated records:

```go
repo.Expect("SELECT id, fk_id FROM tableOne;", []string{"id", "fk_id"}, 1)
repo.Expect("SELECT id, fk_id FROM tableTwo;", []string{"id", "fk_id"}, 1)
```

The data repository expects these statements in exactly this order.

**Important:** the SQL statement check is exclusive. If the code submits an SQL statement the repository does not expect, the test fails. Expectations that were defined but never executed, however, must be checked manually: `ExpectationsWereMet` verifies that all queued expectations were met in order and returns an error if any of them was not.
```go
// checks if all expected statements were indeed executed against the db
if err := testCase.repo.ExpectationsWereMet(); err != nil {
	t.Errorf("Some nodes left unexported. Error message: %s", err)
}
```

When testing the writing behavior, you can get the contents of the buffer by calling

```go
var writtenBuffer []string = testCase.repo.GetWriterBuffer()
```
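Putting the pieces together, a complete test can look as follows. The function under test, `exporter.DumpTable`, is a made-up name used purely for illustration:

```go
func TestDumpTableExportsAllRows(t *testing.T) {
	repo := tests.CreateDataRepository()
	repo.Expect("SELECT id, fk_id FROM tableOne;", []string{"id", "fk_id"}, 2)

	// hypothetical function under test: reads via repo.DB, writes via repo.Writer
	exporter.DumpTable(repo.DB, repo.Writer, "tableOne")

	if err := repo.ExpectationsWereMet(); err != nil {
		t.Errorf("Some statements left unexecuted. Error message: %s", err)
	}
	if lines := repo.GetWriterBuffer(); len(lines) == 0 {
		t.Error("nothing was written to the buffer")
	}
}
```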
inter-server-sync/tests/utils.go

// SPDX-FileCopyrightText: 2023 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0

package tests

import (
	"bufio"
	"database/sql"
	"database/sql/driver"
	"fmt"

	"github.com/DATA-DOG/go-sqlmock"
)

// DataRepository encapsulates I/O operations.
type DataRepository struct {
	DB         *sql.DB
	mock       sqlmock.Sqlmock
	Writer     *bufio.Writer
	mockWriter *MockWriter
}

// CreateDataRepository is the factory method for DataRepository.
func CreateDataRepository() *DataRepository {
	db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
	checkErr(err)
	mock.MatchExpectationsInOrder(true)
	mockWriter := &MockWriter{}
	writerAdapter := bufio.NewWriter(mockWriter)
	return &DataRepository{DB: db, mock: mock, Writer: writerAdapter, mockWriter: mockWriter}
}

// Expect adds data to the repository, which can then be retrieved by the tested function.
func (repo *DataRepository) Expect(stm string, columns []string, numRecords int, args ...driver.Value) {
	// generate numRecords rows of synthetic data for the given columns
	recs := sqlmock.NewRows(columns)
	for i := 0; i < numRecords; i++ {
		res := []driver.Value{fmt.Sprintf("%04d", i+1)}
		for j := 1; j < len(columns); j++ {
			res = append(res, fmt.Sprintf("%04d", 1))
		}
		recs = recs.AddRow(res...)
	}
	repo.ExpectWithRecords(stm, recs, args...)
}

// ExpectWithRecords adds pre-built rows to the repository, which can then be
// retrieved by the tested function.
func (repo *DataRepository) ExpectWithRecords(stm string, recs *sqlmock.Rows, args ...driver.Value) {
	// add mock expectation
	if len(args) > 0 {
		repo.mock.
			ExpectQuery(stm).
			WithArgs(args...).
			WillReturnRows(recs).
			RowsWillBeClosed()
	} else {
		repo.mock.
			ExpectQuery(stm).
			WillReturnRows(recs).
			RowsWillBeClosed()
	}
}

// ExpectationsWereMet checks whether all queued expectations were met in
// order. If any of them was not met, an error is returned.
func (repo *DataRepository) ExpectationsWereMet() error {
	return repo.mock.ExpectationsWereMet()
}

// GetWriterBuffer flushes the writer and returns everything written so far.
func (repo *DataRepository) GetWriterBuffer() []string {
	err := repo.Writer.Flush()
	checkErr(err)
	return repo.mockWriter.data
}

// MockWriter implements io.Writer and records every chunk written to it.
type MockWriter struct {
	data []string
}

func (mr *MockWriter) Write(p []byte) (n int, err error) {
	mr.data = append(mr.data, string(p))
	return len(p), nil
}

func (mr *MockWriter) GetData() []string {
	return mr.data
}

func checkErr(err error) {
	if err != nil {
		panic(err)
	}
}
inter-server-sync/utils/utils.go

// SPDX-FileCopyrightText: 2023 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0

package utils

import (
	"bufio"
	"bytes"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"reflect"
	"strings"
	"time"

	"github.com/rs/zerolog/log"
)

// getDefaultConfigs returns the default config paths: etc default, web default, package default.
func getDefaultConfigs() []string {
	return []string{
		"/etc/rhn/rhn.conf",
		"/usr/share/rhn/config-defaults/rhn_web.conf",
		"/usr/share/rhn/config-defaults/rhn.conf",
	}
}

// ReverseArray reverses the slice in place.
func ReverseArray(slice interface{}) {
	size := reflect.ValueOf(slice).Len()
	swap := reflect.Swapper(slice)
	for i, j := 0, size-1; i < j; i, j = i+1, j-1 {
		swap(i, j)
	}
}

// Contains checks whether a string element exists in the slice, ignoring case.
func Contains(slice []string, elementToFind string) bool {
	for _, element := range slice {
		if strings.EqualFold(elementToFind, element) {
			return true
		}
	}
	return false
}

// GetAbsPath expands a leading "~" to the user's home directory; absolute
// paths are returned unchanged.
func GetAbsPath(path string) string {
	if filepath.IsAbs(path) {
		return path
	}
	homedir, err := os.UserHomeDir()
	if err != nil {
		log.Fatal().Msg("Couldn't determine the home directory")
	}
	if strings.HasPrefix(path, "~") {
		return strings.Replace(path, "~", homedir, 1)
	}
	return path
}

// FolderExists returns nil when path exists and is a directory.
func FolderExists(path string) error {
	folder, err := os.Open(path)
	if err != nil {
		return err
	}
	defer folder.Close()
	folderInfo, err := folder.Stat()
	if err != nil {
		return err
	}
	if !folderInfo.IsDir() {
		return fmt.Errorf("path is not a directory: %s", path)
	}
	return nil
}

// GetCurrentServerVersion reads the product name and version from the server
// configuration files and returns (version, product).
func GetCurrentServerVersion(serverConfig string) (string, string) {
	files := []string{serverConfig}
	files = append(files, getDefaultConfigs()...)
	property := []string{"product_name", "web.product_name"}
	product := "SUSE Manager"
	if p, err := getProperty(files, property); err == nil {
		product = p
	}
	propertyVersion := []string{"web.version"}
	if product != "SUSE Manager" {
		propertyVersion = []string{"web.version.uyuni"}
		product = "uyuni"
	}
	version, err := getProperty(files, propertyVersion)
	if err != nil {
		log.Fatal().Msgf("No version found for product %s", product)
	}
	return version, product
}

// GetCurrentServerFQDN returns the server FQDN as configured for cobbler.
func GetCurrentServerFQDN(serverConfig string) string {
	files := []string{serverConfig}
	files = append(files, getDefaultConfigs()...)
	property := []string{"cobbler.host"}
	p, err := getProperty(files, property)
	if err != nil {
		log.Error().Msgf("FQDN of server not found, images pillar may not be processed correctly")
		return ""
	}
	return p
}

func getProperty(filePaths []string, names []string) (string, error) {
	for _, search := range names {
		for _, path := range filePaths {
			if p, err := ScannerFunc(path, search); err == nil {
				return p, nil
			}
		}
	}
	return "", fmt.Errorf("property not found")
}

// ScannerFunc scans a "key = value" style config file for the given key.
func ScannerFunc(path string, search string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		log.Fatal().Msgf("Couldn't open file: %s", path)
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		linetext := scanner.Text()
		index := strings.Index(linetext, "=")
		if index < 0 {
			continue
		}
		key := strings.Trim(linetext[:index], " ")
		if key == search {
			return strings.Trim(linetext[index+1:], " "), nil
		}
	}
	return "", fmt.Errorf("property %q not found in %s", search, path)
}

// ValidateDate accepts an empty string or a date in one of the supported
// layouts and returns the normalized date plus a validity flag.
func ValidateDate(date string) (string, bool) {
	if date == "" {
		return "", true
	}
	for _, layout := range []string{"2006-01-02 15:04:05", "2006-01-02"} {
		if t, err := time.Parse(layout, date); err == nil {
			return t.Format(layout), true
		}
	}
	return "", false
}

// ReadFileByLine returns the lines of the file at path.
func ReadFileByLine(path string) []string {
	msg := fmt.Sprintf("error opening file at %s", path)
	file, err := os.Open(path)
	checkError(err, msg)
	defer func(file *os.File) {
		checkError(file.Close(), msg)
	}(file)
	scanner := bufio.NewScanner(file)
	scanner.Split(bufio.ScanLines)
	var labels []string
	for scanner.Scan() {
		labels = append(labels, scanner.Text())
	}
	return labels
}

// ExecInteractivePrompt calls a command, expects an interactive prompt to
// start, and passes the given input into it.
func ExecInteractivePrompt(name string, input string) error {
	cmd := exec.Command(name)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	buffer := bytes.Buffer{}
	buffer.WriteString(input)
	cmd.Stdin = &buffer
	return cmd.Run()
}

func checkError(err error, msg string) {
	if err != nil {
		log.Fatal().Err(err).Msg(msg)
	}
}

inter-server-sync/utils/utils_test.go

// SPDX-FileCopyrightText: 2023 SUSE LLC
//
// SPDX-License-Identifier: Apache-2.0

package utils

import (
	"testing"
)

func TestArrayRevert(t *testing.T) {
	myArray := []int{1, 2, 3}
	myArrayRevert := make([]int, len(myArray))
	copy(myArrayRevert, myArray)
	ReverseArray(myArrayRevert)
	for i, value := range myArray {
		if myArrayRevert[len(myArray)-i-1] != value {
			t.Fatalf("values are different: %d -> %d", myArrayRevert[len(myArray)-i-1], value)
		}
	}
}

func TestValidateDateValid(t *testing.T) {
	date := "2022-01-01"
	validatedDate, ok := ValidateDate(date)
	if !ok {
		t.Errorf("The date is not validated properly.")
	}
	if date != validatedDate {
		t.Errorf("The date is not validated properly.")
	}
}

func TestValidateDateEmpty(t *testing.T) {
	date := ""
	validatedDate, ok := ValidateDate(date)
	if !ok {
		t.Errorf("An empty date should be treated as valid.")
	}
	if validatedDate != "" {
		t.Errorf("The date is not validated properly.")
	}
}
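A quick sketch of how the config helpers combine; the path is the standard server config location, and the printed values depend on the local installation:

// sketch: read product, version and FQDN from the server configuration
version, product := utils.GetCurrentServerVersion("/etc/rhn/rhn.conf")
fqdn := utils.GetCurrentServerFQDN("/etc/rhn/rhn.conf")
fmt.Printf("%s %s on %s\n", product, version, fqdn)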
"github.com/uyuni-project/xmlrpc-public-methods" "net" "net/http" "time" ) const ( ConnectTimeout = 10 RequestTimeout = 10 Endpoint = "http://localhost/rpc/api" AuthMethod = "auth.login" SyncMethod = "configchannel.syncSaltFilesOnDisk" ) type Client interface { SyncConfigFiles(labels []string) (interface{}, error) } type client struct { connectTimeout int requestTimeout int endpoint string username string password string } func NewClient(username string, password string) *client { return &client{ ConnectTimeout, RequestTimeout, Endpoint, username, password, } } func (c *client) executeCall(endpoint string, call string, args []interface{}) (response interface{}, err error) { client, err := getClientWithTimeout(endpoint, c.connectTimeout, c.requestTimeout) if err != nil { return nil, err } defer client.Close() err = client.Call(call, args, &response) return response, err } func getClientWithTimeout(url string, connectTimeout, requestTimeout int) (*xmlrpc.Client, error) { transport := http.Transport{ DialContext: timeoutDialer(time.Duration(connectTimeout)*time.Second, time.Duration(requestTimeout)*time.Second), } return xmlrpc.NewClient(url, &transport) } func timeoutDialer(connectTimeout, requestTimeout time.Duration) func(ctx context.Context, net, addr string) (c net.Conn, err error) { return func(ctx context.Context, netw, addr string) (net.Conn, error) { conn, err := net.DialTimeout(netw, addr, connectTimeout) if err != nil { return nil, err } conn.SetDeadline(time.Now().Add(requestTimeout)) return conn, nil } } func (c *client) SyncConfigFiles(labels []string) (interface{}, error) { credentials := []interface{}{c.username, c.password} token, err := c.executeCall(c.endpoint, AuthMethod, credentials) if err != nil { return nil, err } syncPayload := []interface{}{token, labels} return c.executeCall(c.endpoint, SyncMethod, syncPayload) } 07070100000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000B00000000TRAILER!!!